// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know they should
	 * clear it do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

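/*
 * Illustrative example (not part of the kernel source): because of the
 * SYNCHRONOUS_MASK special case above, a pending synchronous signal is
 * reported before a pending asynchronous one from the same word, even
 * when the asynchronous signal has the lower number:
 *
 *	struct sigpending pending;	// SIGUSR1 and SIGSEGV both pending
 *	sigset_t mask;			// nothing blocked
 *
 *	sigemptyset(&mask);
 *	// next_signal(&pending, &mask) returns SIGSEGV, not SIGUSR1:
 *	// SIGSEGV is in SYNCHRONOUS_MASK, SIGUSR1 is not.
 */
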
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If %JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->sighand->siglock guarantees that @task->parent points
 * to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an ongoing signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts = NULL;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
	if (sigpending == 1)
		ucounts = get_ucounts(ucounts);
	rcu_read_unlock();

	if (override_rlimit || (sigpending < LONG_MAX && sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (ucounts && dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
			put_ucounts(ucounts);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
		put_ucounts(q->ucounts);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves; we don't let
	 * signalfd steal them.
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path also
		 * reduces the timer noise on heavily loaded !highres
		 * systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

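/*
 * Illustrative usage sketch (not part of the kernel source): every
 * caller must hold the siglock, so dequeueing the next unblocked signal
 * for the current task looks like:
 *
 *	kernel_siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	if (signr)
 *		;	// act on info, e.g. copy it out to userspace
 */
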
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! We rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and local interrupts must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed; the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the
 * next TRAP_STOP to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

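/*
 * Illustrative example (not part of the kernel source): legacy_queue()
 * is what makes classic signals coalesce while rt signals accumulate.
 * From userspace:
 *
 *	kill(pid, SIGTERM);		// SIGTERM becomes pending
 *	kill(pid, SIGTERM);		// coalesced: still one SIGTERM
 *	sigqueue(pid, SIGRTMIN, val);	// queued
 *	sigqueue(pid, SIGRTMIN, val);	// queued again, delivered twice
 */
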
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We only abort if the signal
		 * was rt and sent by a user using something other
		 * than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after ongoing forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the siginfo has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

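/*
 * Illustrative note (not part of the kernel source): booting with
 *
 *	print-fatal-signals=1
 *
 * enables the dump in print_fatal_signal() above.  The same knob is
 * also available at runtime, assumed here to be the
 * kernel.print-fatal-signals sysctl.
 */
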
int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  The 32bit address will be encoded in the low
 * 32bits of the pointer, and those low 32bits will be stored at a
 * higher address than a 32bit pointer expects.  So userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

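/*
 * Illustrative caller sketch (not part of the kernel source; the names
 * are hypothetical): a driver completing asyncio for a task it recorded
 * as 32bit follows the rule above and passes the user context through
 * sival_int instead of sival_ptr:
 *
 *	sigval_t addr;
 *
 *	if (compat_request)			// hypothetical flag
 *		addr.sival_int = ptr_to_compat(user_context);
 *	else
 *		addr.sival_ptr = user_context;
 *	kill_pid_usb_asyncio(signr, errno, addr, pid, cred);
 */
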
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

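/*
 * Summary of the pid interpretation above, mirroring kill(2)
 * (illustrative, not part of the kernel source):
 *
 *	pid >  0	signal the single process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process the caller may signal,
 *			except the init process (vpid 1) and itself
 *	pid < -1	signal every process in the process group -pid
 */
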
1596 /*
1597  * These are for backward compatibility with the rest of the kernel source.
1598  */
1599 
1600 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1601 {
1602 	/*
1603 	 * Make sure legacy kernel users don't send in bad values
1604 	 * (normal paths check this in check_kill_permission).
1605 	 */
1606 	if (!valid_signal(sig))
1607 		return -EINVAL;
1608 
1609 	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1610 }
1611 EXPORT_SYMBOL(send_sig_info);
1612 
1613 #define __si_special(priv) \
1614 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1615 
1616 int
1617 send_sig(int sig, struct task_struct *p, int priv)
1618 {
1619 	return send_sig_info(sig, __si_special(priv), p);
1620 }
1621 EXPORT_SYMBOL(send_sig);
1622 
1623 void force_sig(int sig)
1624 {
1625 	struct kernel_siginfo info;
1626 
1627 	clear_siginfo(&info);
1628 	info.si_signo = sig;
1629 	info.si_errno = 0;
1630 	info.si_code = SI_KERNEL;
1631 	info.si_pid = 0;
1632 	info.si_uid = 0;
1633 	force_sig_info(&info);
1634 }
1635 EXPORT_SYMBOL(force_sig);
1636 
1637 /*
1638  * When things go south during signal handling, we
1639  * will force a SIGSEGV. And if the signal that caused
1640  * the problem was already a SIGSEGV, we'll want to
1641  * make sure we don't even try to deliver the signal..
1642  */
1643 void force_sigsegv(int sig)
1644 {
1645 	struct task_struct *p = current;
1646 
1647 	if (sig == SIGSEGV) {
1648 		unsigned long flags;
1649 		spin_lock_irqsave(&p->sighand->siglock, flags);
1650 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1651 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1652 	}
1653 	force_sig(SIGSEGV);
1654 }
1655 
1656 int force_sig_fault_to_task(int sig, int code, void __user *addr
1657 	___ARCH_SI_TRAPNO(int trapno)
1658 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1659 	, struct task_struct *t)
1660 {
1661 	struct kernel_siginfo info;
1662 
1663 	clear_siginfo(&info);
1664 	info.si_signo = sig;
1665 	info.si_errno = 0;
1666 	info.si_code  = code;
1667 	info.si_addr  = addr;
1668 #ifdef __ARCH_SI_TRAPNO
1669 	info.si_trapno = trapno;
1670 #endif
1671 #ifdef __ia64__
1672 	info.si_imm = imm;
1673 	info.si_flags = flags;
1674 	info.si_isr = isr;
1675 #endif
1676 	return force_sig_info_to_task(&info, t);
1677 }
1678 
1679 int force_sig_fault(int sig, int code, void __user *addr
1680 	___ARCH_SI_TRAPNO(int trapno)
1681 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1682 {
1683 	return force_sig_fault_to_task(sig, code, addr
1684 				       ___ARCH_SI_TRAPNO(trapno)
1685 				       ___ARCH_SI_IA64(imm, flags, isr), current);
1686 }
1687 
1688 int send_sig_fault(int sig, int code, void __user *addr
1689 	___ARCH_SI_TRAPNO(int trapno)
1690 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1691 	, struct task_struct *t)
1692 {
1693 	struct kernel_siginfo info;
1694 
1695 	clear_siginfo(&info);
1696 	info.si_signo = sig;
1697 	info.si_errno = 0;
1698 	info.si_code  = code;
1699 	info.si_addr  = addr;
1700 #ifdef __ARCH_SI_TRAPNO
1701 	info.si_trapno = trapno;
1702 #endif
1703 #ifdef __ia64__
1704 	info.si_imm = imm;
1705 	info.si_flags = flags;
1706 	info.si_isr = isr;
1707 #endif
1708 	return send_sig_info(info.si_signo, &info, t);
1709 }
1710 
1711 int force_sig_mceerr(int code, void __user *addr, short lsb)
1712 {
1713 	struct kernel_siginfo info;
1714 
1715 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1716 	clear_siginfo(&info);
1717 	info.si_signo = SIGBUS;
1718 	info.si_errno = 0;
1719 	info.si_code = code;
1720 	info.si_addr = addr;
1721 	info.si_addr_lsb = lsb;
1722 	return force_sig_info(&info);
1723 }
1724 
1725 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1726 {
1727 	struct kernel_siginfo info;
1728 
1729 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1730 	clear_siginfo(&info);
1731 	info.si_signo = SIGBUS;
1732 	info.si_errno = 0;
1733 	info.si_code = code;
1734 	info.si_addr = addr;
1735 	info.si_addr_lsb = lsb;
1736 	return send_sig_info(info.si_signo, &info, t);
1737 }
1738 EXPORT_SYMBOL(send_sig_mceerr);
1739 
1740 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1741 {
1742 	struct kernel_siginfo info;
1743 
1744 	clear_siginfo(&info);
1745 	info.si_signo = SIGSEGV;
1746 	info.si_errno = 0;
1747 	info.si_code  = SEGV_BNDERR;
1748 	info.si_addr  = addr;
1749 	info.si_lower = lower;
1750 	info.si_upper = upper;
1751 	return force_sig_info(&info);
1752 }
1753 
1754 #ifdef SEGV_PKUERR
1755 int force_sig_pkuerr(void __user *addr, u32 pkey)
1756 {
1757 	struct kernel_siginfo info;
1758 
1759 	clear_siginfo(&info);
1760 	info.si_signo = SIGSEGV;
1761 	info.si_errno = 0;
1762 	info.si_code  = SEGV_PKUERR;
1763 	info.si_addr  = addr;
1764 	info.si_pkey  = pkey;
1765 	return force_sig_info(&info);
1766 }
1767 #endif
1768 
1769 int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
1770 {
1771 	struct kernel_siginfo info;
1772 
1773 	clear_siginfo(&info);
1774 	info.si_signo     = SIGTRAP;
1775 	info.si_errno     = 0;
1776 	info.si_code      = TRAP_PERF;
1777 	info.si_addr      = addr;
1778 	info.si_perf_data = sig_data;
1779 	info.si_perf_type = type;
1780 
1781 	return force_sig_info(&info);
1782 }
1783 
1784 /* For the crazy architectures that include trap information in
1785  * the errno field, instead of an actual errno value.
1786  */
1787 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1788 {
1789 	struct kernel_siginfo info;
1790 
1791 	clear_siginfo(&info);
1792 	info.si_signo = SIGTRAP;
1793 	info.si_errno = errno;
1794 	info.si_code  = TRAP_HWBKPT;
1795 	info.si_addr  = addr;
1796 	return force_sig_info(&info);
1797 }
1798 
1799 int kill_pgrp(struct pid *pid, int sig, int priv)
1800 {
1801 	int ret;
1802 
1803 	read_lock(&tasklist_lock);
1804 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1805 	read_unlock(&tasklist_lock);
1806 
1807 	return ret;
1808 }
1809 EXPORT_SYMBOL(kill_pgrp);
1810 
1811 int kill_pid(struct pid *pid, int sig, int priv)
1812 {
1813 	return kill_pid_info(sig, __si_special(priv), pid);
1814 }
1815 EXPORT_SYMBOL(kill_pid);
1816 
1817 /*
1818  * These functions support sending signals using preallocated sigqueue
1819  * structures.  This is needed "because realtime applications cannot
1820  * afford to lose notifications of asynchronous events, like timer
1821  * expirations or I/O completions".  In the case of POSIX Timers
1822  * we allocate the sigqueue structure from the timer_create.  If this
1823  * allocation fails we are able to report the failure to the application
1824  * with an EAGAIN error.
1825  */
1826 struct sigqueue *sigqueue_alloc(void)
1827 {
1828 	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1829 }
1830 
1831 void sigqueue_free(struct sigqueue *q)
1832 {
1833 	unsigned long flags;
1834 	spinlock_t *lock = &current->sighand->siglock;
1835 
1836 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1837 	/*
1838 	 * We must hold ->siglock while testing q->list
1839 	 * to serialize with collect_signal() or with
1840 	 * __exit_signal()->flush_sigqueue().
1841 	 */
1842 	spin_lock_irqsave(lock, flags);
1843 	q->flags &= ~SIGQUEUE_PREALLOC;
1844 	/*
1845 	 * If it is queued it will be freed when dequeued,
1846 	 * like the "regular" sigqueue.
1847 	 */
1848 	if (!list_empty(&q->list))
1849 		q = NULL;
1850 	spin_unlock_irqrestore(lock, flags);
1851 
1852 	if (q)
1853 		__sigqueue_free(q);
1854 }
1855 
1856 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1857 {
1858 	int sig = q->info.si_signo;
1859 	struct sigpending *pending;
1860 	struct task_struct *t;
1861 	unsigned long flags;
1862 	int ret, result;
1863 
1864 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1865 
1866 	ret = -1;
1867 	rcu_read_lock();
1868 	t = pid_task(pid, type);
1869 	if (!t || !likely(lock_task_sighand(t, &flags)))
1870 		goto ret;
1871 
1872 	ret = 1; /* the signal is ignored */
1873 	result = TRACE_SIGNAL_IGNORED;
1874 	if (!prepare_signal(sig, t, false))
1875 		goto out;
1876 
1877 	ret = 0;
1878 	if (unlikely(!list_empty(&q->list))) {
1879 		/*
1880 		 * If an SI_TIMER entry is already queued, just increment
1881 		 * the overrun count.
1882 		 */
1883 		BUG_ON(q->info.si_code != SI_TIMER);
1884 		q->info.si_overrun++;
1885 		result = TRACE_SIGNAL_ALREADY_PENDING;
1886 		goto out;
1887 	}
1888 	q->info.si_overrun = 0;
1889 
1890 	signalfd_notify(t, sig);
1891 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1892 	list_add_tail(&q->list, &pending->list);
1893 	sigaddset(&pending->signal, sig);
1894 	complete_signal(sig, t, type);
1895 	result = TRACE_SIGNAL_DELIVERED;
1896 out:
1897 	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1898 	unlock_task_sighand(t, &flags);
1899 ret:
1900 	rcu_read_unlock();
1901 	return ret;
1902 }
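
/*
 * Lifecycle sketch for the preallocated sigqueue API above, modelled on
 * how the POSIX timer code uses it (illustrative; the "timer" structure
 * and its fields are hypothetical here):
 *
 *	// at timer_create() time - the only point that can fail
 *	timer->sigq = sigqueue_alloc();
 *	if (!timer->sigq)
 *		return -EAGAIN;
 *
 *	// at expiry - preallocated, so nothing can fail with -EAGAIN here;
 *	// returns 1 if the signal is ignored, 0 if queued
 *	timer->sigq->info.si_code = SI_TIMER;
 *	err = send_sigqueue(timer->sigq, timer->it_pid, PIDTYPE_TGID);
 *
 *	// at timer_delete() time
 *	sigqueue_free(timer->sigq);
 */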
1903 
1904 static void do_notify_pidfd(struct task_struct *task)
1905 {
1906 	struct pid *pid;
1907 
1908 	WARN_ON(task->exit_state == 0);
1909 	pid = task_pid(task);
1910 	wake_up_all(&pid->wait_pidfd);
1911 }
1912 
1913 /*
1914  * Let a parent know about the death of a child.
1915  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1916  *
1917  * Returns true if our parent ignored us and so we've switched to
1918  * self-reaping.
1919  */
1920 bool do_notify_parent(struct task_struct *tsk, int sig)
1921 {
1922 	struct kernel_siginfo info;
1923 	unsigned long flags;
1924 	struct sighand_struct *psig;
1925 	bool autoreap = false;
1926 	u64 utime, stime;
1927 
1928 	BUG_ON(sig == -1);
1929 
1930  	/* do_notify_parent_cldstop should have been called instead.  */
1931  	BUG_ON(task_is_stopped_or_traced(tsk));
1932 
1933 	BUG_ON(!tsk->ptrace &&
1934 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1935 
1936 	/* Wake up all pidfd waiters */
1937 	do_notify_pidfd(tsk);
1938 
1939 	if (sig != SIGCHLD) {
1940 		/*
1941 		 * This is only possible if parent == real_parent.
1942 		 * Check if it has changed security domain.
1943 		 */
1944 		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
1945 			sig = SIGCHLD;
1946 	}
1947 
1948 	clear_siginfo(&info);
1949 	info.si_signo = sig;
1950 	info.si_errno = 0;
1951 	/*
1952 	 * We are under tasklist_lock here so our parent is tied to
1953 	 * us and cannot change.
1954 	 *
1955 	 * task_active_pid_ns will always return the same pid namespace
1956 	 * until a task passes through release_task.
1957 	 *
1958 	 * write_lock() currently calls preempt_disable() which is the
1959 	 * same as rcu_read_lock(); but, according to Oleg, it is not
1960 	 * correct to rely on this.
1961 	 */
1962 	rcu_read_lock();
1963 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1964 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1965 				       task_uid(tsk));
1966 	rcu_read_unlock();
1967 
1968 	task_cputime(tsk, &utime, &stime);
1969 	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1970 	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1971 
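
	/*
	 * Note on the decoding below: ->exit_code uses the classic wait
	 * status encoding.  The low 7 bits carry the terminating signal,
	 * 0x80 flags a core dump, and a normal exit keeps its status in
	 * bits 8-15.  For example, exit(3) stored 0x0300 and maps to
	 * CLD_EXITED with si_status == 3, while a SIGSEGV crash that
	 * dumped core stored 0x8b and maps to CLD_DUMPED.
	 */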
1972 	info.si_status = tsk->exit_code & 0x7f;
1973 	if (tsk->exit_code & 0x80)
1974 		info.si_code = CLD_DUMPED;
1975 	else if (tsk->exit_code & 0x7f)
1976 		info.si_code = CLD_KILLED;
1977 	else {
1978 		info.si_code = CLD_EXITED;
1979 		info.si_status = tsk->exit_code >> 8;
1980 	}
1981 
1982 	psig = tsk->parent->sighand;
1983 	spin_lock_irqsave(&psig->siglock, flags);
1984 	if (!tsk->ptrace && sig == SIGCHLD &&
1985 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1986 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1987 		/*
1988 		 * We are exiting and our parent doesn't care.  POSIX.1
1989 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1990 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1991 		 * automatically and not left for our parent's wait4 call.
1992 		 * Rather than having the parent do it as a magic kind of
1993 		 * signal handler, we just set this to tell do_exit that we
1994 		 * can be cleaned up without becoming a zombie.  Note that
1995 		 * we still call __wake_up_parent in this case, because a
1996 		 * blocked sys_wait4 might now return -ECHILD.
1997 		 *
1998 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1999 		 * is implementation-defined: we do (if you don't want
2000 		 * it, just use SIG_IGN instead).
2001 		 */
2002 		autoreap = true;
2003 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2004 			sig = 0;
2005 	}
2006 	/*
2007 	 * Send with __send_signal as si_pid and si_uid are in the
2008 	 * parent's namespaces.
2009 	 */
2010 	if (valid_signal(sig) && sig)
2011 		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2012 	__wake_up_parent(tsk, tsk->parent);
2013 	spin_unlock_irqrestore(&psig->siglock, flags);
2014 
2015 	return autoreap;
2016 }
2017 
2018 /**
2019  * do_notify_parent_cldstop - notify parent of stopped/continued state change
2020  * @tsk: task reporting the state change
2021  * @for_ptracer: the notification is for ptracer
2022  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2023  *
2024  * Notify @tsk's parent that the stopped/continued state has changed.  If
2025  * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2026  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2027  *
2028  * CONTEXT:
2029  * Must be called with tasklist_lock at least read locked.
2030  */
2031 static void do_notify_parent_cldstop(struct task_struct *tsk,
2032 				     bool for_ptracer, int why)
2033 {
2034 	struct kernel_siginfo info;
2035 	unsigned long flags;
2036 	struct task_struct *parent;
2037 	struct sighand_struct *sighand;
2038 	u64 utime, stime;
2039 
2040 	if (for_ptracer) {
2041 		parent = tsk->parent;
2042 	} else {
2043 		tsk = tsk->group_leader;
2044 		parent = tsk->real_parent;
2045 	}
2046 
2047 	clear_siginfo(&info);
2048 	info.si_signo = SIGCHLD;
2049 	info.si_errno = 0;
2050 	/*
2051 	 * see comment in do_notify_parent() about the following 4 lines
2052 	 */
2053 	rcu_read_lock();
2054 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2055 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2056 	rcu_read_unlock();
2057 
2058 	task_cputime(tsk, &utime, &stime);
2059 	info.si_utime = nsec_to_clock_t(utime);
2060 	info.si_stime = nsec_to_clock_t(stime);
2061 
2062  	info.si_code = why;
2063  	switch (why) {
2064  	case CLD_CONTINUED:
2065  		info.si_status = SIGCONT;
2066  		break;
2067  	case CLD_STOPPED:
2068  		info.si_status = tsk->signal->group_exit_code & 0x7f;
2069  		break;
2070  	case CLD_TRAPPED:
2071  		info.si_status = tsk->exit_code & 0x7f;
2072  		break;
2073  	default:
2074  		BUG();
2075  	}
2076 
2077 	sighand = parent->sighand;
2078 	spin_lock_irqsave(&sighand->siglock, flags);
2079 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2080 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2081 		__group_send_sig_info(SIGCHLD, &info, parent);
2082 	/*
2083 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2084 	 */
2085 	__wake_up_parent(tsk, parent);
2086 	spin_unlock_irqrestore(&sighand->siglock, flags);
2087 }
2088 
2089 static inline bool may_ptrace_stop(void)
2090 {
2091 	if (!likely(current->ptrace))
2092 		return false;
2093 	/*
2094 	 * Are we in the middle of do_coredump?
2095 	 * If so, and our tracer is also part of the coredump, stopping
2096 	 * would deadlock; it is also pointless because our tracer
2097 	 * is dead, so don't allow us to stop.
2098 	 * If SIGKILL was already sent before the caller unlocked
2099 	 * ->siglock we must see ->core_state != NULL. Otherwise it
2100 	 * is safe to enter schedule().
2101 	 *
2102 	 * This is almost outdated; a task with a pending SIGKILL can't
2103 	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2104 	 * after SIGKILL was already dequeued.
2105 	 */
2106 	if (unlikely(current->mm->core_state) &&
2107 	    unlikely(current->mm == current->parent->mm))
2108 		return false;
2109 
2110 	return true;
2111 }
2112 
2113 /*
2114  * Return true if there is a SIGKILL that should be waking us up.
2115  * Called with the siglock held.
2116  */
2117 static bool sigkill_pending(struct task_struct *tsk)
2118 {
2119 	return sigismember(&tsk->pending.signal, SIGKILL) ||
2120 	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2121 }
2122 
2123 /*
2124  * This must be called with current->sighand->siglock held.
2125  *
2126  * This should be the path for all ptrace stops.
2127  * We always set current->last_siginfo while stopped here.
2128  * That makes it a way to test a stopped process for
2129  * being ptrace-stopped vs being job-control-stopped.
2130  *
2131  * If we actually decide not to stop at all because the tracer
2132  * is gone, we keep current->exit_code unless clear_code.
2133  */
2134 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2135 	__releases(&current->sighand->siglock)
2136 	__acquires(&current->sighand->siglock)
2137 {
2138 	bool gstop_done = false;
2139 
2140 	if (arch_ptrace_stop_needed(exit_code, info)) {
2141 		/*
2142 		 * The arch code has something special to do before a
2143 		 * ptrace stop.  This is allowed to block, e.g. for faults
2144 		 * on user stack pages.  We can't keep the siglock while
2145 		 * calling arch_ptrace_stop, so we must release it now.
2146 		 * To preserve proper semantics, we must do this before
2147 		 * any signal bookkeeping like checking group_stop_count.
2148 		 * Meanwhile, a SIGKILL could come in before we retake the
2149 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
2150 		 * So after regaining the lock, we must check for SIGKILL.
2151 		 */
2152 		spin_unlock_irq(&current->sighand->siglock);
2153 		arch_ptrace_stop(exit_code, info);
2154 		spin_lock_irq(&current->sighand->siglock);
2155 		if (sigkill_pending(current))
2156 			return;
2157 	}
2158 
2159 	set_special_state(TASK_TRACED);
2160 
2161 	/*
2162 	 * We're committing to trapping.  TRACED should be visible before
2163 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2164 	 * Also, transition to TRACED and updates to ->jobctl should be
2165 	 * atomic with respect to siglock and should be done after the arch
2166 	 * hook as siglock is released and regrabbed across it.
2167 	 *
2168 	 *     TRACER				    TRACEE
2169 	 *
2170 	 *     ptrace_attach()
2171 	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2172 	 *     do_wait()
2173 	 *       set_current_state()                smp_wmb();
2174 	 *       ptrace_do_wait()
2175 	 *         wait_task_stopped()
2176 	 *           task_stopped_code()
2177 	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2178 	 */
2179 	smp_wmb();
2180 
2181 	current->last_siginfo = info;
2182 	current->exit_code = exit_code;
2183 
2184 	/*
2185 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2186 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2187 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2188 	 * could be clear now.  We act as if SIGCONT is received after
2189 	 * TASK_TRACED is entered - ignore it.
2190 	 */
2191 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2192 		gstop_done = task_participate_group_stop(current);
2193 
2194 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2195 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2196 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2197 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2198 
2199 	/* entering a trap, clear TRAPPING */
2200 	task_clear_jobctl_trapping(current);
2201 
2202 	spin_unlock_irq(&current->sighand->siglock);
2203 	read_lock(&tasklist_lock);
2204 	if (may_ptrace_stop()) {
2205 		/*
2206 		 * Notify parents of the stop.
2207 		 *
2208 		 * While ptraced, there are two parents - the ptracer and
2209 		 * the real_parent of the group_leader.  The ptracer should
2210 		 * know about every stop while the real parent is only
2211 		 * interested in the completion of group stop.  The states
2212 		 * for the two don't interact with each other.  Notify
2213 		 * separately unless they're gonna be duplicates.
2214 		 */
2215 		do_notify_parent_cldstop(current, true, why);
2216 		if (gstop_done && ptrace_reparented(current))
2217 			do_notify_parent_cldstop(current, false, why);
2218 
2219 		/*
2220 		 * Don't want to allow preemption here, because
2221 		 * sys_ptrace() needs this task to be inactive.
2222 		 *
2223 		 * XXX: implement read_unlock_no_resched().
2224 		 */
2225 		preempt_disable();
2226 		read_unlock(&tasklist_lock);
2227 		cgroup_enter_frozen();
2228 		preempt_enable_no_resched();
2229 		freezable_schedule();
2230 		cgroup_leave_frozen(true);
2231 	} else {
2232 		/*
2233 		 * By the time we got the lock, our tracer went away.
2234 		 * Don't drop the lock yet, another tracer may come.
2235 		 *
2236 		 * If @gstop_done, the ptracer went away between group stop
2237 		 * completion and here.  During detach, it would have set
2238 		 * JOBCTL_STOP_PENDING on us and we'll re-enter
2239 		 * TASK_STOPPED in do_signal_stop() on return, so notifying
2240 		 * the real parent of the group stop completion is enough.
2241 		 */
2242 		if (gstop_done)
2243 			do_notify_parent_cldstop(current, false, why);
2244 
2245 		/* tasklist protects us from ptrace_freeze_traced() */
2246 		__set_current_state(TASK_RUNNING);
2247 		if (clear_code)
2248 			current->exit_code = 0;
2249 		read_unlock(&tasklist_lock);
2250 	}
2251 
2252 	/*
2253 	 * We are back.  Now reacquire the siglock before touching
2254 	 * last_siginfo, so that we are sure to have synchronized with
2255 	 * any signal-sending on another CPU that wants to examine it.
2256 	 */
2257 	spin_lock_irq(&current->sighand->siglock);
2258 	current->last_siginfo = NULL;
2259 
2260 	/* LISTENING can be set only during STOP traps, clear it */
2261 	current->jobctl &= ~JOBCTL_LISTENING;
2262 
2263 	/*
2264 	 * Queued signals ignored us while we were stopped for tracing.
2265 	 * So check for any that we should take before resuming user mode.
2266 	 * This sets TIF_SIGPENDING, but never clears it.
2267 	 */
2268 	recalc_sigpending_tsk(current);
2269 }
2270 
2271 static void ptrace_do_notify(int signr, int exit_code, int why)
2272 {
2273 	kernel_siginfo_t info;
2274 
2275 	clear_siginfo(&info);
2276 	info.si_signo = signr;
2277 	info.si_code = exit_code;
2278 	info.si_pid = task_pid_vnr(current);
2279 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2280 
2281 	/* Let the debugger run.  */
2282 	ptrace_stop(exit_code, why, 1, &info);
2283 }
2284 
2285 void ptrace_notify(int exit_code)
2286 {
2287 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2288 	if (unlikely(current->task_works))
2289 		task_work_run();
2290 
2291 	spin_lock_irq(&current->sighand->siglock);
2292 	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2293 	spin_unlock_irq(&current->sighand->siglock);
2294 }
2295 
2296 /**
2297  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2298  * @signr: signr causing group stop if initiating
2299  *
2300  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2301  * and participate in it.  If already set, participate in the existing
2302  * group stop.  If participated in a group stop (and thus slept), %true is
2303  * returned with siglock released.
2304  *
2305  * If ptraced, this function doesn't handle stop itself.  Instead,
2306  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2307  * untouched.  The caller must ensure that INTERRUPT trap handling takes
2308  * places afterwards.
2309  * place afterwards.
2310  * CONTEXT:
2311  * Must be called with @current->sighand->siglock held, which is released
2312  * on %true return.
2313  *
2314  * RETURNS:
2315  * %false if group stop is already cancelled or ptrace trap is scheduled.
2316  * %true if participated in group stop.
2317  */
2318 static bool do_signal_stop(int signr)
2319 	__releases(&current->sighand->siglock)
2320 {
2321 	struct signal_struct *sig = current->signal;
2322 
2323 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2324 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2325 		struct task_struct *t;
2326 
2327 		/* signr will be recorded in task->jobctl for retries */
2328 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2329 
2330 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2331 		    unlikely(signal_group_exit(sig)))
2332 			return false;
2333 		/*
2334 		 * There is no group stop already in progress.  We must
2335 		 * initiate one now.
2336 		 *
2337 		 * While ptraced, a task may be resumed while group stop is
2338 		 * still in effect and then receive a stop signal and
2339 		 * initiate another group stop.  This deviates from the
2340 		 * usual behavior as two consecutive stop signals can't
2341 		 * cause two group stops when !ptraced.  That is why we
2342 		 * also check !task_is_stopped(t) below.
2343 		 *
2344 		 * The condition can be distinguished by testing whether
2345 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2346 		 * group_exit_code in such case.
2347 		 *
2348 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2349 		 * an intervening stop signal is required to cause two
2350 		 * continued events regardless of ptrace.
2351 		 */
2352 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2353 			sig->group_exit_code = signr;
2354 
2355 		sig->group_stop_count = 0;
2356 
2357 		if (task_set_jobctl_pending(current, signr | gstop))
2358 			sig->group_stop_count++;
2359 
2360 		t = current;
2361 		while_each_thread(current, t) {
2362 			/*
2363 			 * Setting state to TASK_STOPPED for a group
2364 			 * stop is always done with the siglock held,
2365 			 * so this check has no races.
2366 			 */
2367 			if (!task_is_stopped(t) &&
2368 			    task_set_jobctl_pending(t, signr | gstop)) {
2369 				sig->group_stop_count++;
2370 				if (likely(!(t->ptrace & PT_SEIZED)))
2371 					signal_wake_up(t, 0);
2372 				else
2373 					ptrace_trap_notify(t);
2374 			}
2375 		}
2376 	}
2377 
2378 	if (likely(!current->ptrace)) {
2379 		int notify = 0;
2380 
2381 		/*
2382 		 * If there are no other threads in the group, or if there
2383 		 * is a group stop in progress and we are the last to stop,
2384 		 * report to the parent.
2385 		 */
2386 		if (task_participate_group_stop(current))
2387 			notify = CLD_STOPPED;
2388 
2389 		set_special_state(TASK_STOPPED);
2390 		spin_unlock_irq(&current->sighand->siglock);
2391 
2392 		/*
2393 		 * Notify the parent of the group stop completion.  Because
2394 		 * we're not holding either the siglock or tasklist_lock
2395 		 * here, ptracer may attach inbetween; however, this is for
2396 		 * here, a ptracer may attach in between; however, this is for
2397 		 * parent of the group leader.  The new ptracer will get
2398 		 * its notification when this task transitions into
2399 		 * TASK_TRACED.
2400 		 */
2401 		if (notify) {
2402 			read_lock(&tasklist_lock);
2403 			do_notify_parent_cldstop(current, false, notify);
2404 			read_unlock(&tasklist_lock);
2405 		}
2406 
2407 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2408 		cgroup_enter_frozen();
2409 		freezable_schedule();
2410 		return true;
2411 	} else {
2412 		/*
2413 		 * While ptraced, group stop is handled by STOP trap.
2414 		 * Schedule it and let the caller deal with it.
2415 		 */
2416 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2417 		return false;
2418 	}
2419 }
2420 
2421 /**
2422  * do_jobctl_trap - take care of ptrace jobctl traps
2423  *
2424  * When PT_SEIZED, it's used for both group stop and explicit
2425  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2426  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2427  * the stop signal; otherwise, %SIGTRAP.
2428  *
2429  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2430  * number as exit_code and no siginfo.
2431  *
2432  * CONTEXT:
2433  * Must be called with @current->sighand->siglock held, which may be
2434  * released and re-acquired before returning with intervening sleep.
2435  */
2436 static void do_jobctl_trap(void)
2437 {
2438 	struct signal_struct *signal = current->signal;
2439 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2440 
2441 	if (current->ptrace & PT_SEIZED) {
2442 		if (!signal->group_stop_count &&
2443 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2444 			signr = SIGTRAP;
2445 		WARN_ON_ONCE(!signr);
2446 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2447 				 CLD_STOPPED);
2448 	} else {
2449 		WARN_ON_ONCE(!signr);
2450 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2451 		current->exit_code = 0;
2452 	}
2453 }
2454 
2455 /**
2456  * do_freezer_trap - handle the freezer jobctl trap
2457  *
2458  * Puts the task into the frozen state unless the task is about to quit,
2459  * in which case JOBCTL_TRAP_FREEZE is dropped.
2460  *
2461  * CONTEXT:
2462  * Must be called with @current->sighand->siglock held,
2463  * which is always released before returning.
2464  */
2465 static void do_freezer_trap(void)
2466 	__releases(&current->sighand->siglock)
2467 {
2468 	/*
2469 	 * If there are trap bits pending besides JOBCTL_TRAP_FREEZE,
2470 	 * loop once more to give them a chance to be handled.
2471 	 * In any case, we will come back here.
2472 	 */
2473 	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2474 	     JOBCTL_TRAP_FREEZE) {
2475 		spin_unlock_irq(&current->sighand->siglock);
2476 		return;
2477 	}
2478 
2479 	/*
2480 	 * Now we're sure that there is no pending fatal signal and no
2481 	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2482 	 * immediately (if there is a non-fatal signal pending), and
2483 	 * put the task to sleep.
2484 	 */
2485 	__set_current_state(TASK_INTERRUPTIBLE);
2486 	clear_thread_flag(TIF_SIGPENDING);
2487 	spin_unlock_irq(&current->sighand->siglock);
2488 	cgroup_enter_frozen();
2489 	freezable_schedule();
2490 }
2491 
2492 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2493 {
2494 	/*
2495 	 * We do not check sig_kernel_stop(signr) but set this marker
2496 	 * unconditionally because we do not know whether debugger will
2497 	 * change signr. This flag has no meaning unless we are going
2498 	 * to stop after return from ptrace_stop(). In this case it will
2499 	 * be checked in do_signal_stop(), we should only stop if it was
2500 	 * not cleared by SIGCONT while we were sleeping. See also the
2501 	 * comment in dequeue_signal().
2502 	 */
2503 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2504 	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2505 
2506 	/* We're back.  Did the debugger cancel the sig?  */
2507 	signr = current->exit_code;
2508 	if (signr == 0)
2509 		return signr;
2510 
2511 	current->exit_code = 0;
2512 
2513 	/*
2514 	 * Update the siginfo structure if the signal has
2515 	 * changed.  If the debugger wanted something
2516 	 * specific in the siginfo structure then it should
2517 	 * have updated *info via PTRACE_SETSIGINFO.
2518 	 */
2519 	if (signr != info->si_signo) {
2520 		clear_siginfo(info);
2521 		info->si_signo = signr;
2522 		info->si_errno = 0;
2523 		info->si_code = SI_USER;
2524 		rcu_read_lock();
2525 		info->si_pid = task_pid_vnr(current->parent);
2526 		info->si_uid = from_kuid_munged(current_user_ns(),
2527 						task_uid(current->parent));
2528 		rcu_read_unlock();
2529 	}
2530 
2531 	/* If the (new) signal is now blocked, requeue it.  */
2532 	if (sigismember(&current->blocked, signr)) {
2533 		send_signal(signr, info, current, PIDTYPE_PID);
2534 		signr = 0;
2535 	}
2536 
2537 	return signr;
2538 }
2539 
2540 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2541 {
2542 	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2543 	case SIL_FAULT:
2544 	case SIL_FAULT_TRAPNO:
2545 	case SIL_FAULT_MCEERR:
2546 	case SIL_FAULT_BNDERR:
2547 	case SIL_FAULT_PKUERR:
2548 	case SIL_PERF_EVENT:
2549 		ksig->info.si_addr = arch_untagged_si_addr(
2550 			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2551 		break;
2552 	case SIL_KILL:
2553 	case SIL_TIMER:
2554 	case SIL_POLL:
2555 	case SIL_CHLD:
2556 	case SIL_RT:
2557 	case SIL_SYS:
2558 		break;
2559 	}
2560 }
2561 
2562 bool get_signal(struct ksignal *ksig)
2563 {
2564 	struct sighand_struct *sighand = current->sighand;
2565 	struct signal_struct *signal = current->signal;
2566 	int signr;
2567 
2568 	if (unlikely(current->task_works))
2569 		task_work_run();
2570 
2571 	/*
2572 	 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2573 	 * that the arch handlers don't all have to do it. If we get here
2574 	 * without TIF_SIGPENDING, just exit after running signal work.
2575 	 */
2576 	if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2577 		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2578 			tracehook_notify_signal();
2579 		if (!task_sigpending(current))
2580 			return false;
2581 	}
2582 
2583 	if (unlikely(uprobe_deny_signal()))
2584 		return false;
2585 
2586 	/*
2587 	 * Do this once, we can't return to user-mode if freezing() == T.
2588 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2589 	 * thus do not need another check after return.
2590 	 */
2591 	try_to_freeze();
2592 
2593 relock:
2594 	spin_lock_irq(&sighand->siglock);
2595 
2596 	/*
2597 	 * Every stopped thread goes here after wakeup. Check to see if
2598 	 * we should notify the parent; prepare_signal(SIGCONT) encodes
2599 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2600 	 */
2601 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2602 		int why;
2603 
2604 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2605 			why = CLD_CONTINUED;
2606 		else
2607 			why = CLD_STOPPED;
2608 
2609 		signal->flags &= ~SIGNAL_CLD_MASK;
2610 
2611 		spin_unlock_irq(&sighand->siglock);
2612 
2613 		/*
2614 		 * Notify the parent that we're continuing.  This event is
2615 		 * always per-process and doesn't make a whole lot of sense
2616 		 * for ptracers, who shouldn't consume the state via
2617 		 * wait(2) either, but, for backward compatibility, notify
2618 		 * the ptracer of the group leader too unless it's gonna be
2619 		 * a duplicate.
2620 		 */
2621 		read_lock(&tasklist_lock);
2622 		do_notify_parent_cldstop(current, false, why);
2623 
2624 		if (ptrace_reparented(current->group_leader))
2625 			do_notify_parent_cldstop(current->group_leader,
2626 						true, why);
2627 		read_unlock(&tasklist_lock);
2628 
2629 		goto relock;
2630 	}
2631 
2632 	/* Has this task already been marked for death? */
2633 	if (signal_group_exit(signal)) {
2634 		ksig->info.si_signo = signr = SIGKILL;
2635 		sigdelset(&current->pending.signal, SIGKILL);
2636 		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2637 				&sighand->action[SIGKILL - 1]);
2638 		recalc_sigpending();
2639 		goto fatal;
2640 	}
2641 
2642 	for (;;) {
2643 		struct k_sigaction *ka;
2644 
2645 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2646 		    do_signal_stop(0))
2647 			goto relock;
2648 
2649 		if (unlikely(current->jobctl &
2650 			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2651 			if (current->jobctl & JOBCTL_TRAP_MASK) {
2652 				do_jobctl_trap();
2653 				spin_unlock_irq(&sighand->siglock);
2654 			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2655 				do_freezer_trap();
2656 
2657 			goto relock;
2658 		}
2659 
2660 		/*
2661 		 * If the task is leaving the frozen state, let's update
2662 		 * cgroup counters and reset the frozen bit.
2663 		 */
2664 		if (unlikely(cgroup_task_frozen(current))) {
2665 			spin_unlock_irq(&sighand->siglock);
2666 			cgroup_leave_frozen(false);
2667 			goto relock;
2668 		}
2669 
2670 		/*
2671 		 * Signals generated by the execution of an instruction
2672 		 * need to be delivered before any other pending signals
2673 		 * so that the instruction pointer in the signal stack
2674 		 * frame points to the faulting instruction.
2675 		 */
2676 		signr = dequeue_synchronous_signal(&ksig->info);
2677 		if (!signr)
2678 			signr = dequeue_signal(current, &current->blocked, &ksig->info);
2679 
2680 		if (!signr)
2681 			break; /* will return 0 */
2682 
2683 		if (unlikely(current->ptrace) && signr != SIGKILL) {
2684 			signr = ptrace_signal(signr, &ksig->info);
2685 			if (!signr)
2686 				continue;
2687 		}
2688 
2689 		ka = &sighand->action[signr-1];
2690 
2691 		/* Trace actually delivered signals. */
2692 		trace_signal_deliver(signr, &ksig->info, ka);
2693 
2694 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2695 			continue;
2696 		if (ka->sa.sa_handler != SIG_DFL) {
2697 			/* Run the handler.  */
2698 			ksig->ka = *ka;
2699 
2700 			if (ka->sa.sa_flags & SA_ONESHOT)
2701 				ka->sa.sa_handler = SIG_DFL;
2702 
2703 			break; /* will return non-zero "signr" value */
2704 		}
2705 
2706 		/*
2707 		 * Now we are doing the default action for this signal.
2708 		 */
2709 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2710 			continue;
2711 
2712 		/*
2713 		 * Global init gets no signals it doesn't want.
2714 		 * Container-init gets no signals it doesn't want from same
2715 		 * Container-init gets no signals it doesn't want from the same
2716 		 *
2717 		 * Note that if global/container-init sees a sig_kernel_only()
2718 		 * signal here, the signal must have been generated internally
2719 		 * or must have come from an ancestor namespace. In either
2720 		 * case, the signal cannot be dropped.
2721 		 */
2722 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2723 				!sig_kernel_only(signr))
2724 			continue;
2725 
2726 		if (sig_kernel_stop(signr)) {
2727 			/*
2728 			 * The default action is to stop all threads in
2729 			 * the thread group.  The job control signals
2730 			 * do nothing in an orphaned pgrp, but SIGSTOP
2731 			 * always works.  Note that siglock needs to be
2732 			 * dropped during the call to is_current_pgrp_orphaned()
2733 			 * because of lock ordering with tasklist_lock.
2734 			 * This allows an intervening SIGCONT to be posted.
2735 			 * We need to check for that and bail out if necessary.
2736 			 */
2737 			if (signr != SIGSTOP) {
2738 				spin_unlock_irq(&sighand->siglock);
2739 
2740 				/* signals can be posted during this window */
2741 
2742 				if (is_current_pgrp_orphaned())
2743 					goto relock;
2744 
2745 				spin_lock_irq(&sighand->siglock);
2746 			}
2747 
2748 			if (likely(do_signal_stop(ksig->info.si_signo))) {
2749 				/* It released the siglock.  */
2750 				goto relock;
2751 			}
2752 
2753 			/*
2754 			 * We didn't actually stop, due to a race
2755 			 * with SIGCONT or something like that.
2756 			 */
2757 			continue;
2758 		}
2759 
2760 	fatal:
2761 		spin_unlock_irq(&sighand->siglock);
2762 		if (unlikely(cgroup_task_frozen(current)))
2763 			cgroup_leave_frozen(true);
2764 
2765 		/*
2766 		 * Anything else is fatal, maybe with a core dump.
2767 		 */
2768 		current->flags |= PF_SIGNALED;
2769 
2770 		if (sig_kernel_coredump(signr)) {
2771 			if (print_fatal_signals)
2772 				print_fatal_signal(ksig->info.si_signo);
2773 			proc_coredump_connector(current);
2774 			/*
2775 			 * If it was able to dump core, this kills all
2776 			 * other threads in the group and synchronizes with
2777 			 * their demise.  If we lost the race with another
2778 			 * thread getting here, it set group_exit_code
2779 			 * first and our do_group_exit call below will use
2780 			 * that value and ignore the one we pass it.
2781 			 */
2782 			do_coredump(&ksig->info);
2783 		}
2784 
2785 		/*
2786 		 * PF_IO_WORKER threads will catch and exit on fatal signals
2787 		 * themselves. They have cleanup that must be performed, so
2788 		 * we cannot call do_exit() on their behalf.
2789 		 */
2790 		if (current->flags & PF_IO_WORKER)
2791 			goto out;
2792 
2793 		/*
2794 		 * Death signals, no core dump.
2795 		 */
2796 		do_group_exit(ksig->info.si_signo);
2797 		/* NOTREACHED */
2798 	}
2799 	spin_unlock_irq(&sighand->siglock);
2800 out:
2801 	ksig->sig = signr;
2802 
2803 	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2804 		hide_si_addr_tag_bits(ksig);
2805 
2806 	return ksig->sig > 0;
2807 }
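
/*
 * Arch-side usage sketch for get_signal() (modelled on typical arch
 * signal-delivery loops; handle_signal() stands in for the arch
 * frame-setup helper and is not defined in this file):
 *
 *	struct ksignal ksig;
 *
 *	if (get_signal(&ksig)) {
 *		handle_signal(&ksig, regs);	// set up the signal frame
 *		return;
 *	}
 *	// no handler to run - restore the saved sigmask, if any
 *	restore_saved_sigmask();
 */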
2808 
2809 /**
2810  * signal_delivered -
2811  * @ksig:		kernel signal struct
2812  * @stepping:		nonzero if debugger single-step or block-step in use
2813  *
2814  * This function should be called when a signal has successfully been
2815  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2816  * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2817  * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
2818  */
2819 static void signal_delivered(struct ksignal *ksig, int stepping)
2820 {
2821 	sigset_t blocked;
2822 
2823 	/* A signal was successfully delivered, and the
2824 	   saved sigmask was stored on the signal frame,
2825 	   and will be restored by sigreturn.  So we can
2826 	   simply clear the restore sigmask flag.  */
2827 	clear_restore_sigmask();
2828 
2829 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2830 	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2831 		sigaddset(&blocked, ksig->sig);
2832 	set_current_blocked(&blocked);
2833 	if (current->sas_ss_flags & SS_AUTODISARM)
2834 		sas_ss_reset(current);
2835 	tracehook_signal_handler(stepping);
2836 }
2837 
2838 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2839 {
2840 	if (failed)
2841 		force_sigsegv(ksig->sig);
2842 	else
2843 		signal_delivered(ksig, stepping);
2844 }
2845 
2846 /*
2847  * It could be that complete_signal() picked us to notify about the
2848  * group-wide signal. Other threads should be notified now to take
2849  * the shared signals in @which since we will not.
2850  */
2851 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2852 {
2853 	sigset_t retarget;
2854 	struct task_struct *t;
2855 
2856 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2857 	if (sigisemptyset(&retarget))
2858 		return;
2859 
2860 	t = tsk;
2861 	while_each_thread(tsk, t) {
2862 		if (t->flags & PF_EXITING)
2863 			continue;
2864 
2865 		if (!has_pending_signals(&retarget, &t->blocked))
2866 			continue;
2867 		/* Remove the signals this thread can handle. */
2868 		sigandsets(&retarget, &retarget, &t->blocked);
2869 
2870 		if (!task_sigpending(t))
2871 			signal_wake_up(t, 0);
2872 
2873 		if (sigisemptyset(&retarget))
2874 			break;
2875 	}
2876 }
2877 
2878 void exit_signals(struct task_struct *tsk)
2879 {
2880 	int group_stop = 0;
2881 	sigset_t unblocked;
2882 
2883 	/*
2884 	 * @tsk is about to have PF_EXITING set - lock out users which
2885 	 * expect stable threadgroup.
2886 	 * expect a stable threadgroup.
2887 	cgroup_threadgroup_change_begin(tsk);
2888 
2889 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2890 		tsk->flags |= PF_EXITING;
2891 		cgroup_threadgroup_change_end(tsk);
2892 		return;
2893 	}
2894 
2895 	spin_lock_irq(&tsk->sighand->siglock);
2896 	/*
2897 	 * From now this task is not visible for group-wide signals,
2898 	 * see wants_signal(), do_signal_stop().
2899 	 */
2900 	tsk->flags |= PF_EXITING;
2901 
2902 	cgroup_threadgroup_change_end(tsk);
2903 
2904 	if (!task_sigpending(tsk))
2905 		goto out;
2906 
2907 	unblocked = tsk->blocked;
2908 	signotset(&unblocked);
2909 	retarget_shared_pending(tsk, &unblocked);
2910 
2911 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2912 	    task_participate_group_stop(tsk))
2913 		group_stop = CLD_STOPPED;
2914 out:
2915 	spin_unlock_irq(&tsk->sighand->siglock);
2916 
2917 	/*
2918 	 * If group stop has completed, deliver the notification.  This
2919 	 * should always go to the real parent of the group leader.
2920 	 */
2921 	if (unlikely(group_stop)) {
2922 		read_lock(&tasklist_lock);
2923 		do_notify_parent_cldstop(tsk, false, group_stop);
2924 		read_unlock(&tasklist_lock);
2925 	}
2926 }
2927 
2928 /*
2929  * System call entry points.
2930  */
2931 
2932 /**
2933  *  sys_restart_syscall - restart a system call
2934  */
2935 SYSCALL_DEFINE0(restart_syscall)
2936 {
2937 	struct restart_block *restart = &current->restart_block;
2938 	return restart->fn(restart);
2939 }
2940 
2941 long do_no_restart_syscall(struct restart_block *param)
2942 {
2943 	return -EINTR;
2944 }
2945 
2946 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2947 {
2948 	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2949 		sigset_t newblocked;
2950 		/* A set of now blocked but previously unblocked signals. */
2951 		sigandnsets(&newblocked, newset, &current->blocked);
2952 		retarget_shared_pending(tsk, &newblocked);
2953 	}
2954 	tsk->blocked = *newset;
2955 	recalc_sigpending();
2956 }
2957 
2958 /**
2959  * set_current_blocked - change current->blocked mask
2960  * @newset: new mask
2961  *
2962  * It is wrong to change ->blocked directly, this helper should be used
2963  * It is wrong to change ->blocked directly; this helper should be used
2964  */
2965 void set_current_blocked(sigset_t *newset)
2966 {
2967 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2968 	__set_current_blocked(newset);
2969 }
2970 
2971 void __set_current_blocked(const sigset_t *newset)
2972 {
2973 	struct task_struct *tsk = current;
2974 
2975 	/*
2976 	 * If the signal mask hasn't changed, there is nothing we need
2977 	 * to do. current->blocked shouldn't be modified by another task.
2978 	 */
2979 	if (sigequalsets(&tsk->blocked, newset))
2980 		return;
2981 
2982 	spin_lock_irq(&tsk->sighand->siglock);
2983 	__set_task_blocked(tsk, newset);
2984 	spin_unlock_irq(&tsk->sighand->siglock);
2985 }
2986 
2987 /*
2988  * This is also useful for kernel threads that want to temporarily
2989  * (or permanently) block certain signals.
2990  *
2991  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2992  * interface happily blocks "unblockable" signals like SIGKILL
2993  * and friends.
2994  */
2995 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2996 {
2997 	struct task_struct *tsk = current;
2998 	sigset_t newset;
2999 
3000 	/* Lockless, only current can change ->blocked, never from irq */
3001 	if (oldset)
3002 		*oldset = tsk->blocked;
3003 
3004 	switch (how) {
3005 	case SIG_BLOCK:
3006 		sigorsets(&newset, &tsk->blocked, set);
3007 		break;
3008 	case SIG_UNBLOCK:
3009 		sigandnsets(&newset, &tsk->blocked, set);
3010 		break;
3011 	case SIG_SETMASK:
3012 		newset = *set;
3013 		break;
3014 	default:
3015 		return -EINVAL;
3016 	}
3017 
3018 	__set_current_blocked(&newset);
3019 	return 0;
3020 }
3021 EXPORT_SYMBOL(sigprocmask);
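
/*
 * Example (a sketch): a kernel thread temporarily blocking SIGINT and
 * SIGQUIT around a critical section, then restoring the old mask.
 * Unlike userspace, this interface would happily block SIGKILL too:
 *
 *	sigset_t set, old;
 *
 *	siginitset(&set, sigmask(SIGINT) | sigmask(SIGQUIT));
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */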
3022 
3023 /*
3024  * This API helps set application-provided sigmasks.
3025  *
3026  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3027  * epoll_pwait, where a new sigmask is passed in from userland.
3028  *
3029  * Note that it does set_restore_sigmask() in advance, so it must always be
3030  * paired with restore_saved_sigmask_unless() before returning from the syscall.
3031  */
3032 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3033 {
3034 	sigset_t kmask;
3035 
3036 	if (!umask)
3037 		return 0;
3038 	if (sigsetsize != sizeof(sigset_t))
3039 		return -EINVAL;
3040 	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3041 		return -EFAULT;
3042 
3043 	set_restore_sigmask();
3044 	current->saved_sigmask = current->blocked;
3045 	set_current_blocked(&kmask);
3046 
3047 	return 0;
3048 }
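
/*
 * Typical caller pattern (a sketch of how ppoll-style syscalls pair the
 * two helpers; do_the_wait() is a hypothetical wait primitive):
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_the_wait(...);
 *
 *	// restore ->blocked from ->saved_sigmask now, unless a pending
 *	// signal must first be delivered with the temporary mask in place
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *	return ret;
 */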
3049 
3050 #ifdef CONFIG_COMPAT
3051 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3052 			    size_t sigsetsize)
3053 {
3054 	sigset_t kmask;
3055 
3056 	if (!umask)
3057 		return 0;
3058 	if (sigsetsize != sizeof(compat_sigset_t))
3059 		return -EINVAL;
3060 	if (get_compat_sigset(&kmask, umask))
3061 		return -EFAULT;
3062 
3063 	set_restore_sigmask();
3064 	current->saved_sigmask = current->blocked;
3065 	set_current_blocked(&kmask);
3066 
3067 	return 0;
3068 }
3069 #endif
3070 
3071 /**
3072  *  sys_rt_sigprocmask - change the list of currently blocked signals
3073  *  @how: whether to add, remove, or set signals
3074  *  @nset: the new set of blocked signals, or NULL if unchanged
3075  *  @oset: previous value of signal mask if non-null
3076  *  @sigsetsize: size of sigset_t type
3077  */
3078 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3079 		sigset_t __user *, oset, size_t, sigsetsize)
3080 {
3081 	sigset_t old_set, new_set;
3082 	int error;
3083 
3084 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3085 	if (sigsetsize != sizeof(sigset_t))
3086 		return -EINVAL;
3087 
3088 	old_set = current->blocked;
3089 
3090 	if (nset) {
3091 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3092 			return -EFAULT;
3093 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3094 
3095 		error = sigprocmask(how, &new_set, NULL);
3096 		if (error)
3097 			return error;
3098 	}
3099 
3100 	if (oset) {
3101 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3102 			return -EFAULT;
3103 	}
3104 
3105 	return 0;
3106 }
3107 
3108 #ifdef CONFIG_COMPAT
3109 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3110 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3111 {
3112 	sigset_t old_set = current->blocked;
3113 
3114 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3115 	if (sigsetsize != sizeof(sigset_t))
3116 		return -EINVAL;
3117 
3118 	if (nset) {
3119 		sigset_t new_set;
3120 		int error;
3121 		if (get_compat_sigset(&new_set, nset))
3122 			return -EFAULT;
3123 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3124 
3125 		error = sigprocmask(how, &new_set, NULL);
3126 		if (error)
3127 			return error;
3128 	}
3129 	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3130 }
3131 #endif
3132 
3133 static void do_sigpending(sigset_t *set)
3134 {
3135 	spin_lock_irq(&current->sighand->siglock);
3136 	sigorsets(set, &current->pending.signal,
3137 		  &current->signal->shared_pending.signal);
3138 	spin_unlock_irq(&current->sighand->siglock);
3139 
3140 	/* Outside the lock because only this thread touches it.  */
3141 	sigandsets(set, &current->blocked, set);
3142 }
3143 
3144 /**
3145  *  sys_rt_sigpending - examine a pending signal that has been raised
3146  *			while blocked
3147  *  @uset: stores pending signals
3148  *  @sigsetsize: size of sigset_t type or larger
3149  */
3150 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3151 {
3152 	sigset_t set;
3153 
3154 	if (sigsetsize > sizeof(*uset))
3155 		return -EINVAL;
3156 
3157 	do_sigpending(&set);
3158 
3159 	if (copy_to_user(uset, &set, sigsetsize))
3160 		return -EFAULT;
3161 
3162 	return 0;
3163 }
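
/*
 * Userspace view (illustrative): sigpending(2), built on this syscall,
 * reports signals raised while blocked and still awaiting delivery:
 *
 *	sigset_t pending;
 *
 *	if (sigpending(&pending) == 0 && sigismember(&pending, SIGTERM))
 *		;	// a SIGTERM arrived while we had it blocked
 */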
3164 
3165 #ifdef CONFIG_COMPAT
3166 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3167 		compat_size_t, sigsetsize)
3168 {
3169 	sigset_t set;
3170 
3171 	if (sigsetsize > sizeof(*uset))
3172 		return -EINVAL;
3173 
3174 	do_sigpending(&set);
3175 
3176 	return put_compat_sigset(uset, &set, sigsetsize);
3177 }
3178 #endif
3179 
3180 static const struct {
3181 	unsigned char limit, layout;
3182 } sig_sicodes[] = {
3183 	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3184 	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3185 	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3186 	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3187 	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3188 #if defined(SIGEMT)
3189 	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3190 #endif
3191 	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3192 	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3193 	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3194 };
3195 
3196 static bool known_siginfo_layout(unsigned sig, int si_code)
3197 {
3198 	if (si_code == SI_KERNEL)
3199 		return true;
3200 	else if (si_code > SI_USER) {
3201 		if (sig_specific_sicodes(sig)) {
3202 			if (si_code <= sig_sicodes[sig].limit)
3203 				return true;
3204 		}
3205 		else if (si_code <= NSIGPOLL)
3206 			return true;
3207 	}
3208 	else if (si_code >= SI_DETHREAD)
3209 		return true;
3210 	else if (si_code == SI_ASYNCNL)
3211 		return true;
3212 	return false;
3213 }
3214 
3215 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3216 {
3217 	enum siginfo_layout layout = SIL_KILL;
3218 	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3219 		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3220 		    (si_code <= sig_sicodes[sig].limit)) {
3221 			layout = sig_sicodes[sig].layout;
3222 			/* Handle the exceptions */
3223 			if ((sig == SIGBUS) &&
3224 			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3225 				layout = SIL_FAULT_MCEERR;
3226 			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3227 				layout = SIL_FAULT_BNDERR;
3228 #ifdef SEGV_PKUERR
3229 			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3230 				layout = SIL_FAULT_PKUERR;
3231 #endif
3232 			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3233 				layout = SIL_PERF_EVENT;
3234 #ifdef __ARCH_SI_TRAPNO
3235 			else if (layout == SIL_FAULT)
3236 				layout = SIL_FAULT_TRAPNO;
3237 #endif
3238 		}
3239 		else if (si_code <= NSIGPOLL)
3240 			layout = SIL_POLL;
3241 	} else {
3242 		if (si_code == SI_TIMER)
3243 			layout = SIL_TIMER;
3244 		else if (si_code == SI_SIGIO)
3245 			layout = SIL_POLL;
3246 		else if (si_code < 0)
3247 			layout = SIL_RT;
3248 	}
3249 	return layout;
3250 }
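
/*
 * A few concrete mappings (illustrative; on the few architectures that
 * define __ARCH_SI_TRAPNO the SIL_FAULT case becomes SIL_FAULT_TRAPNO):
 *
 *	siginfo_layout(SIGSEGV, SEGV_MAPERR)	-> SIL_FAULT
 *	siginfo_layout(SIGCHLD, CLD_EXITED)	-> SIL_CHLD
 *	siginfo_layout(SIGUSR1, SI_QUEUE)	-> SIL_RT
 *	siginfo_layout(SIGUSR1, SI_USER)	-> SIL_KILL
 */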
3251 
3252 static inline char __user *si_expansion(const siginfo_t __user *info)
3253 {
3254 	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3255 }
3256 
3257 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3258 {
3259 	char __user *expansion = si_expansion(to);
3260 	if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3261 	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3262 	if (clear_user(expansion, SI_EXPANSION_SIZE))
3263 		return -EFAULT;
3264 	return 0;
3265 }
3266 
3267 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3268 				       const siginfo_t __user *from)
3269 {
3270 	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3271 		char __user *expansion = si_expansion(from);
3272 		char buf[SI_EXPANSION_SIZE];
3273 		int i;
3274 		/*
3275 		 * An unknown si_code might need more than
3276 		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3277 		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3278 		 * will return this data to userspace exactly.
3279 		 */
3280 		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3281 			return -EFAULT;
3282 		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3283 			if (buf[i] != 0)
3284 				return -E2BIG;
3285 		}
3286 	}
3287 	return 0;
3288 }
3289 
3290 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3291 				    const siginfo_t __user *from)
3292 {
3293 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3294 		return -EFAULT;
3295 	to->si_signo = signo;
3296 	return post_copy_siginfo_from_user(to, from);
3297 }
3298 
3299 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3300 {
3301 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3302 		return -EFAULT;
3303 	return post_copy_siginfo_from_user(to, from);
3304 }
3305 
3306 #ifdef CONFIG_COMPAT
3307 /**
3308  * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3309  * @to: compat siginfo destination
3310  * @from: kernel siginfo source
3311  *
3312  * Note: This function does not work properly for SIGCHLD on x32, but
3313  * fortunately it doesn't have to.  The only valid callers for this function are
3314  * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3315  * The latter does not care because SIGCHLD will never cause a coredump.
3316  */
3317 void copy_siginfo_to_external32(struct compat_siginfo *to,
3318 		const struct kernel_siginfo *from)
3319 {
3320 	memset(to, 0, sizeof(*to));
3321 
3322 	to->si_signo = from->si_signo;
3323 	to->si_errno = from->si_errno;
3324 	to->si_code  = from->si_code;
3325 	switch (siginfo_layout(from->si_signo, from->si_code)) {
3326 	case SIL_KILL:
3327 		to->si_pid = from->si_pid;
3328 		to->si_uid = from->si_uid;
3329 		break;
3330 	case SIL_TIMER:
3331 		to->si_tid     = from->si_tid;
3332 		to->si_overrun = from->si_overrun;
3333 		to->si_int     = from->si_int;
3334 		break;
3335 	case SIL_POLL:
3336 		to->si_band = from->si_band;
3337 		to->si_fd   = from->si_fd;
3338 		break;
3339 	case SIL_FAULT:
3340 		to->si_addr = ptr_to_compat(from->si_addr);
3341 		break;
3342 	case SIL_FAULT_TRAPNO:
3343 		to->si_addr = ptr_to_compat(from->si_addr);
3344 		to->si_trapno = from->si_trapno;
3345 		break;
3346 	case SIL_FAULT_MCEERR:
3347 		to->si_addr = ptr_to_compat(from->si_addr);
3348 		to->si_addr_lsb = from->si_addr_lsb;
3349 		break;
3350 	case SIL_FAULT_BNDERR:
3351 		to->si_addr = ptr_to_compat(from->si_addr);
3352 		to->si_lower = ptr_to_compat(from->si_lower);
3353 		to->si_upper = ptr_to_compat(from->si_upper);
3354 		break;
3355 	case SIL_FAULT_PKUERR:
3356 		to->si_addr = ptr_to_compat(from->si_addr);
3357 		to->si_pkey = from->si_pkey;
3358 		break;
3359 	case SIL_PERF_EVENT:
3360 		to->si_addr = ptr_to_compat(from->si_addr);
3361 		to->si_perf_data = from->si_perf_data;
3362 		to->si_perf_type = from->si_perf_type;
3363 		break;
3364 	case SIL_CHLD:
3365 		to->si_pid = from->si_pid;
3366 		to->si_uid = from->si_uid;
3367 		to->si_status = from->si_status;
3368 		to->si_utime = from->si_utime;
3369 		to->si_stime = from->si_stime;
3370 		break;
3371 	case SIL_RT:
3372 		to->si_pid = from->si_pid;
3373 		to->si_uid = from->si_uid;
3374 		to->si_int = from->si_int;
3375 		break;
3376 	case SIL_SYS:
3377 		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3378 		to->si_syscall   = from->si_syscall;
3379 		to->si_arch      = from->si_arch;
3380 		break;
3381 	}
3382 }
3383 
3384 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3385 			   const struct kernel_siginfo *from)
3386 {
3387 	struct compat_siginfo new;
3388 
3389 	copy_siginfo_to_external32(&new, from);
3390 	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3391 		return -EFAULT;
3392 	return 0;
3393 }
3394 
3395 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3396 					 const struct compat_siginfo *from)
3397 {
3398 	clear_siginfo(to);
3399 	to->si_signo = from->si_signo;
3400 	to->si_errno = from->si_errno;
3401 	to->si_code  = from->si_code;
3402 	switch (siginfo_layout(from->si_signo, from->si_code)) {
3403 	case SIL_KILL:
3404 		to->si_pid = from->si_pid;
3405 		to->si_uid = from->si_uid;
3406 		break;
3407 	case SIL_TIMER:
3408 		to->si_tid     = from->si_tid;
3409 		to->si_overrun = from->si_overrun;
3410 		to->si_int     = from->si_int;
3411 		break;
3412 	case SIL_POLL:
3413 		to->si_band = from->si_band;
3414 		to->si_fd   = from->si_fd;
3415 		break;
3416 	case SIL_FAULT:
3417 		to->si_addr = compat_ptr(from->si_addr);
3418 		break;
3419 	case SIL_FAULT_TRAPNO:
3420 		to->si_addr = compat_ptr(from->si_addr);
3421 		to->si_trapno = from->si_trapno;
3422 		break;
3423 	case SIL_FAULT_MCEERR:
3424 		to->si_addr = compat_ptr(from->si_addr);
3425 		to->si_addr_lsb = from->si_addr_lsb;
3426 		break;
3427 	case SIL_FAULT_BNDERR:
3428 		to->si_addr = compat_ptr(from->si_addr);
3429 		to->si_lower = compat_ptr(from->si_lower);
3430 		to->si_upper = compat_ptr(from->si_upper);
3431 		break;
3432 	case SIL_FAULT_PKUERR:
3433 		to->si_addr = compat_ptr(from->si_addr);
3434 		to->si_pkey = from->si_pkey;
3435 		break;
3436 	case SIL_PERF_EVENT:
3437 		to->si_addr = compat_ptr(from->si_addr);
3438 		to->si_perf_data = from->si_perf_data;
3439 		to->si_perf_type = from->si_perf_type;
3440 		break;
3441 	case SIL_CHLD:
3442 		to->si_pid    = from->si_pid;
3443 		to->si_uid    = from->si_uid;
3444 		to->si_status = from->si_status;
3445 #ifdef CONFIG_X86_X32_ABI
3446 		if (in_x32_syscall()) {
3447 			to->si_utime = from->_sifields._sigchld_x32._utime;
3448 			to->si_stime = from->_sifields._sigchld_x32._stime;
3449 		} else
3450 #endif
3451 		{
3452 			to->si_utime = from->si_utime;
3453 			to->si_stime = from->si_stime;
3454 		}
3455 		break;
3456 	case SIL_RT:
3457 		to->si_pid = from->si_pid;
3458 		to->si_uid = from->si_uid;
3459 		to->si_int = from->si_int;
3460 		break;
3461 	case SIL_SYS:
3462 		to->si_call_addr = compat_ptr(from->si_call_addr);
3463 		to->si_syscall   = from->si_syscall;
3464 		to->si_arch      = from->si_arch;
3465 		break;
3466 	}
3467 	return 0;
3468 }
3469 
3470 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3471 				      const struct compat_siginfo __user *ufrom)
3472 {
3473 	struct compat_siginfo from;
3474 
3475 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3476 		return -EFAULT;
3477 
3478 	from.si_signo = signo;
3479 	return post_copy_siginfo_from_user32(to, &from);
3480 }
3481 
3482 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3483 			     const struct compat_siginfo __user *ufrom)
3484 {
3485 	struct compat_siginfo from;
3486 
3487 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3488 		return -EFAULT;
3489 
3490 	return post_copy_siginfo_from_user32(to, &from);
3491 }
3492 #endif /* CONFIG_COMPAT */
3493 
3494 /**
3495  *  do_sigtimedwait - wait for queued signals specified in @which
3496  *  @which: queued signals to wait for
3497  *  @info: if non-null, the signal's siginfo is returned here
3498  *  @ts: upper bound on process time suspension
3499  */
3500 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3501 		    const struct timespec64 *ts)
3502 {
3503 	ktime_t *to = NULL, timeout = KTIME_MAX;
3504 	struct task_struct *tsk = current;
3505 	sigset_t mask = *which;
3506 	int sig, ret = 0;
3507 
3508 	if (ts) {
3509 		if (!timespec64_valid(ts))
3510 			return -EINVAL;
3511 		timeout = timespec64_to_ktime(*ts);
3512 		to = &timeout;
3513 	}
3514 
3515 	/*
3516 	 * Invert the set of allowed signals to get those we want to block.
3517 	 */
3518 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3519 	signotset(&mask);
3520 
3521 	spin_lock_irq(&tsk->sighand->siglock);
3522 	sig = dequeue_signal(tsk, &mask, info);
3523 	if (!sig && timeout) {
3524 		/*
3525 		 * None ready, temporarily unblock those we're interested
3526 		 * in while we sleep, so that we'll be awakened when they
3527 		 * arrive. Unblocking is always fine, we can avoid
3528 		 * set_current_blocked().
3529 		 */
3530 		tsk->real_blocked = tsk->blocked;
3531 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3532 		recalc_sigpending();
3533 		spin_unlock_irq(&tsk->sighand->siglock);
3534 
3535 		__set_current_state(TASK_INTERRUPTIBLE);
3536 		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3537 							 HRTIMER_MODE_REL);
3538 		spin_lock_irq(&tsk->sighand->siglock);
3539 		__set_task_blocked(tsk, &tsk->real_blocked);
3540 		sigemptyset(&tsk->real_blocked);
3541 		sig = dequeue_signal(tsk, &mask, info);
3542 	}
3543 	spin_unlock_irq(&tsk->sighand->siglock);
3544 
3545 	if (sig)
3546 		return sig;
3547 	return ret ? -EINTR : -EAGAIN;
3548 }
3549 
3550 /**
3551  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3552  *			in @uthese
3553  *  @uthese: queued signals to wait for
3554  *  @uinfo: if non-null, the signal's siginfo is returned here
3555  *  @uts: upper bound on process time suspension
3556  *  @sigsetsize: size of sigset_t type
3557  */
3558 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3559 		siginfo_t __user *, uinfo,
3560 		const struct __kernel_timespec __user *, uts,
3561 		size_t, sigsetsize)
3562 {
3563 	sigset_t these;
3564 	struct timespec64 ts;
3565 	kernel_siginfo_t info;
3566 	int ret;
3567 
3568 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3569 	if (sigsetsize != sizeof(sigset_t))
3570 		return -EINVAL;
3571 
3572 	if (copy_from_user(&these, uthese, sizeof(these)))
3573 		return -EFAULT;
3574 
3575 	if (uts) {
3576 		if (get_timespec64(&ts, uts))
3577 			return -EFAULT;
3578 	}
3579 
3580 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3581 
3582 	if (ret > 0 && uinfo) {
3583 		if (copy_siginfo_to_user(uinfo, &info))
3584 			ret = -EFAULT;
3585 	}
3586 
3587 	return ret;
3588 }
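
/*
 * Userspace view (illustrative): the waited-for signals must already be
 * blocked, otherwise they may be delivered to a handler instead of being
 * picked up by sigtimedwait(2):
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) < 0 && errno == EAGAIN)
 *		;	// timed out after 5s with no SIGUSR1
 */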
3589 
3590 #ifdef CONFIG_COMPAT_32BIT_TIME
3591 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3592 		siginfo_t __user *, uinfo,
3593 		const struct old_timespec32 __user *, uts,
3594 		size_t, sigsetsize)
3595 {
3596 	sigset_t these;
3597 	struct timespec64 ts;
3598 	kernel_siginfo_t info;
3599 	int ret;
3600 
3601 	if (sigsetsize != sizeof(sigset_t))
3602 		return -EINVAL;
3603 
3604 	if (copy_from_user(&these, uthese, sizeof(these)))
3605 		return -EFAULT;
3606 
3607 	if (uts) {
3608 		if (get_old_timespec32(&ts, uts))
3609 			return -EFAULT;
3610 	}
3611 
3612 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3613 
3614 	if (ret > 0 && uinfo) {
3615 		if (copy_siginfo_to_user(uinfo, &info))
3616 			ret = -EFAULT;
3617 	}
3618 
3619 	return ret;
3620 }
3621 #endif
3622 
3623 #ifdef CONFIG_COMPAT
3624 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3625 		struct compat_siginfo __user *, uinfo,
3626 		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3627 {
3628 	sigset_t s;
3629 	struct timespec64 t;
3630 	kernel_siginfo_t info;
3631 	long ret;
3632 
3633 	if (sigsetsize != sizeof(sigset_t))
3634 		return -EINVAL;
3635 
3636 	if (get_compat_sigset(&s, uthese))
3637 		return -EFAULT;
3638 
3639 	if (uts) {
3640 		if (get_timespec64(&t, uts))
3641 			return -EFAULT;
3642 	}
3643 
3644 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3645 
3646 	if (ret > 0 && uinfo) {
3647 		if (copy_siginfo_to_user32(uinfo, &info))
3648 			ret = -EFAULT;
3649 	}
3650 
3651 	return ret;
3652 }
3653 
3654 #ifdef CONFIG_COMPAT_32BIT_TIME
3655 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3656 		struct compat_siginfo __user *, uinfo,
3657 		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3658 {
3659 	sigset_t s;
3660 	struct timespec64 t;
3661 	kernel_siginfo_t info;
3662 	long ret;
3663 
3664 	if (sigsetsize != sizeof(sigset_t))
3665 		return -EINVAL;
3666 
3667 	if (get_compat_sigset(&s, uthese))
3668 		return -EFAULT;
3669 
3670 	if (uts) {
3671 		if (get_old_timespec32(&t, uts))
3672 			return -EFAULT;
3673 	}
3674 
3675 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3676 
3677 	if (ret > 0 && uinfo) {
3678 		if (copy_siginfo_to_user32(uinfo, &info))
3679 			ret = -EFAULT;
3680 	}
3681 
3682 	return ret;
3683 }
3684 #endif
3685 #endif
3686 
3687 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3688 {
3689 	clear_siginfo(info);
3690 	info->si_signo = sig;
3691 	info->si_errno = 0;
3692 	info->si_code = SI_USER;
3693 	info->si_pid = task_tgid_vnr(current);
3694 	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3695 }
3696 
3697 /**
3698  *  sys_kill - send a signal to a process
3699  *  @pid: the PID of the process
3700  *  @sig: signal to be sent
3701  */
3702 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3703 {
3704 	struct kernel_siginfo info;
3705 
3706 	prepare_kill_siginfo(sig, &info);
3707 
3708 	return kill_something_info(sig, &info, pid);
3709 }
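/*
 * Illustrative userspace counterpart: kill(2) maps directly onto this
 * syscall, and kill_something_info() gives @pid its usual encodings:
 * pid > 0 signals one process, pid == 0 the caller's process group,
 * pid == -1 everything the caller may signal, pid < -1 the process
 * group -pid. The target PID below is hypothetical.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *
 *	if (kill((pid_t)1234, SIGTERM) < 0)	// ask 1234 to terminate
 *		perror("kill");
 */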
3710 
3711 /*
3712  * Verify that the signaler and signalee either are in the same pid namespace
3713  * or that the signaler's pid namespace is an ancestor of the signalee's pid
3714  * namespace.
3715  */
3716 static bool access_pidfd_pidns(struct pid *pid)
3717 {
3718 	struct pid_namespace *active = task_active_pid_ns(current);
3719 	struct pid_namespace *p = ns_of_pid(pid);
3720 
3721 	for (;;) {
3722 		if (!p)
3723 			return false;
3724 		if (p == active)
3725 			break;
3726 		p = p->parent;
3727 	}
3728 
3729 	return true;
3730 }
3731 
3732 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3733 		siginfo_t __user *info)
3734 {
3735 #ifdef CONFIG_COMPAT
3736 	/*
3737 	 * Avoid hooking up compat syscalls and instead handle necessary
3738 	 * conversions here. Note, this is a stop-gap measure and should not be
3739 	 * considered a generic solution.
3740 	 */
3741 	if (in_compat_syscall())
3742 		return copy_siginfo_from_user32(
3743 			kinfo, (struct compat_siginfo __user *)info);
3744 #endif
3745 	return copy_siginfo_from_user(kinfo, info);
3746 }
3747 
3748 static struct pid *pidfd_to_pid(const struct file *file)
3749 {
3750 	struct pid *pid;
3751 
3752 	pid = pidfd_pid(file);
3753 	if (!IS_ERR(pid))
3754 		return pid;
3755 
3756 	return tgid_pidfd_to_pid(file);
3757 }
3758 
3759 /**
3760  * sys_pidfd_send_signal - Signal a process through a pidfd
3761  * @pidfd:  file descriptor of the process
3762  * @sig:    signal to send
3763  * @info:   signal info
3764  * @flags:  future flags
3765  *
3766  * The syscall currently only signals via PIDTYPE_PID, which covers
3767  * kill(<positive-pid>, <signal>). It does not signal threads or process
3768  * groups.
3769  * In order to extend the syscall to threads and process groups the @flags
3770  * argument should be used. In essence, the @flags argument will determine
3771  * what is signaled and not the file descriptor itself. In other words,
3772  * grouping is a property of the flags argument, not a property of the
3773  * file descriptor.
3774  *
3775  * Return: 0 on success, negative errno on failure
3776  */
3777 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3778 		siginfo_t __user *, info, unsigned int, flags)
3779 {
3780 	int ret;
3781 	struct fd f;
3782 	struct pid *pid;
3783 	kernel_siginfo_t kinfo;
3784 
3785 	/* Enforce that flags is 0 until we add an extension. */
3786 	if (flags)
3787 		return -EINVAL;
3788 
3789 	f = fdget(pidfd);
3790 	if (!f.file)
3791 		return -EBADF;
3792 
3793 	/* Is this a pidfd? */
3794 	pid = pidfd_to_pid(f.file);
3795 	if (IS_ERR(pid)) {
3796 		ret = PTR_ERR(pid);
3797 		goto err;
3798 	}
3799 
3800 	ret = -EINVAL;
3801 	if (!access_pidfd_pidns(pid))
3802 		goto err;
3803 
3804 	if (info) {
3805 		ret = copy_siginfo_from_user_any(&kinfo, info);
3806 		if (unlikely(ret))
3807 			goto err;
3808 
3809 		ret = -EINVAL;
3810 		if (unlikely(sig != kinfo.si_signo))
3811 			goto err;
3812 
3813 		/* Only allow sending arbitrary signals to yourself. */
3814 		ret = -EPERM;
3815 		if ((task_pid(current) != pid) &&
3816 		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3817 			goto err;
3818 	} else {
3819 		prepare_kill_siginfo(sig, &kinfo);
3820 	}
3821 
3822 	ret = kill_pid_info(sig, &kinfo, pid);
3823 
3824 err:
3825 	fdput(f);
3826 	return ret;
3827 }
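/*
 * A hedged userspace sketch of driving this syscall directly; older
 * libcs ship no wrappers, so raw syscall numbers are used. A NULL
 * @info makes the kernel fill in SI_USER data, so this behaves like
 * kill(pid, sig), except that once the pidfd is held later signals
 * cannot hit a recycled PID.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int kill_via_pidfd(pid_t pid, int sig)
 *	{
 *		int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *		if (pidfd < 0)
 *			return -1;
 *		int ret = syscall(SYS_pidfd_send_signal, pidfd, sig,
 *				  NULL, 0);	// flags must be 0, see above
 *		close(pidfd);
 *		return ret;
 *	}
 */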
3828 
3829 static int
3830 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3831 {
3832 	struct task_struct *p;
3833 	int error = -ESRCH;
3834 
3835 	rcu_read_lock();
3836 	p = find_task_by_vpid(pid);
3837 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3838 		error = check_kill_permission(sig, info, p);
3839 		/*
3840 		 * The null signal is a permissions and process existence
3841 		 * probe.  No signal is actually delivered.
3842 		 */
3843 		if (!error && sig) {
3844 			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3845 			/*
3846 			 * If lock_task_sighand() failed we pretend the task
3847 			 * dies after receiving the signal. The window is tiny,
3848 			 * and the signal is private anyway.
3849 			 */
3850 			if (unlikely(error == -ESRCH))
3851 				error = 0;
3852 		}
3853 	}
3854 	rcu_read_unlock();
3855 
3856 	return error;
3857 }
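/*
 * The null-signal convention above is shared with kill(2); the classic
 * userspace liveness probe relies on it. A minimal sketch:
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *
 *	// 1: pid exists and is signalable, 0: gone, -1: exists but EPERM
 *	int probe_pid(pid_t pid)
 *	{
 *		if (kill(pid, 0) == 0)
 *			return 1;
 *		return errno == ESRCH ? 0 : -1;
 *	}
 */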
3858 
3859 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3860 {
3861 	struct kernel_siginfo info;
3862 
3863 	clear_siginfo(&info);
3864 	info.si_signo = sig;
3865 	info.si_errno = 0;
3866 	info.si_code = SI_TKILL;
3867 	info.si_pid = task_tgid_vnr(current);
3868 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3869 
3870 	return do_send_specific(tgid, pid, sig, &info);
3871 }
3872 
3873 /**
3874  *  sys_tgkill - send signal to one specific thread
3875  *  @tgid: the thread group ID of the thread
3876  *  @pid: the PID of the thread
3877  *  @sig: signal to be sent
3878  *
3879  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3880  *  exists but no longer belongs to the target thread group. This
3881  *  closes the race of threads exiting and their PIDs being reused.
3882  */
3883 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3884 {
3885 	/* This is only valid for single tasks */
3886 	if (pid <= 0 || tgid <= 0)
3887 		return -EINVAL;
3888 
3889 	return do_tkill(tgid, pid, sig);
3890 }
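/*
 * Illustrative userspace use, signaling one thread of the caller's own
 * group; glibc has only wrapped tgkill()/gettid() since 2.30, so the
 * raw syscall spelling is shown.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int signal_own_thread(pid_t tid)
 *	{
 *		// getpid() is the thread-group id as the kernel sees it
 *		return syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 *	}
 */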
3891 
3892 /**
3893  *  sys_tkill - send signal to one specific task
3894  *  @pid: the PID of the task
3895  *  @sig: signal to be sent
3896  *
3897  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
3898  */
3899 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3900 {
3901 	/* This is only valid for single tasks */
3902 	if (pid <= 0)
3903 		return -EINVAL;
3904 
3905 	return do_tkill(0, pid, sig);
3906 }
3907 
3908 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3909 {
3910 	/* Not even root can pretend to send signals from the kernel.
3911 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3912 	 */
3913 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3914 	    (task_pid_vnr(current) != pid))
3915 		return -EPERM;
3916 
3917 	/* POSIX.1b doesn't mention process groups.  */
3918 	return kill_proc_info(sig, info, pid);
3919 }
3920 
3921 /**
3922  *  sys_rt_sigqueueinfo - queue a signal and its payload to a process
3923  *  @pid: the PID of the process
3924  *  @sig: signal to be sent
3925  *  @uinfo: signal info to be sent
3926  */
3927 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3928 		siginfo_t __user *, uinfo)
3929 {
3930 	kernel_siginfo_t info;
3931 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3932 	if (unlikely(ret))
3933 		return ret;
3934 	return do_rt_sigqueueinfo(pid, sig, &info);
3935 }
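/*
 * Userspace normally reaches this through sigqueue(3), which builds an
 * SI_QUEUE siginfo (so the si_code check above passes). A minimal
 * sketch, with a hypothetical target pid:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	union sigval v = { .sival_int = 42 };	// payload for the handler
 *	if (sigqueue(pid, SIGUSR1, v) < 0)
 *		perror("sigqueue");
 */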
3936 
3937 #ifdef CONFIG_COMPAT
3938 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3939 			compat_pid_t, pid,
3940 			int, sig,
3941 			struct compat_siginfo __user *, uinfo)
3942 {
3943 	kernel_siginfo_t info;
3944 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3945 	if (unlikely(ret))
3946 		return ret;
3947 	return do_rt_sigqueueinfo(pid, sig, &info);
3948 }
3949 #endif
3950 
3951 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3952 {
3953 	/* This is only valid for single tasks */
3954 	if (pid <= 0 || tgid <= 0)
3955 		return -EINVAL;
3956 
3957 	/* Not even root can pretend to send signals from the kernel.
3958 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3959 	 */
3960 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3961 	    (task_pid_vnr(current) != pid))
3962 		return -EPERM;
3963 
3964 	return do_send_specific(tgid, pid, sig, info);
3965 }
3966 
3967 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3968 		siginfo_t __user *, uinfo)
3969 {
3970 	kernel_siginfo_t info;
3971 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3972 	if (unlikely(ret))
3973 		return ret;
3974 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3975 }
3976 
3977 #ifdef CONFIG_COMPAT
3978 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3979 			compat_pid_t, tgid,
3980 			compat_pid_t, pid,
3981 			int, sig,
3982 			struct compat_siginfo __user *, uinfo)
3983 {
3984 	kernel_siginfo_t info;
3985 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3986 	if (unlikely(ret))
3987 		return ret;
3988 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3989 }
3990 #endif
3991 
3992 /*
3993  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3994  */
3995 void kernel_sigaction(int sig, __sighandler_t action)
3996 {
3997 	spin_lock_irq(&current->sighand->siglock);
3998 	current->sighand->action[sig - 1].sa.sa_handler = action;
3999 	if (action == SIG_IGN) {
4000 		sigset_t mask;
4001 
4002 		sigemptyset(&mask);
4003 		sigaddset(&mask, sig);
4004 
4005 		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4006 		flush_sigqueue_mask(&mask, &current->pending);
4007 		recalc_sigpending();
4008 	}
4009 	spin_unlock_irq(&current->sighand->siglock);
4010 }
4011 EXPORT_SYMBOL(kernel_sigaction);
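/*
 * Kernel threads normally reach this through the allow_signal() and
 * disallow_signal() helpers; a sketch of the usual pattern (the thread
 * function itself is illustrative):
 *
 *	static int my_kthread(void *data)
 *	{
 *		allow_signal(SIGKILL);	// opt this kthread in to SIGKILL
 *		while (!kthread_should_stop()) {
 *			schedule_timeout_interruptible(HZ);
 *			if (signal_pending(current))
 *				break;	// SIGKILL was sent to us
 *		}
 *		return 0;
 *	}
 */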
4012 
4013 void __weak sigaction_compat_abi(struct k_sigaction *act,
4014 		struct k_sigaction *oact)
4015 {
4016 }
4017 
4018 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4019 {
4020 	struct task_struct *p = current, *t;
4021 	struct k_sigaction *k;
4022 	sigset_t mask;
4023 
4024 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4025 		return -EINVAL;
4026 
4027 	k = &p->sighand->action[sig-1];
4028 
4029 	spin_lock_irq(&p->sighand->siglock);
4030 	if (oact)
4031 		*oact = *k;
4032 
4033 	/*
4034 	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4035 	 * e.g. by having an architecture use the bit in its uapi.
4036 	 */
4037 	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4038 
4039 	/*
4040 	 * Clear unknown flag bits in order to allow userspace to detect missing
4041 	 * support for flag bits and to allow the kernel to use non-uapi bits
4042 	 * internally.
4043 	 */
4044 	if (act)
4045 		act->sa.sa_flags &= UAPI_SA_FLAGS;
4046 	if (oact)
4047 		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4048 
4049 	sigaction_compat_abi(act, oact);
4050 
4051 	if (act) {
4052 		sigdelsetmask(&act->sa.sa_mask,
4053 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4054 		*k = *act;
4055 		/*
4056 		 * POSIX 3.3.1.3:
4057 		 *  "Setting a signal action to SIG_IGN for a signal that is
4058 		 *   pending shall cause the pending signal to be discarded,
4059 		 *   whether or not it is blocked."
4060 		 *
4061 		 *  "Setting a signal action to SIG_DFL for a signal that is
4062 		 *   pending and whose default action is to ignore the signal
4063 		 *   (for example, SIGCHLD), shall cause the pending signal to
4064 		 *   be discarded, whether or not it is blocked"
4065 		 */
4066 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4067 			sigemptyset(&mask);
4068 			sigaddset(&mask, sig);
4069 			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4070 			for_each_thread(p, t)
4071 				flush_sigqueue_mask(&mask, &t->pending);
4072 		}
4073 	}
4074 
4075 	spin_unlock_irq(&p->sighand->siglock);
4076 	return 0;
4077 }
4078 
4079 static int
4080 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4081 		size_t min_ss_size)
4082 {
4083 	struct task_struct *t = current;
4084 
4085 	if (oss) {
4086 		memset(oss, 0, sizeof(stack_t));
4087 		oss->ss_sp = (void __user *) t->sas_ss_sp;
4088 		oss->ss_size = t->sas_ss_size;
4089 		oss->ss_flags = sas_ss_flags(sp) |
4090 			(current->sas_ss_flags & SS_FLAG_BITS);
4091 	}
4092 
4093 	if (ss) {
4094 		void __user *ss_sp = ss->ss_sp;
4095 		size_t ss_size = ss->ss_size;
4096 		unsigned ss_flags = ss->ss_flags;
4097 		int ss_mode;
4098 
4099 		if (unlikely(on_sig_stack(sp)))
4100 			return -EPERM;
4101 
4102 		ss_mode = ss_flags & ~SS_FLAG_BITS;
4103 		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4104 				ss_mode != 0))
4105 			return -EINVAL;
4106 
4107 		if (ss_mode == SS_DISABLE) {
4108 			ss_size = 0;
4109 			ss_sp = NULL;
4110 		} else {
4111 			if (unlikely(ss_size < min_ss_size))
4112 				return -ENOMEM;
4113 		}
4114 
4115 		t->sas_ss_sp = (unsigned long) ss_sp;
4116 		t->sas_ss_size = ss_size;
4117 		t->sas_ss_flags = ss_flags;
4118 	}
4119 	return 0;
4120 }
4121 
4122 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4123 {
4124 	stack_t new, old;
4125 	int err;
4126 	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4127 		return -EFAULT;
4128 	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4129 			      current_user_stack_pointer(),
4130 			      MINSIGSTKSZ);
4131 	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4132 		err = -EFAULT;
4133 	return err;
4134 }
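/*
 * The canonical userspace use, sketched under the assumption it runs
 * early in main(): give a SIGSEGV handler somewhere to run after the
 * main stack has overflowed. SA_ONSTACK is what ties the handler to
 * the stack registered here.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static void on_segv(int sig) { _exit(42); }
 *
 *	stack_t ss = { .ss_sp = malloc(SIGSTKSZ),
 *		       .ss_size = SIGSTKSZ, .ss_flags = 0 };
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { .sa_handler = on_segv,
 *				.sa_flags = SA_ONSTACK };
 *	sigaction(SIGSEGV, &sa, NULL);
 */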
4135 
4136 int restore_altstack(const stack_t __user *uss)
4137 {
4138 	stack_t new;
4139 	if (copy_from_user(&new, uss, sizeof(stack_t)))
4140 		return -EFAULT;
4141 	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4142 			     MINSIGSTKSZ);
4143 	/* squash all but EFAULT for now */
4144 	return 0;
4145 }
4146 
4147 int __save_altstack(stack_t __user *uss, unsigned long sp)
4148 {
4149 	struct task_struct *t = current;
4150 	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4151 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4152 		__put_user(t->sas_ss_size, &uss->ss_size);
4153 	return err;
4154 }
4155 
4156 #ifdef CONFIG_COMPAT
4157 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4158 				 compat_stack_t __user *uoss_ptr)
4159 {
4160 	stack_t uss, uoss;
4161 	int ret;
4162 
4163 	if (uss_ptr) {
4164 		compat_stack_t uss32;
4165 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4166 			return -EFAULT;
4167 		uss.ss_sp = compat_ptr(uss32.ss_sp);
4168 		uss.ss_flags = uss32.ss_flags;
4169 		uss.ss_size = uss32.ss_size;
4170 	}
4171 	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4172 			     compat_user_stack_pointer(),
4173 			     COMPAT_MINSIGSTKSZ);
4174 	if (ret >= 0 && uoss_ptr)  {
4175 		compat_stack_t old;
4176 		memset(&old, 0, sizeof(old));
4177 		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4178 		old.ss_flags = uoss.ss_flags;
4179 		old.ss_size = uoss.ss_size;
4180 		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4181 			ret = -EFAULT;
4182 	}
4183 	return ret;
4184 }
4185 
4186 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4187 			const compat_stack_t __user *, uss_ptr,
4188 			compat_stack_t __user *, uoss_ptr)
4189 {
4190 	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4191 }
4192 
4193 int compat_restore_altstack(const compat_stack_t __user *uss)
4194 {
4195 	int err = do_compat_sigaltstack(uss, NULL);
4196 	/* squash all but -EFAULT for now */
4197 	return err == -EFAULT ? err : 0;
4198 }
4199 
4200 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4201 {
4202 	int err;
4203 	struct task_struct *t = current;
4204 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4205 			 &uss->ss_sp) |
4206 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4207 		__put_user(t->sas_ss_size, &uss->ss_size);
4208 	return err;
4209 }
4210 #endif
4211 
4212 #ifdef __ARCH_WANT_SYS_SIGPENDING
4213 
4214 /**
4215  *  sys_sigpending - examine pending signals
4216  *  @uset: where the mask of pending signals is returned
4217  */
4218 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4219 {
4220 	sigset_t set;
4221 
4222 	if (sizeof(old_sigset_t) > sizeof(*uset))
4223 		return -EINVAL;
4224 
4225 	do_sigpending(&set);
4226 
4227 	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4228 		return -EFAULT;
4229 
4230 	return 0;
4231 }
4232 
4233 #ifdef CONFIG_COMPAT
4234 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4235 {
4236 	sigset_t set;
4237 
4238 	do_sigpending(&set);
4239 
4240 	return put_user(set.sig[0], set32);
4241 }
4242 #endif
4243 
4244 #endif
4245 
4246 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4247 /**
4248  *  sys_sigprocmask - examine and change blocked signals
4249  *  @how: whether to add, remove, or set signals
4250  *  @nset: signals to add or remove (if non-null)
4251  *  @oset: previous value of signal mask if non-null
4252  *
4253  * Some platforms have their own version with special arguments;
4254  * others support only sys_rt_sigprocmask.
4255  */
4256 
4257 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4258 		old_sigset_t __user *, oset)
4259 {
4260 	old_sigset_t old_set, new_set;
4261 	sigset_t new_blocked;
4262 
4263 	old_set = current->blocked.sig[0];
4264 
4265 	if (nset) {
4266 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4267 			return -EFAULT;
4268 
4269 		new_blocked = current->blocked;
4270 
4271 		switch (how) {
4272 		case SIG_BLOCK:
4273 			sigaddsetmask(&new_blocked, new_set);
4274 			break;
4275 		case SIG_UNBLOCK:
4276 			sigdelsetmask(&new_blocked, new_set);
4277 			break;
4278 		case SIG_SETMASK:
4279 			new_blocked.sig[0] = new_set;
4280 			break;
4281 		default:
4282 			return -EINVAL;
4283 		}
4284 
4285 		set_current_blocked(&new_blocked);
4286 	}
4287 
4288 	if (oset) {
4289 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4290 			return -EFAULT;
4291 	}
4292 
4293 	return 0;
4294 }
4295 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
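/*
 * Illustrative userspace counterpart: modern libcs route sigprocmask(3)
 * to rt_sigprocmask, but the @how semantics are the ones shown above.
 * The classic bracket-a-critical-section pattern, with a hypothetical
 * helper:
 *
 *	#include <signal.h>
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *
 *	sigprocmask(SIG_BLOCK, &set, &old);	// SIGINT now held pending
 *	do_critical_work();
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore previous mask
 */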
4296 
4297 #ifndef CONFIG_ODD_RT_SIGACTION
4298 /**
4299  *  sys_rt_sigaction - alter an action taken by a process
4300  *  @sig: signal to be sent
4301  *  @act: new sigaction
4302  *  @oact: used to save the previous sigaction
4303  *  @sigsetsize: size of sigset_t type
4304  */
4305 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4306 		const struct sigaction __user *, act,
4307 		struct sigaction __user *, oact,
4308 		size_t, sigsetsize)
4309 {
4310 	struct k_sigaction new_sa, old_sa;
4311 	int ret;
4312 
4313 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4314 	if (sigsetsize != sizeof(sigset_t))
4315 		return -EINVAL;
4316 
4317 	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4318 		return -EFAULT;
4319 
4320 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4321 	if (ret)
4322 		return ret;
4323 
4324 	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4325 		return -EFAULT;
4326 
4327 	return 0;
4328 }
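/*
 * glibc's sigaction(3) is the usual entry point into this syscall; a
 * minimal sketch installing a SIGCHLD reaper, whose flags and mask are
 * the ones filtered by do_sigaction() above:
 *
 *	#include <signal.h>
 *	#include <sys/wait.h>
 *
 *	static void reap(int sig)
 *	{
 *		while (waitpid(-1, NULL, WNOHANG) > 0)
 *			;	// collect every exited child
 *	}
 *
 *	struct sigaction sa;
 *	sa.sa_handler = reap;
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART | SA_NOCLDSTOP;
 *	sigaction(SIGCHLD, &sa, NULL);
 */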
4329 #ifdef CONFIG_COMPAT
4330 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4331 		const struct compat_sigaction __user *, act,
4332 		struct compat_sigaction __user *, oact,
4333 		compat_size_t, sigsetsize)
4334 {
4335 	struct k_sigaction new_ka, old_ka;
4336 #ifdef __ARCH_HAS_SA_RESTORER
4337 	compat_uptr_t restorer;
4338 #endif
4339 	int ret;
4340 
4341 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4342 	if (sigsetsize != sizeof(compat_sigset_t))
4343 		return -EINVAL;
4344 
4345 	if (act) {
4346 		compat_uptr_t handler;
4347 		ret = get_user(handler, &act->sa_handler);
4348 		new_ka.sa.sa_handler = compat_ptr(handler);
4349 #ifdef __ARCH_HAS_SA_RESTORER
4350 		ret |= get_user(restorer, &act->sa_restorer);
4351 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4352 #endif
4353 		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4354 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4355 		if (ret)
4356 			return -EFAULT;
4357 	}
4358 
4359 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4360 	if (!ret && oact) {
4361 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4362 			       &oact->sa_handler);
4363 		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4364 					 sizeof(oact->sa_mask));
4365 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4366 #ifdef __ARCH_HAS_SA_RESTORER
4367 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4368 				&oact->sa_restorer);
4369 #endif
4370 	}
4371 	return ret;
4372 }
4373 #endif
4374 #endif /* !CONFIG_ODD_RT_SIGACTION */
4375 
4376 #ifdef CONFIG_OLD_SIGACTION
4377 SYSCALL_DEFINE3(sigaction, int, sig,
4378 		const struct old_sigaction __user *, act,
4379 	        struct old_sigaction __user *, oact)
4380 {
4381 	struct k_sigaction new_ka, old_ka;
4382 	int ret;
4383 
4384 	if (act) {
4385 		old_sigset_t mask;
4386 		if (!access_ok(act, sizeof(*act)) ||
4387 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4388 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4389 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4390 		    __get_user(mask, &act->sa_mask))
4391 			return -EFAULT;
4392 #ifdef __ARCH_HAS_KA_RESTORER
4393 		new_ka.ka_restorer = NULL;
4394 #endif
4395 		siginitset(&new_ka.sa.sa_mask, mask);
4396 	}
4397 
4398 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4399 
4400 	if (!ret && oact) {
4401 		if (!access_ok(oact, sizeof(*oact)) ||
4402 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4403 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4404 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4405 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4406 			return -EFAULT;
4407 	}
4408 
4409 	return ret;
4410 }
4411 #endif
4412 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4413 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4414 		const struct compat_old_sigaction __user *, act,
4415 	        struct compat_old_sigaction __user *, oact)
4416 {
4417 	struct k_sigaction new_ka, old_ka;
4418 	int ret;
4419 	compat_old_sigset_t mask;
4420 	compat_uptr_t handler, restorer;
4421 
4422 	if (act) {
4423 		if (!access_ok(act, sizeof(*act)) ||
4424 		    __get_user(handler, &act->sa_handler) ||
4425 		    __get_user(restorer, &act->sa_restorer) ||
4426 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4427 		    __get_user(mask, &act->sa_mask))
4428 			return -EFAULT;
4429 
4430 #ifdef __ARCH_HAS_KA_RESTORER
4431 		new_ka.ka_restorer = NULL;
4432 #endif
4433 		new_ka.sa.sa_handler = compat_ptr(handler);
4434 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4435 		siginitset(&new_ka.sa.sa_mask, mask);
4436 	}
4437 
4438 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4439 
4440 	if (!ret && oact) {
4441 		if (!access_ok(oact, sizeof(*oact)) ||
4442 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4443 			       &oact->sa_handler) ||
4444 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4445 			       &oact->sa_restorer) ||
4446 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4447 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4448 			return -EFAULT;
4449 	}
4450 	return ret;
4451 }
4452 #endif
4453 
4454 #ifdef CONFIG_SGETMASK_SYSCALL
4455 
4456 /*
4457  * For backwards compatibility.  Functionality superseded by sigprocmask.
4458  */
4459 SYSCALL_DEFINE0(sgetmask)
4460 {
4461 	/* SMP safe */
4462 	return current->blocked.sig[0];
4463 }
4464 
4465 SYSCALL_DEFINE1(ssetmask, int, newmask)
4466 {
4467 	int old = current->blocked.sig[0];
4468 	sigset_t newset;
4469 
4470 	siginitset(&newset, newmask);
4471 	set_current_blocked(&newset);
4472 
4473 	return old;
4474 }
4475 #endif /* CONFIG_SGETMASK_SYSCALL */
4476 
4477 #ifdef __ARCH_WANT_SYS_SIGNAL
4478 /*
4479  * For backwards compatibility.  Functionality superseded by sigaction.
4480  */
4481 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4482 {
4483 	struct k_sigaction new_sa, old_sa;
4484 	int ret;
4485 
4486 	new_sa.sa.sa_handler = handler;
4487 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4488 	sigemptyset(&new_sa.sa.sa_mask);
4489 
4490 	ret = do_sigaction(sig, &new_sa, &old_sa);
4491 
4492 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4493 }
4494 #endif /* __ARCH_WANT_SYS_SIGNAL */
4495 
4496 #ifdef __ARCH_WANT_SYS_PAUSE
4497 
4498 SYSCALL_DEFINE0(pause)
4499 {
4500 	while (!signal_pending(current)) {
4501 		__set_current_state(TASK_INTERRUPTIBLE);
4502 		schedule();
4503 	}
4504 	return -ERESTARTNOHAND;
4505 }
4506 
4507 #endif
4508 
4509 static int sigsuspend(sigset_t *set)
4510 {
4511 	current->saved_sigmask = current->blocked;
4512 	set_current_blocked(set);
4513 
4514 	while (!signal_pending(current)) {
4515 		__set_current_state(TASK_INTERRUPTIBLE);
4516 		schedule();
4517 	}
4518 	set_restore_sigmask();
4519 	return -ERESTARTNOHAND;
4520 }
4521 
4522 /**
4523  *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
4524  *	until a signal is received
4525  *  @unewset: new signal mask value
4526  *  @sigsetsize: size of sigset_t type
4527  */
4528 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4529 {
4530 	sigset_t newset;
4531 
4532 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4533 	if (sigsetsize != sizeof(sigset_t))
4534 		return -EINVAL;
4535 
4536 	if (copy_from_user(&newset, unewset, sizeof(newset)))
4537 		return -EFAULT;
4538 	return sigsuspend(&newset);
4539 }
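/*
 * What sigsuspend() buys over pause() is the atomic mask-swap-and-sleep
 * implemented above. The classic race-free wait, assuming got_usr1 is a
 * volatile sig_atomic_t flag set by a SIGUSR1 handler:
 *
 *	#include <signal.h>
 *
 *	sigset_t block, orig;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &orig);
 *
 *	// A SIGUSR1 arriving here stays pending instead of being lost.
 *	while (!got_usr1)
 *		sigsuspend(&orig);	// atomically unblock and sleep
 *
 *	sigprocmask(SIG_SETMASK, &orig, NULL);
 */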
4540 
4541 #ifdef CONFIG_COMPAT
4542 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4543 {
4544 	sigset_t newset;
4545 
4546 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4547 	if (sigsetsize != sizeof(sigset_t))
4548 		return -EINVAL;
4549 
4550 	if (get_compat_sigset(&newset, unewset))
4551 		return -EFAULT;
4552 	return sigsuspend(&newset);
4553 }
4554 #endif
4555 
4556 #ifdef CONFIG_OLD_SIGSUSPEND
4557 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4558 {
4559 	sigset_t blocked;
4560 	siginitset(&blocked, mask);
4561 	return sigsuspend(&blocked);
4562 }
4563 #endif
4564 #ifdef CONFIG_OLD_SIGSUSPEND3
4565 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4566 {
4567 	sigset_t blocked;
4568 	siginitset(&blocked, mask);
4569 	return sigsuspend(&blocked);
4570 }
4571 #endif
4572 
4573 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4574 {
4575 	return NULL;
4576 }
4577 
4578 static inline void siginfo_buildtime_checks(void)
4579 {
4580 	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4581 
4582 	/* Verify the offsets in the two siginfos match */
4583 #define CHECK_OFFSET(field) \
4584 	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4585 
4586 	/* kill */
4587 	CHECK_OFFSET(si_pid);
4588 	CHECK_OFFSET(si_uid);
4589 
4590 	/* timer */
4591 	CHECK_OFFSET(si_tid);
4592 	CHECK_OFFSET(si_overrun);
4593 	CHECK_OFFSET(si_value);
4594 
4595 	/* rt */
4596 	CHECK_OFFSET(si_pid);
4597 	CHECK_OFFSET(si_uid);
4598 	CHECK_OFFSET(si_value);
4599 
4600 	/* sigchld */
4601 	CHECK_OFFSET(si_pid);
4602 	CHECK_OFFSET(si_uid);
4603 	CHECK_OFFSET(si_status);
4604 	CHECK_OFFSET(si_utime);
4605 	CHECK_OFFSET(si_stime);
4606 
4607 	/* sigfault */
4608 	CHECK_OFFSET(si_addr);
4609 	CHECK_OFFSET(si_trapno);
4610 	CHECK_OFFSET(si_addr_lsb);
4611 	CHECK_OFFSET(si_lower);
4612 	CHECK_OFFSET(si_upper);
4613 	CHECK_OFFSET(si_pkey);
4614 	CHECK_OFFSET(si_perf_data);
4615 	CHECK_OFFSET(si_perf_type);
4616 
4617 	/* sigpoll */
4618 	CHECK_OFFSET(si_band);
4619 	CHECK_OFFSET(si_fd);
4620 
4621 	/* sigsys */
4622 	CHECK_OFFSET(si_call_addr);
4623 	CHECK_OFFSET(si_syscall);
4624 	CHECK_OFFSET(si_arch);
4625 #undef CHECK_OFFSET
4626 
4627 	/* usb asyncio */
4628 	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4629 		     offsetof(struct siginfo, si_addr));
4630 	if (sizeof(int) == sizeof(void __user *)) {
4631 		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4632 			     sizeof(void __user *));
4633 	} else {
4634 		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4635 			      sizeof_field(struct siginfo, si_uid)) !=
4636 			     sizeof(void __user *));
4637 		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4638 			     offsetof(struct siginfo, si_uid));
4639 	}
4640 #ifdef CONFIG_COMPAT
4641 	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4642 		     offsetof(struct compat_siginfo, si_addr));
4643 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4644 		     sizeof(compat_uptr_t));
4645 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4646 		     sizeof_field(struct siginfo, si_pid));
4647 #endif
4648 }
4649 
4650 void __init signals_init(void)
4651 {
4652 	siginfo_buildtime_checks();
4653 
4654 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4655 }
4656 
4657 #ifdef CONFIG_KGDB_KDB
4658 #include <linux/kdb.h>
4659 /*
4660  * kdb_send_sig - Allows kdb to send signals without exposing
4661  * signal internals.  This function checks if the required locks are
4662  * available before calling the main signal code, to avoid kdb
4663  * deadlocks.
4664  */
4665 void kdb_send_sig(struct task_struct *t, int sig)
4666 {
4667 	static struct task_struct *kdb_prev_t;
4668 	int new_t, ret;
4669 	if (!spin_trylock(&t->sighand->siglock)) {
4670 		kdb_printf("Can't do kill command now.\n"
4671 			   "The sigmask lock is held somewhere else in the "
4672 			   "kernel; try again later\n");
4673 		return;
4674 	}
4675 	new_t = kdb_prev_t != t;
4676 	kdb_prev_t = t;
4677 	if (!task_is_running(t) && new_t) {
4678 		spin_unlock(&t->sighand->siglock);
4679 		kdb_printf("Process is not RUNNING, sending a signal "
4680 			   "from kdb risks deadlock on the run queue "
4681 			   "locks.\n"
4682 			   "The signal has _not_ been sent.\n"
4683 			   "Reissue the kill command if you want to "
4684 			   "risk the deadlock.\n");
4685 		return;
4686 	}
4687 	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4688 	spin_unlock(&t->sighand->siglock);
4689 	if (ret)
4690 		kdb_printf("Failed to deliver signal %d to process %d.\n",
4691 			   sig, t->pid);
4692 	else
4693 		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
4694 }
4695 #endif	/* CONFIG_KGDB_KDB */
4696