xref: /openbmc/linux/kernel/signal.c (revision f3b82bb2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/kernel/signal.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *
7  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
8  *
9  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
10  *		Changes to use preallocated sigqueue structures
11  *		to allow signals to be sent reliably.
12  */
13 
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
24 #include <linux/fs.h>
25 #include <linux/mm.h>
26 #include <linux/proc_fs.h>
27 #include <linux/tty.h>
28 #include <linux/binfmts.h>
29 #include <linux/coredump.h>
30 #include <linux/security.h>
31 #include <linux/syscalls.h>
32 #include <linux/ptrace.h>
33 #include <linux/signal.h>
34 #include <linux/signalfd.h>
35 #include <linux/ratelimit.h>
36 #include <linux/task_work.h>
37 #include <linux/capability.h>
38 #include <linux/freezer.h>
39 #include <linux/pid_namespace.h>
40 #include <linux/nsproxy.h>
41 #include <linux/user_namespace.h>
42 #include <linux/uprobes.h>
43 #include <linux/compat.h>
44 #include <linux/cn_proc.h>
45 #include <linux/compiler.h>
46 #include <linux/posix-timers.h>
47 #include <linux/cgroup.h>
48 #include <linux/audit.h>
49 #include <linux/sysctl.h>
50 
51 #define CREATE_TRACE_POINTS
52 #include <trace/events/signal.h>
53 
54 #include <asm/param.h>
55 #include <linux/uaccess.h>
56 #include <asm/unistd.h>
57 #include <asm/siginfo.h>
58 #include <asm/cacheflush.h>
59 #include <asm/syscall.h>	/* for syscall_get_* */
60 
61 /*
62  * SLAB caches for signal bits.
63  */
64 
65 static struct kmem_cache *sigqueue_cachep;
66 
67 int print_fatal_signals __read_mostly;
68 
69 static void __user *sig_handler(struct task_struct *t, int sig)
70 {
71 	return t->sighand->action[sig - 1].sa.sa_handler;
72 }
73 
74 static inline bool sig_handler_ignored(void __user *handler, int sig)
75 {
76 	/* Is it explicitly or implicitly ignored? */
77 	return handler == SIG_IGN ||
78 	       (handler == SIG_DFL && sig_kernel_ignore(sig));
79 }
80 
81 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
82 {
83 	void __user *handler;
84 
85 	handler = sig_handler(t, sig);
86 
87 	/* SIGKILL and SIGSTOP may not be sent to the global init */
88 	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
89 		return true;
90 
91 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
92 	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
93 		return true;
94 
95 	/* Only allow kernel generated signals to this kthread */
96 	if (unlikely((t->flags & PF_KTHREAD) &&
97 		     (handler == SIG_KTHREAD_KERNEL) && !force))
98 		return true;
99 
100 	return sig_handler_ignored(handler, sig);
101 }
102 
103 static bool sig_ignored(struct task_struct *t, int sig, bool force)
104 {
105 	/*
106 	 * Blocked signals are never ignored, since the
107 	 * signal handler may change by the time it is
108 	 * unblocked.
109 	 */
110 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
111 		return false;
112 
113 	/*
114 	 * Tracers may want to know about even ignored signals, unless it
115 	 * is SIGKILL, which can't be reported anyway but can be ignored
116 	 * by a SIGNAL_UNKILLABLE task.
117 	 */
118 	if (t->ptrace && sig != SIGKILL)
119 		return false;
120 
121 	return sig_task_ignored(t, sig, force);
122 }
123 
124 /*
125  * Re-calculate pending state from the set of locally pending
126  * signals, globally pending signals, and blocked signals.
127  */
128 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
129 {
130 	unsigned long ready;
131 	long i;
132 
133 	switch (_NSIG_WORDS) {
134 	default:
135 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
136 			ready |= signal->sig[i] &~ blocked->sig[i];
137 		break;
138 
139 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
140 		ready |= signal->sig[2] &~ blocked->sig[2];
141 		ready |= signal->sig[1] &~ blocked->sig[1];
142 		ready |= signal->sig[0] &~ blocked->sig[0];
143 		break;
144 
145 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
146 		ready |= signal->sig[0] &~ blocked->sig[0];
147 		break;
148 
149 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
150 	}
151 	return ready !=	0;
152 }
153 
154 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
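/*
 * Illustrative sketch (not from the original file): a signal only counts as
 * pending while it is not blocked.  Assuming SIGTERM is pending but blocked,
 * and SIGINT is pending and unblocked:
 *
 *	sigset_t pending, blocked;
 *
 *	siginitset(&pending, sigmask(SIGTERM) | sigmask(SIGINT));
 *	siginitset(&blocked, sigmask(SIGTERM));
 *	has_pending_signals(&pending, &blocked);	-> true, SIGINT survives the mask
 *	sigaddset(&blocked, SIGINT);
 *	has_pending_signals(&pending, &blocked);	-> false, everything is blocked
 */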
155 
156 static bool recalc_sigpending_tsk(struct task_struct *t)
157 {
158 	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
159 	    PENDING(&t->pending, &t->blocked) ||
160 	    PENDING(&t->signal->shared_pending, &t->blocked) ||
161 	    cgroup_task_frozen(t)) {
162 		set_tsk_thread_flag(t, TIF_SIGPENDING);
163 		return true;
164 	}
165 
166 	/*
167 	 * We must never clear the flag in another thread, or in current
168 	 * when it's possible the current syscall is returning -ERESTART*.
169 	 * So we don't clear it here; only callers who know they should clear it do so.
170 	 */
171 	return false;
172 }
173 
174 /*
175  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
176  * This is superfluous when called on current, the wakeup is a harmless no-op.
177  */
178 void recalc_sigpending_and_wake(struct task_struct *t)
179 {
180 	if (recalc_sigpending_tsk(t))
181 		signal_wake_up(t, 0);
182 }
183 
184 void recalc_sigpending(void)
185 {
186 	if (!recalc_sigpending_tsk(current) && !freezing(current))
187 		clear_thread_flag(TIF_SIGPENDING);
188 
189 }
190 EXPORT_SYMBOL(recalc_sigpending);
191 
192 void calculate_sigpending(void)
193 {
194 	/* Have any signals or users of TIF_SIGPENDING been delayed
195 	 * until after fork?
196 	 */
197 	spin_lock_irq(&current->sighand->siglock);
198 	set_tsk_thread_flag(current, TIF_SIGPENDING);
199 	recalc_sigpending();
200 	spin_unlock_irq(&current->sighand->siglock);
201 }
202 
203 /* Given the mask, find the first available signal that should be serviced. */
204 
205 #define SYNCHRONOUS_MASK \
206 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
207 	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
208 
209 int next_signal(struct sigpending *pending, sigset_t *mask)
210 {
211 	unsigned long i, *s, *m, x;
212 	int sig = 0;
213 
214 	s = pending->signal.sig;
215 	m = mask->sig;
216 
217 	/*
218 	 * Handle the first word specially: it contains the
219 	 * synchronous signals that need to be dequeued first.
220 	 */
221 	x = *s &~ *m;
222 	if (x) {
223 		if (x & SYNCHRONOUS_MASK)
224 			x &= SYNCHRONOUS_MASK;
225 		sig = ffz(~x) + 1;
226 		return sig;
227 	}
228 
229 	switch (_NSIG_WORDS) {
230 	default:
231 		for (i = 1; i < _NSIG_WORDS; ++i) {
232 			x = *++s &~ *++m;
233 			if (!x)
234 				continue;
235 			sig = ffz(~x) + i*_NSIG_BPW + 1;
236 			break;
237 		}
238 		break;
239 
240 	case 2:
241 		x = s[1] &~ m[1];
242 		if (!x)
243 			break;
244 		sig = ffz(~x) + _NSIG_BPW + 1;
245 		break;
246 
247 	case 1:
248 		/* Nothing to do */
249 		break;
250 	}
251 
252 	return sig;
253 }
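/*
 * Dequeue-priority sketch (illustrative, assuming SIGUSR1 and SIGSEGV are
 * both pending and unblocked): the first word is reduced to SYNCHRONOUS_MASK
 * before ffz(~x) picks the lowest bit, so the fault report wins even though
 * SIGUSR1 has the lower signal number:
 *
 *	sigaddset(&pending->signal, SIGUSR1);
 *	sigaddset(&pending->signal, SIGSEGV);
 *	next_signal(pending, &current->blocked);	-> SIGSEGV, not SIGUSR1
 */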
254 
255 static inline void print_dropped_signal(int sig)
256 {
257 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
258 
259 	if (!print_fatal_signals)
260 		return;
261 
262 	if (!__ratelimit(&ratelimit_state))
263 		return;
264 
265 	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
266 				current->comm, current->pid, sig);
267 }
268 
269 /**
270  * task_set_jobctl_pending - set jobctl pending bits
271  * @task: target task
272  * @mask: pending bits to set
273  *
274  * Set @mask on @task->jobctl.  @mask must be a subset of
275  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
276  * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
277  * cleared.  If @task is already being killed or exiting, this function
278  * becomes noop.
279  *
280  * CONTEXT:
281  * Must be called with @task->sighand->siglock held.
282  *
283  * RETURNS:
284  * %true if @mask is set, %false if made noop because @task was dying.
285  */
286 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
287 {
288 	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
289 			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
290 	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
291 
292 	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
293 		return false;
294 
295 	if (mask & JOBCTL_STOP_SIGMASK)
296 		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
297 
298 	task->jobctl |= mask;
299 	return true;
300 }
301 
302 /**
303  * task_clear_jobctl_trapping - clear jobctl trapping bit
304  * @task: target task
305  *
306  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
307  * Clear it and wake up the ptracer.  Note that we don't need any further
308  * locking.  @task->siglock guarantees that @task->parent points to the
309  * ptracer.
310  *
311  * CONTEXT:
312  * Must be called with @task->sighand->siglock held.
313  */
314 void task_clear_jobctl_trapping(struct task_struct *task)
315 {
316 	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
317 		task->jobctl &= ~JOBCTL_TRAPPING;
318 		smp_mb();	/* advised by wake_up_bit() */
319 		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
320 	}
321 }
322 
323 /**
324  * task_clear_jobctl_pending - clear jobctl pending bits
325  * @task: target task
326  * @mask: pending bits to clear
327  *
328  * Clear @mask from @task->jobctl.  @mask must be subset of
329  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
330  * STOP bits are cleared together.
331  *
332  * If clearing of @mask leaves no stop or trap pending, this function calls
333  * task_clear_jobctl_trapping().
334  *
335  * CONTEXT:
336  * Must be called with @task->sighand->siglock held.
337  */
338 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
339 {
340 	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
341 
342 	if (mask & JOBCTL_STOP_PENDING)
343 		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
344 
345 	task->jobctl &= ~mask;
346 
347 	if (!(task->jobctl & JOBCTL_PENDING_MASK))
348 		task_clear_jobctl_trapping(task);
349 }
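/*
 * Usage sketch for the two helpers above (hedged, not lifted from a real
 * caller): both run under siglock, and the stop signal number shares the
 * JOBCTL_STOP_SIGMASK bits with the pending flags.
 *
 *	spin_lock_irq(&task->sighand->siglock);
 *	task_set_jobctl_pending(task, SIGSTOP | JOBCTL_STOP_PENDING |
 *				      JOBCTL_STOP_CONSUME);
 *	...
 *	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
 *	spin_unlock_irq(&task->sighand->siglock);
 */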
350 
351 /**
352  * task_participate_group_stop - participate in a group stop
353  * @task: task participating in a group stop
354  *
355  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
356  * Group stop states are cleared and the group stop count is consumed if
357  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
358  * stop, the appropriate `SIGNAL_*` flags are set.
359  *
360  * CONTEXT:
361  * Must be called with @task->sighand->siglock held.
362  *
363  * RETURNS:
364  * %true if group stop completion should be notified to the parent, %false
365  * otherwise.
366  */
367 static bool task_participate_group_stop(struct task_struct *task)
368 {
369 	struct signal_struct *sig = task->signal;
370 	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
371 
372 	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
373 
374 	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
375 
376 	if (!consume)
377 		return false;
378 
379 	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
380 		sig->group_stop_count--;
381 
382 	/*
383 	 * Tell the caller to notify completion iff we are entering into a
384 	 * fresh group stop.  Read comment in do_signal_stop() for details.
385 	 */
386 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
387 		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
388 		return true;
389 	}
390 	return false;
391 }
392 
393 void task_join_group_stop(struct task_struct *task)
394 {
395 	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
396 	struct signal_struct *sig = current->signal;
397 
398 	if (sig->group_stop_count) {
399 		sig->group_stop_count++;
400 		mask |= JOBCTL_STOP_CONSUME;
401 	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
402 		return;
403 
404 	/* Have the new thread join an on-going signal group stop */
405 	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
406 }
407 
408 /*
409  * allocate a new signal queue record
410  * - this may be called without locks if and only if t == current, otherwise an
411  *   appropriate lock must be held to stop the target task from exiting
412  */
413 static struct sigqueue *
414 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
415 		 int override_rlimit, const unsigned int sigqueue_flags)
416 {
417 	struct sigqueue *q = NULL;
418 	struct ucounts *ucounts = NULL;
419 	long sigpending;
420 
421 	/*
422 	 * Protect access to @t credentials. This can go away when all
423 	 * callers hold rcu read lock.
424 	 *
425 	 * NOTE! A pending signal will hold on to the user refcount,
426 	 * and we get/put the refcount only when the sigpending count
427 	 * changes from/to zero.
428 	 */
429 	rcu_read_lock();
430 	ucounts = task_ucounts(t);
431 	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING,
432 					    override_rlimit);
433 	rcu_read_unlock();
434 	if (!sigpending)
435 		return NULL;
436 
437 	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
438 		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
439 	} else {
440 		print_dropped_signal(sig);
441 	}
442 
443 	if (unlikely(q == NULL)) {
444 		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
445 	} else {
446 		INIT_LIST_HEAD(&q->list);
447 		q->flags = sigqueue_flags;
448 		q->ucounts = ucounts;
449 	}
450 	return q;
451 }
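/*
 * Lifecycle sketch (illustrative): each queued entry charges one unit of the
 * sender's RLIMIT_SIGPENDING accounting via q->ucounts, released again by
 * __sigqueue_free() below.
 *
 *	struct sigqueue *q = __sigqueue_alloc(sig, t, GFP_ATOMIC, 0, 0);
 *
 *	if (q) {
 *		copy_siginfo(&q->info, info);
 *		list_add_tail(&q->list, &pending->list);
 *	}
 *	... later, when the signal is collected ...
 *	__sigqueue_free(q);	-> drops the UCOUNT_RLIMIT_SIGPENDING charge
 */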
452 
453 static void __sigqueue_free(struct sigqueue *q)
454 {
455 	if (q->flags & SIGQUEUE_PREALLOC)
456 		return;
457 	if (q->ucounts) {
458 		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
459 		q->ucounts = NULL;
460 	}
461 	kmem_cache_free(sigqueue_cachep, q);
462 }
463 
464 void flush_sigqueue(struct sigpending *queue)
465 {
466 	struct sigqueue *q;
467 
468 	sigemptyset(&queue->signal);
469 	while (!list_empty(&queue->list)) {
470 		q = list_entry(queue->list.next, struct sigqueue , list);
471 		list_del_init(&q->list);
472 		__sigqueue_free(q);
473 	}
474 }
475 
476 /*
477  * Flush all pending signals for this kthread.
478  */
479 void flush_signals(struct task_struct *t)
480 {
481 	unsigned long flags;
482 
483 	spin_lock_irqsave(&t->sighand->siglock, flags);
484 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
485 	flush_sigqueue(&t->pending);
486 	flush_sigqueue(&t->signal->shared_pending);
487 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
488 }
489 EXPORT_SYMBOL(flush_signals);
490 
491 #ifdef CONFIG_POSIX_TIMERS
492 static void __flush_itimer_signals(struct sigpending *pending)
493 {
494 	sigset_t signal, retain;
495 	struct sigqueue *q, *n;
496 
497 	signal = pending->signal;
498 	sigemptyset(&retain);
499 
500 	list_for_each_entry_safe(q, n, &pending->list, list) {
501 		int sig = q->info.si_signo;
502 
503 		if (likely(q->info.si_code != SI_TIMER)) {
504 			sigaddset(&retain, sig);
505 		} else {
506 			sigdelset(&signal, sig);
507 			list_del_init(&q->list);
508 			__sigqueue_free(q);
509 		}
510 	}
511 
512 	sigorsets(&pending->signal, &signal, &retain);
513 }
514 
515 void flush_itimer_signals(void)
516 {
517 	struct task_struct *tsk = current;
518 	unsigned long flags;
519 
520 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
521 	__flush_itimer_signals(&tsk->pending);
522 	__flush_itimer_signals(&tsk->signal->shared_pending);
523 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
524 }
525 #endif
526 
527 void ignore_signals(struct task_struct *t)
528 {
529 	int i;
530 
531 	for (i = 0; i < _NSIG; ++i)
532 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
533 
534 	flush_signals(t);
535 }
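/*
 * Hedged usage sketch (example_kthread() and do_work() are made-up names):
 * ignore_signals() is meant for kernel threads that must never react to
 * signals, e.g.:
 *
 *	static int example_kthread(void *data)
 *	{
 *		ignore_signals(current);
 *		while (!kthread_should_stop())
 *			do_work();
 *		return 0;
 *	}
 */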
536 
537 /*
538  * Flush all handlers for a task.
539  */
540 
541 void
542 flush_signal_handlers(struct task_struct *t, int force_default)
543 {
544 	int i;
545 	struct k_sigaction *ka = &t->sighand->action[0];
546 	for (i = _NSIG ; i != 0 ; i--) {
547 		if (force_default || ka->sa.sa_handler != SIG_IGN)
548 			ka->sa.sa_handler = SIG_DFL;
549 		ka->sa.sa_flags = 0;
550 #ifdef __ARCH_HAS_SA_RESTORER
551 		ka->sa.sa_restorer = NULL;
552 #endif
553 		sigemptyset(&ka->sa.sa_mask);
554 		ka++;
555 	}
556 }
557 
558 bool unhandled_signal(struct task_struct *tsk, int sig)
559 {
560 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
561 	if (is_global_init(tsk))
562 		return true;
563 
564 	if (handler != SIG_IGN && handler != SIG_DFL)
565 		return false;
566 
567 	/* If dying, we handle all new signals by ignoring them */
568 	if (fatal_signal_pending(tsk))
569 		return false;
570 
571 	/* if ptraced, let the tracer determine */
572 	return !tsk->ptrace;
573 }
574 
575 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
576 			   bool *resched_timer)
577 {
578 	struct sigqueue *q, *first = NULL;
579 
580 	/*
581 	 * Collect the siginfo appropriate to this signal.  Check if
582 	 * there is another siginfo for the same signal.
583 	*/
584 	list_for_each_entry(q, &list->list, list) {
585 		if (q->info.si_signo == sig) {
586 			if (first)
587 				goto still_pending;
588 			first = q;
589 		}
590 	}
591 
592 	sigdelset(&list->signal, sig);
593 
594 	if (first) {
595 still_pending:
596 		list_del_init(&first->list);
597 		copy_siginfo(info, &first->info);
598 
599 		*resched_timer =
600 			(first->flags & SIGQUEUE_PREALLOC) &&
601 			(info->si_code == SI_TIMER) &&
602 			(info->si_sys_private);
603 
604 		__sigqueue_free(first);
605 	} else {
606 		/*
607 		 * Ok, it wasn't in the queue.  This must be
608 		 * a fast-pathed signal or we must have been
609 		 * out of queue space.  So zero out the info.
610 		 */
611 		clear_siginfo(info);
612 		info->si_signo = sig;
613 		info->si_errno = 0;
614 		info->si_code = SI_USER;
615 		info->si_pid = 0;
616 		info->si_uid = 0;
617 	}
618 }
619 
620 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
621 			kernel_siginfo_t *info, bool *resched_timer)
622 {
623 	int sig = next_signal(pending, mask);
624 
625 	if (sig)
626 		collect_signal(sig, pending, info, resched_timer);
627 	return sig;
628 }
629 
630 /*
631  * Dequeue a signal and return the element to the caller, which is
632  * expected to free it.
633  *
634  * All callers have to hold the siglock.
635  */
636 int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
637 		   kernel_siginfo_t *info, enum pid_type *type)
638 {
639 	bool resched_timer = false;
640 	int signr;
641 
642 	/* We only dequeue private signals from ourselves, we don't let
643 	 * signalfd steal them
644 	 */
645 	*type = PIDTYPE_PID;
646 	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
647 	if (!signr) {
648 		*type = PIDTYPE_TGID;
649 		signr = __dequeue_signal(&tsk->signal->shared_pending,
650 					 mask, info, &resched_timer);
651 #ifdef CONFIG_POSIX_TIMERS
652 		/*
653 		 * itimer signal ?
654 		 *
655 		 * itimers are process shared and we restart periodic
656 		 * itimers in the signal delivery path to prevent DoS
657 		 * attacks in the high resolution timer case. This is
658 		 * compliant with the old way of self-restarting
659 		 * itimers, as the SIGALRM is a legacy signal and only
660 		 * queued once. Changing the restart behaviour to
661 		 * restart the timer in the signal dequeue path is
662 		 * reducing the timer noise on heavy loaded !highres
663 		 * systems too.
664 		 */
665 		if (unlikely(signr == SIGALRM)) {
666 			struct hrtimer *tmr = &tsk->signal->real_timer;
667 
668 			if (!hrtimer_is_queued(tmr) &&
669 			    tsk->signal->it_real_incr != 0) {
670 				hrtimer_forward(tmr, tmr->base->get_time(),
671 						tsk->signal->it_real_incr);
672 				hrtimer_restart(tmr);
673 			}
674 		}
675 #endif
676 	}
677 
678 	recalc_sigpending();
679 	if (!signr)
680 		return 0;
681 
682 	if (unlikely(sig_kernel_stop(signr))) {
683 		/*
684 		 * Set a marker that we have dequeued a stop signal.  Our
685 		 * caller might release the siglock and then the pending
686 		 * stop signal it is about to process is no longer in the
687 		 * pending bitmasks, but must still be cleared by a SIGCONT
688 		 * (and overruled by a SIGKILL).  So those cases clear this
689 		 * shared flag after we've set it.  Note that this flag may
690 		 * remain set after the signal we return is ignored or
691 		 * handled.  That doesn't matter because its only purpose
692 		 * is to alert stop-signal processing code when another
693 		 * processor has come along and cleared the flag.
694 		 */
695 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
696 	}
697 #ifdef CONFIG_POSIX_TIMERS
698 	if (resched_timer) {
699 		/*
700 		 * Release the siglock to ensure proper locking order
701 		 * of timer locks outside of siglocks.  Note, we leave
702 		 * irqs disabled here, since the posix-timers code is
703 		 * about to disable them again anyway.
704 		 */
705 		spin_unlock(&tsk->sighand->siglock);
706 		posixtimer_rearm(info);
707 		spin_lock(&tsk->sighand->siglock);
708 
709 		/* Don't expose the si_sys_private value to userspace */
710 		info->si_sys_private = 0;
711 	}
712 #endif
713 	return signr;
714 }
715 EXPORT_SYMBOL_GPL(dequeue_signal);
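/*
 * Caller sketch (illustrative; mirrors the pattern used by signal dequeueing
 * paths such as signalfd, and handle() stands in for the caller's work): the
 * siglock must be held across the call.
 *
 *	kernel_siginfo_t info;
 *	enum pid_type type;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info, &type);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	if (signr)
 *		handle(signr, &info);
 */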
716 
717 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
718 {
719 	struct task_struct *tsk = current;
720 	struct sigpending *pending = &tsk->pending;
721 	struct sigqueue *q, *sync = NULL;
722 
723 	/*
724 	 * Might a synchronous signal be in the queue?
725 	 */
726 	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
727 		return 0;
728 
729 	/*
730 	 * Return the first synchronous signal in the queue.
731 	 */
732 	list_for_each_entry(q, &pending->list, list) {
733 		/* Synchronous signals have a positive si_code */
734 		if ((q->info.si_code > SI_USER) &&
735 		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
736 			sync = q;
737 			goto next;
738 		}
739 	}
740 	return 0;
741 next:
742 	/*
743 	 * Check if there is another siginfo for the same signal.
744 	 */
745 	list_for_each_entry_continue(q, &pending->list, list) {
746 		if (q->info.si_signo == sync->info.si_signo)
747 			goto still_pending;
748 	}
749 
750 	sigdelset(&pending->signal, sync->info.si_signo);
751 	recalc_sigpending();
752 still_pending:
753 	list_del_init(&sync->list);
754 	copy_siginfo(info, &sync->info);
755 	__sigqueue_free(sync);
756 	return info->si_signo;
757 }
758 
759 /*
760  * Tell a process that it has a new active signal.
761  *
762  * NOTE! we rely on the previous spin_lock to
763  * lock interrupts for us! We can only be called with
764  * "siglock" held, and the local interrupt must
765  * have been disabled when that got acquired!
766  *
767  * No need to set need_resched since signal event passing
768  * goes through ->blocked
769  */
770 void signal_wake_up_state(struct task_struct *t, unsigned int state)
771 {
772 	lockdep_assert_held(&t->sighand->siglock);
773 
774 	set_tsk_thread_flag(t, TIF_SIGPENDING);
775 
776 	/*
777 	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
778 	 * case. We don't check t->state here because there is a race with it
779 	 * executing on another processor and just now entering stopped state.
780 	 * By using wake_up_state, we ensure the process will wake up and
781 	 * handle its death signal.
782 	 */
783 	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
784 		kick_process(t);
785 }
786 
787 /*
788  * Remove signals in mask from the pending set and queue.
790  *
791  * All callers must be holding the siglock.
792  */
793 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
794 {
795 	struct sigqueue *q, *n;
796 	sigset_t m;
797 
798 	sigandsets(&m, mask, &s->signal);
799 	if (sigisemptyset(&m))
800 		return;
801 
802 	sigandnsets(&s->signal, &s->signal, mask);
803 	list_for_each_entry_safe(q, n, &s->list, list) {
804 		if (sigismember(mask, q->info.si_signo)) {
805 			list_del_init(&q->list);
806 			__sigqueue_free(q);
807 		}
808 	}
809 }
810 
811 static inline int is_si_special(const struct kernel_siginfo *info)
812 {
813 	return info <= SEND_SIG_PRIV;
814 }
815 
816 static inline bool si_fromuser(const struct kernel_siginfo *info)
817 {
818 	return info == SEND_SIG_NOINFO ||
819 		(!is_si_special(info) && SI_FROMUSER(info));
820 }
821 
822 /*
823  * called with RCU read lock from check_kill_permission()
824  */
825 static bool kill_ok_by_cred(struct task_struct *t)
826 {
827 	const struct cred *cred = current_cred();
828 	const struct cred *tcred = __task_cred(t);
829 
830 	return uid_eq(cred->euid, tcred->suid) ||
831 	       uid_eq(cred->euid, tcred->uid) ||
832 	       uid_eq(cred->uid, tcred->suid) ||
833 	       uid_eq(cred->uid, tcred->uid) ||
834 	       ns_capable(tcred->user_ns, CAP_KILL);
835 }
836 
837 /*
838  * Bad permissions for sending the signal
839  * - the caller must hold the RCU read lock
840  */
841 static int check_kill_permission(int sig, struct kernel_siginfo *info,
842 				 struct task_struct *t)
843 {
844 	struct pid *sid;
845 	int error;
846 
847 	if (!valid_signal(sig))
848 		return -EINVAL;
849 
850 	if (!si_fromuser(info))
851 		return 0;
852 
853 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
854 	if (error)
855 		return error;
856 
857 	if (!same_thread_group(current, t) &&
858 	    !kill_ok_by_cred(t)) {
859 		switch (sig) {
860 		case SIGCONT:
861 			sid = task_session(t);
862 			/*
863 			 * We don't return the error if sid == NULL. The
864 			 * task was unhashed, the caller must notice this.
865 			 */
866 			if (!sid || sid == task_session(current))
867 				break;
868 			fallthrough;
869 		default:
870 			return -EPERM;
871 		}
872 	}
873 
874 	return security_task_kill(t, info, sig, NULL);
875 }
876 
877 /**
878  * ptrace_trap_notify - schedule trap to notify ptracer
879  * @t: tracee wanting to notify tracer
880  *
881  * This function schedules sticky ptrace trap which is cleared on the next
882  * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
883  * ptracer.
884  *
885  * If @t is running, STOP trap will be taken.  If trapped for STOP and
886  * ptracer is listening for events, tracee is woken up so that it can
887  * re-trap for the new event.  If trapped otherwise, STOP trap will be
888  * eventually taken without returning to userland after the existing traps
889  * are finished by PTRACE_CONT.
890  *
891  * CONTEXT:
892  * Must be called with @task->sighand->siglock held.
893  */
894 static void ptrace_trap_notify(struct task_struct *t)
895 {
896 	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
897 	lockdep_assert_held(&t->sighand->siglock);
898 
899 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
900 	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
901 }
902 
903 /*
904  * Handle magic process-wide effects of stop/continue signals. Unlike
905  * the signal actions, these happen immediately at signal-generation
906  * time regardless of blocking, ignoring, or handling.  This does the
907  * actual continuing for SIGCONT, but not the actual stopping for stop
908  * signals. The process stop is done as a signal action for SIG_DFL.
909  *
910  * Returns true if the signal should be actually delivered, otherwise
911  * it should be dropped.
912  */
913 static bool prepare_signal(int sig, struct task_struct *p, bool force)
914 {
915 	struct signal_struct *signal = p->signal;
916 	struct task_struct *t;
917 	sigset_t flush;
918 
919 	if (signal->flags & SIGNAL_GROUP_EXIT) {
920 		if (signal->core_state)
921 			return sig == SIGKILL;
922 		/*
923 		 * The process is in the middle of dying, drop the signal.
924 		 */
925 		return false;
926 	} else if (sig_kernel_stop(sig)) {
927 		/*
928 		 * This is a stop signal.  Remove SIGCONT from all queues.
929 		 */
930 		siginitset(&flush, sigmask(SIGCONT));
931 		flush_sigqueue_mask(&flush, &signal->shared_pending);
932 		for_each_thread(p, t)
933 			flush_sigqueue_mask(&flush, &t->pending);
934 	} else if (sig == SIGCONT) {
935 		unsigned int why;
936 		/*
937 		 * Remove all stop signals from all queues, wake all threads.
938 		 */
939 		siginitset(&flush, SIG_KERNEL_STOP_MASK);
940 		flush_sigqueue_mask(&flush, &signal->shared_pending);
941 		for_each_thread(p, t) {
942 			flush_sigqueue_mask(&flush, &t->pending);
943 			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
944 			if (likely(!(t->ptrace & PT_SEIZED))) {
945 				t->jobctl &= ~JOBCTL_STOPPED;
946 				wake_up_state(t, __TASK_STOPPED);
947 			} else
948 				ptrace_trap_notify(t);
949 		}
950 
951 		/*
952 		 * Notify the parent with CLD_CONTINUED if we were stopped.
953 		 *
954 		 * If we were in the middle of a group stop, we pretend it
955 		 * was already finished, and then continued. Since SIGCHLD
956 		 * doesn't queue we report only CLD_STOPPED, as if the next
957 		 * CLD_CONTINUED was dropped.
958 		 */
959 		why = 0;
960 		if (signal->flags & SIGNAL_STOP_STOPPED)
961 			why |= SIGNAL_CLD_CONTINUED;
962 		else if (signal->group_stop_count)
963 			why |= SIGNAL_CLD_STOPPED;
964 
965 		if (why) {
966 			/*
967 			 * The first thread which returns from do_signal_stop()
968 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
969 			 * notify its parent. See get_signal().
970 			 */
971 			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
972 			signal->group_stop_count = 0;
973 			signal->group_exit_code = 0;
974 		}
975 	}
976 
977 	return !sig_ignored(p, sig, force);
978 }
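/*
 * Userspace-visible effect of the SIGCONT handling above, as a minimal
 * sketch: the resume happens at signal-generation time, even if the target
 * blocks or ignores SIGCONT.
 *
 *	kill(pid, SIGSTOP);	-> whole group stops (default action)
 *	kill(pid, SIGCONT);	-> group resumes right here: pending stop
 *				   signals are flushed and CLD_CONTINUED may
 *				   be reported to the parent
 */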
979 
980 /*
981  * Test if P wants to take SIG.  After we've checked all threads with this,
982  * it's equivalent to finding no threads not blocking SIG.  Any threads not
983  * blocking SIG were ruled out because they are not running and already
984  * have pending signals.  Such threads will dequeue from the shared queue
985  * as soon as they're available, so putting the signal on the shared queue
986  * will be equivalent to sending it to one such thread.
987  */
988 static inline bool wants_signal(int sig, struct task_struct *p)
989 {
990 	if (sigismember(&p->blocked, sig))
991 		return false;
992 
993 	if (p->flags & PF_EXITING)
994 		return false;
995 
996 	if (sig == SIGKILL)
997 		return true;
998 
999 	if (task_is_stopped_or_traced(p))
1000 		return false;
1001 
1002 	return task_curr(p) || !task_sigpending(p);
1003 }
1004 
1005 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
1006 {
1007 	struct signal_struct *signal = p->signal;
1008 	struct task_struct *t;
1009 
1010 	/*
1011 	 * Now find a thread we can wake up to take the signal off the queue.
1012 	 *
1013 	 * Try the suggested task first (may or may not be the main thread).
1014 	 */
1015 	if (wants_signal(sig, p))
1016 		t = p;
1017 	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1018 		/*
1019 		 * There is just one thread and it does not need to be woken.
1020 		 * It will dequeue unblocked signals before it runs again.
1021 		 */
1022 		return;
1023 	else {
1024 		/*
1025 		 * Otherwise try to find a suitable thread.
1026 		 */
1027 		t = signal->curr_target;
1028 		while (!wants_signal(sig, t)) {
1029 			t = next_thread(t);
1030 			if (t == signal->curr_target)
1031 				/*
1032 				 * No thread needs to be woken.
1033 				 * Any eligible threads will see
1034 				 * the signal in the queue soon.
1035 				 */
1036 				return;
1037 		}
1038 		signal->curr_target = t;
1039 	}
1040 
1041 	/*
1042 	 * Found a killable thread.  If the signal will be fatal,
1043 	 * then start taking the whole group down immediately.
1044 	 */
1045 	if (sig_fatal(p, sig) &&
1046 	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
1047 	    !sigismember(&t->real_blocked, sig) &&
1048 	    (sig == SIGKILL || !p->ptrace)) {
1049 		/*
1050 		 * This signal will be fatal to the whole group.
1051 		 */
1052 		if (!sig_kernel_coredump(sig)) {
1053 			/*
1054 			 * Start a group exit and wake everybody up.
1055 			 * This way we don't have other threads
1056 			 * running and doing things after a slower
1057 			 * thread has the fatal signal pending.
1058 			 */
1059 			signal->flags = SIGNAL_GROUP_EXIT;
1060 			signal->group_exit_code = sig;
1061 			signal->group_stop_count = 0;
1062 			t = p;
1063 			do {
1064 				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1065 				sigaddset(&t->pending.signal, SIGKILL);
1066 				signal_wake_up(t, 1);
1067 			} while_each_thread(p, t);
1068 			return;
1069 		}
1070 	}
1071 
1072 	/*
1073 	 * The signal is already in the shared-pending queue.
1074 	 * Tell the chosen thread to wake up and dequeue it.
1075 	 */
1076 	signal_wake_up(t, sig == SIGKILL);
1077 	return;
1078 }
1079 
1080 static inline bool legacy_queue(struct sigpending *signals, int sig)
1081 {
1082 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1083 }
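/*
 * Coalescing sketch (illustrative): a classic (non-RT) signal that is
 * already pending is not queued a second time, while real-time signals
 * queue every occurrence.
 *
 *	send_sig(SIGCHLD, p, 1);	-> queued
 *	send_sig(SIGCHLD, p, 1);	-> legacy_queue() is true, coalesced
 *	send_sig(SIGRTMIN, p, 1);	-> queued
 *	send_sig(SIGRTMIN, p, 1);	-> queued again, RT signals don't coalesce
 */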
1084 
1085 static int __send_signal_locked(int sig, struct kernel_siginfo *info,
1086 				struct task_struct *t, enum pid_type type, bool force)
1087 {
1088 	struct sigpending *pending;
1089 	struct sigqueue *q;
1090 	int override_rlimit;
1091 	int ret = 0, result;
1092 
1093 	lockdep_assert_held(&t->sighand->siglock);
1094 
1095 	result = TRACE_SIGNAL_IGNORED;
1096 	if (!prepare_signal(sig, t, force))
1097 		goto ret;
1098 
1099 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1100 	/*
1101 	 * Short-circuit ignored signals and support queuing
1102 	 * exactly one non-rt signal, so that we can get more
1103 	 * detailed information about the cause of the signal.
1104 	 */
1105 	result = TRACE_SIGNAL_ALREADY_PENDING;
1106 	if (legacy_queue(pending, sig))
1107 		goto ret;
1108 
1109 	result = TRACE_SIGNAL_DELIVERED;
1110 	/*
1111 	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1112 	 */
1113 	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1114 		goto out_set;
1115 
1116 	/*
1117 	 * Real-time signals must be queued if sent by sigqueue, or
1118 	 * some other real-time mechanism.  It is implementation
1119 	 * defined whether kill() does so.  We attempt to do so, on
1120 	 * the principle of least surprise, but since kill is not
1121 	 * allowed to fail with EAGAIN when low on memory we just
1122 	 * make sure at least one signal gets delivered and don't
1123 	 * pass on the info struct.
1124 	 */
1125 	if (sig < SIGRTMIN)
1126 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1127 	else
1128 		override_rlimit = 0;
1129 
1130 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1131 
1132 	if (q) {
1133 		list_add_tail(&q->list, &pending->list);
1134 		switch ((unsigned long) info) {
1135 		case (unsigned long) SEND_SIG_NOINFO:
1136 			clear_siginfo(&q->info);
1137 			q->info.si_signo = sig;
1138 			q->info.si_errno = 0;
1139 			q->info.si_code = SI_USER;
1140 			q->info.si_pid = task_tgid_nr_ns(current,
1141 							task_active_pid_ns(t));
1142 			rcu_read_lock();
1143 			q->info.si_uid =
1144 				from_kuid_munged(task_cred_xxx(t, user_ns),
1145 						 current_uid());
1146 			rcu_read_unlock();
1147 			break;
1148 		case (unsigned long) SEND_SIG_PRIV:
1149 			clear_siginfo(&q->info);
1150 			q->info.si_signo = sig;
1151 			q->info.si_errno = 0;
1152 			q->info.si_code = SI_KERNEL;
1153 			q->info.si_pid = 0;
1154 			q->info.si_uid = 0;
1155 			break;
1156 		default:
1157 			copy_siginfo(&q->info, info);
1158 			break;
1159 		}
1160 	} else if (!is_si_special(info) &&
1161 		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1162 		/*
1163 		 * Queue overflow, abort.  We may abort if the
1164 		 * signal was rt and sent by user using something
1165 		 * other than kill().
1166 		 */
1167 		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1168 		ret = -EAGAIN;
1169 		goto ret;
1170 	} else {
1171 		/*
1172 		 * This is a silent loss of information.  We still
1173 		 * send the signal, but the *info bits are lost.
1174 		 */
1175 		result = TRACE_SIGNAL_LOSE_INFO;
1176 	}
1177 
1178 out_set:
1179 	signalfd_notify(t, sig);
1180 	sigaddset(&pending->signal, sig);
1181 
1182 	/* Let multiprocess signals appear after on-going forks */
1183 	if (type > PIDTYPE_TGID) {
1184 		struct multiprocess_signals *delayed;
1185 		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1186 			sigset_t *signal = &delayed->signal;
1187 			/* Can't queue both a stop and a continue signal */
1188 			if (sig == SIGCONT)
1189 				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1190 			else if (sig_kernel_stop(sig))
1191 				sigdelset(signal, SIGCONT);
1192 			sigaddset(signal, sig);
1193 		}
1194 	}
1195 
1196 	complete_signal(sig, t, type);
1197 ret:
1198 	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1199 	return ret;
1200 }
1201 
1202 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1203 {
1204 	bool ret = false;
1205 	switch (siginfo_layout(info->si_signo, info->si_code)) {
1206 	case SIL_KILL:
1207 	case SIL_CHLD:
1208 	case SIL_RT:
1209 		ret = true;
1210 		break;
1211 	case SIL_TIMER:
1212 	case SIL_POLL:
1213 	case SIL_FAULT:
1214 	case SIL_FAULT_TRAPNO:
1215 	case SIL_FAULT_MCEERR:
1216 	case SIL_FAULT_BNDERR:
1217 	case SIL_FAULT_PKUERR:
1218 	case SIL_FAULT_PERF_EVENT:
1219 	case SIL_SYS:
1220 		ret = false;
1221 		break;
1222 	}
1223 	return ret;
1224 }
1225 
1226 int send_signal_locked(int sig, struct kernel_siginfo *info,
1227 		       struct task_struct *t, enum pid_type type)
1228 {
1229 	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1230 	bool force = false;
1231 
1232 	if (info == SEND_SIG_NOINFO) {
1233 		/* Force if sent from an ancestor pid namespace */
1234 		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1235 	} else if (info == SEND_SIG_PRIV) {
1236 		/* Don't ignore kernel generated signals */
1237 		force = true;
1238 	} else if (has_si_pid_and_uid(info)) {
1239 		/* SIGKILL and SIGSTOP are special or have ids */
1240 		struct user_namespace *t_user_ns;
1241 
1242 		rcu_read_lock();
1243 		t_user_ns = task_cred_xxx(t, user_ns);
1244 		if (current_user_ns() != t_user_ns) {
1245 			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1246 			info->si_uid = from_kuid_munged(t_user_ns, uid);
1247 		}
1248 		rcu_read_unlock();
1249 
1250 		/* A kernel generated signal? */
1251 		force = (info->si_code == SI_KERNEL);
1252 
1253 		/* From an ancestor pid namespace? */
1254 		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1255 			info->si_pid = 0;
1256 			force = true;
1257 		}
1258 	}
1259 	return __send_signal_locked(sig, info, t, type, force);
1260 }
1261 
1262 static void print_fatal_signal(int signr)
1263 {
1264 	struct pt_regs *regs = task_pt_regs(current);
1265 	struct file *exe_file;
1266 
1267 	exe_file = get_task_exe_file(current);
1268 	if (exe_file) {
1269 		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
1270 			exe_file, current->comm, signr);
1271 		fput(exe_file);
1272 	} else {
1273 		pr_info("%s: potentially unexpected fatal signal %d.\n",
1274 			current->comm, signr);
1275 	}
1276 
1277 #if defined(__i386__) && !defined(__arch_um__)
1278 	pr_info("code at %08lx: ", regs->ip);
1279 	{
1280 		int i;
1281 		for (i = 0; i < 16; i++) {
1282 			unsigned char insn;
1283 
1284 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1285 				break;
1286 			pr_cont("%02x ", insn);
1287 		}
1288 	}
1289 	pr_cont("\n");
1290 #endif
1291 	preempt_disable();
1292 	show_regs(regs);
1293 	preempt_enable();
1294 }
1295 
1296 static int __init setup_print_fatal_signals(char *str)
1297 {
1298 	get_option (&str, &print_fatal_signals);
1299 
1300 	return 1;
1301 }
1302 
1303 __setup("print-fatal-signals=", setup_print_fatal_signals);
1304 
1305 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1306 			enum pid_type type)
1307 {
1308 	unsigned long flags;
1309 	int ret = -ESRCH;
1310 
1311 	if (lock_task_sighand(p, &flags)) {
1312 		ret = send_signal_locked(sig, info, p, type);
1313 		unlock_task_sighand(p, &flags);
1314 	}
1315 
1316 	return ret;
1317 }
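/*
 * Kernel-side sending sketch (illustrative; the SIGIO choice and the "task"
 * variable are made up): a subsystem signalling a whole thread group would
 * typically do
 *
 *	struct kernel_siginfo info;
 *
 *	clear_siginfo(&info);
 *	info.si_signo = SIGIO;
 *	info.si_code  = SI_KERNEL;
 *	do_send_sig_info(SIGIO, &info, task, PIDTYPE_TGID);
 *
 * or simply do_send_sig_info(SIGIO, SEND_SIG_PRIV, task, PIDTYPE_TGID) when
 * no extra payload is needed.
 */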
1318 
1319 enum sig_handler {
1320 	HANDLER_CURRENT, /* If reachable use the current handler */
1321 	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1322 	HANDLER_EXIT,	 /* Only visible as the process exit code */
1323 };
1324 
1325 /*
1326  * Force a signal that the process can't ignore: if necessary
1327  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1328  *
1329  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1330  * since we do not want to have a signal handler that was blocked
1331  * be invoked when user space had explicitly blocked it.
1332  *
1333  * We don't want to have recursive SIGSEGV's etc, for example,
1334  * that is why we also clear SIGNAL_UNKILLABLE.
1335  */
1336 static int
1337 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1338 	enum sig_handler handler)
1339 {
1340 	unsigned long int flags;
1341 	int ret, blocked, ignored;
1342 	struct k_sigaction *action;
1343 	int sig = info->si_signo;
1344 
1345 	spin_lock_irqsave(&t->sighand->siglock, flags);
1346 	action = &t->sighand->action[sig-1];
1347 	ignored = action->sa.sa_handler == SIG_IGN;
1348 	blocked = sigismember(&t->blocked, sig);
1349 	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1350 		action->sa.sa_handler = SIG_DFL;
1351 		if (handler == HANDLER_EXIT)
1352 			action->sa.sa_flags |= SA_IMMUTABLE;
1353 		if (blocked) {
1354 			sigdelset(&t->blocked, sig);
1355 			recalc_sigpending_and_wake(t);
1356 		}
1357 	}
1358 	/*
1359 	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1360 	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1361 	 */
1362 	if (action->sa.sa_handler == SIG_DFL &&
1363 	    (!t->ptrace || (handler == HANDLER_EXIT)))
1364 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1365 	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
1366 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1367 
1368 	return ret;
1369 }
1370 
1371 int force_sig_info(struct kernel_siginfo *info)
1372 {
1373 	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1374 }
1375 
1376 /*
1377  * Nuke all other threads in the group.
1378  */
1379 int zap_other_threads(struct task_struct *p)
1380 {
1381 	struct task_struct *t = p;
1382 	int count = 0;
1383 
1384 	p->signal->group_stop_count = 0;
1385 
1386 	while_each_thread(p, t) {
1387 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1388 		/* Don't require de_thread to wait for the vhost_worker */
1389 		if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
1390 			count++;
1391 
1392 		/* Don't bother with already dead threads */
1393 		if (t->exit_state)
1394 			continue;
1395 		sigaddset(&t->pending.signal, SIGKILL);
1396 		signal_wake_up(t, 1);
1397 	}
1398 
1399 	return count;
1400 }
1401 
1402 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1403 					   unsigned long *flags)
1404 {
1405 	struct sighand_struct *sighand;
1406 
1407 	rcu_read_lock();
1408 	for (;;) {
1409 		sighand = rcu_dereference(tsk->sighand);
1410 		if (unlikely(sighand == NULL))
1411 			break;
1412 
1413 		/*
1414 		 * This sighand can be already freed and even reused, but
1415 		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1416 		 * initializes ->siglock: this slab can't go away, it has
1417 		 * the same object type, ->siglock can't be reinitialized.
1418 		 *
1419 		 * We need to ensure that tsk->sighand is still the same
1420 		 * after we take the lock, we can race with de_thread() or
1421 		 * __exit_signal(). In the latter case the next iteration
1422 		 * must see ->sighand == NULL.
1423 		 */
1424 		spin_lock_irqsave(&sighand->siglock, *flags);
1425 		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1426 			break;
1427 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1428 	}
1429 	rcu_read_unlock();
1430 
1431 	return sighand;
1432 }
1433 
1434 #ifdef CONFIG_LOCKDEP
1435 void lockdep_assert_task_sighand_held(struct task_struct *task)
1436 {
1437 	struct sighand_struct *sighand;
1438 
1439 	rcu_read_lock();
1440 	sighand = rcu_dereference(task->sighand);
1441 	if (sighand)
1442 		lockdep_assert_held(&sighand->siglock);
1443 	else
1444 		WARN_ON_ONCE(1);
1445 	rcu_read_unlock();
1446 }
1447 #endif
1448 
1449 /*
1450  * send signal info to all the members of a group
1451  */
1452 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1453 			struct task_struct *p, enum pid_type type)
1454 {
1455 	int ret;
1456 
1457 	rcu_read_lock();
1458 	ret = check_kill_permission(sig, info, p);
1459 	rcu_read_unlock();
1460 
1461 	if (!ret && sig)
1462 		ret = do_send_sig_info(sig, info, p, type);
1463 
1464 	return ret;
1465 }
1466 
1467 /*
1468  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1469  * control characters do (^C, ^Z etc)
1470  * - the caller must hold at least a readlock on tasklist_lock
1471  */
1472 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1473 {
1474 	struct task_struct *p = NULL;
1475 	int retval, success;
1476 
1477 	success = 0;
1478 	retval = -ESRCH;
1479 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1480 		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1481 		success |= !err;
1482 		retval = err;
1483 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1484 	return success ? 0 : retval;
1485 }
1486 
1487 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1488 {
1489 	int error = -ESRCH;
1490 	struct task_struct *p;
1491 
1492 	for (;;) {
1493 		rcu_read_lock();
1494 		p = pid_task(pid, PIDTYPE_PID);
1495 		if (p)
1496 			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1497 		rcu_read_unlock();
1498 		if (likely(!p || error != -ESRCH))
1499 			return error;
1500 
1501 		/*
1502 		 * The task was unhashed in between, try again.  If it
1503 		 * is dead, pid_task() will return NULL, if we race with
1504 		 * de_thread() it will find the new leader.
1505 		 */
1506 	}
1507 }
1508 
1509 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1510 {
1511 	int error;
1512 	rcu_read_lock();
1513 	error = kill_pid_info(sig, info, find_vpid(pid));
1514 	rcu_read_unlock();
1515 	return error;
1516 }
1517 
1518 static inline bool kill_as_cred_perm(const struct cred *cred,
1519 				     struct task_struct *target)
1520 {
1521 	const struct cred *pcred = __task_cred(target);
1522 
1523 	return uid_eq(cred->euid, pcred->suid) ||
1524 	       uid_eq(cred->euid, pcred->uid) ||
1525 	       uid_eq(cred->uid, pcred->suid) ||
1526 	       uid_eq(cred->uid, pcred->uid);
1527 }
1528 
1529 /*
1530  * The usb asyncio usage of siginfo is wrong.  The glibc support
1531  * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1532  * AKA after the generic fields:
1533  *	kernel_pid_t	si_pid;
1534  *	kernel_uid32_t	si_uid;
1535  *	sigval_t	si_value;
1536  *
1537  * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1538  * after the generic fields is:
1539  *	void __user 	*si_addr;
1540  *
1541  * This is a practical problem when there is a 64bit big endian kernel
1542  * and a 32bit userspace.  The 32bit address will be encoded in the low
1543  * 32bits of the pointer, and those low 32bits will be stored at a higher
1544  * address than they would appear at in a 32 bit pointer.  So userspace
1545  * will not see the address it was expecting for its completions.
1546  *
1547  * There is nothing in the encoding that can allow
1548  * copy_siginfo_to_user32 to detect this confusion of formats, so
1549  * handle this by requiring the caller of kill_pid_usb_asyncio to
1550  * notice when this situation takes place and to store the 32bit
1551  * pointer in sival_int, instead of sival_addr of the sigval_t addr
1552  * parameter.
1553  */
1554 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1555 			 struct pid *pid, const struct cred *cred)
1556 {
1557 	struct kernel_siginfo info;
1558 	struct task_struct *p;
1559 	unsigned long flags;
1560 	int ret = -EINVAL;
1561 
1562 	if (!valid_signal(sig))
1563 		return ret;
1564 
1565 	clear_siginfo(&info);
1566 	info.si_signo = sig;
1567 	info.si_errno = errno;
1568 	info.si_code = SI_ASYNCIO;
1569 	*((sigval_t *)&info.si_pid) = addr;
1570 
1571 	rcu_read_lock();
1572 	p = pid_task(pid, PIDTYPE_PID);
1573 	if (!p) {
1574 		ret = -ESRCH;
1575 		goto out_unlock;
1576 	}
1577 	if (!kill_as_cred_perm(cred, p)) {
1578 		ret = -EPERM;
1579 		goto out_unlock;
1580 	}
1581 	ret = security_task_kill(p, &info, sig, cred);
1582 	if (ret)
1583 		goto out_unlock;
1584 
1585 	if (sig) {
1586 		if (lock_task_sighand(p, &flags)) {
1587 			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
1588 			unlock_task_sighand(p, &flags);
1589 		} else
1590 			ret = -ESRCH;
1591 	}
1592 out_unlock:
1593 	rcu_read_unlock();
1594 	return ret;
1595 }
1596 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1597 
1598 /*
1599  * kill_something_info() interprets pid in interesting ways just like kill(2).
1600  *
1601  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1602  * is probably wrong.  Should make it like BSD or SYSV.
1603  */
1604 
1605 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1606 {
1607 	int ret;
1608 
1609 	if (pid > 0)
1610 		return kill_proc_info(sig, info, pid);
1611 
1612 	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1613 	if (pid == INT_MIN)
1614 		return -ESRCH;
1615 
1616 	read_lock(&tasklist_lock);
1617 	if (pid != -1) {
1618 		ret = __kill_pgrp_info(sig, info,
1619 				pid ? find_vpid(-pid) : task_pgrp(current));
1620 	} else {
1621 		int retval = 0, count = 0;
1622 		struct task_struct * p;
1623 
1624 		for_each_process(p) {
1625 			if (task_pid_vnr(p) > 1 &&
1626 					!same_thread_group(p, current)) {
1627 				int err = group_send_sig_info(sig, info, p,
1628 							      PIDTYPE_MAX);
1629 				++count;
1630 				if (err != -EPERM)
1631 					retval = err;
1632 			}
1633 		}
1634 		ret = count ? retval : -ESRCH;
1635 	}
1636 	read_unlock(&tasklist_lock);
1637 
1638 	return ret;
1639 }
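/*
 * pid interpretation of the function above, matching kill(2) as seen from
 * userspace (the concrete pids below are made up):
 *
 *	kill(1234, SIGTERM);	-> just the process with pid 1234
 *	kill(0, SIGTERM);	-> every process in the caller's process group
 *	kill(-1, SIGTERM);	-> every process the caller may signal, except
 *				   pid 1 and the caller's own thread group
 *	kill(-5678, SIGTERM);	-> every process in process group 5678
 */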
1640 
1641 /*
1642  * These are for backward compatibility with the rest of the kernel source.
1643  */
1644 
1645 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1646 {
1647 	/*
1648 	 * Make sure legacy kernel users don't send in bad values
1649 	 * (normal paths check this in check_kill_permission).
1650 	 */
1651 	if (!valid_signal(sig))
1652 		return -EINVAL;
1653 
1654 	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1655 }
1656 EXPORT_SYMBOL(send_sig_info);
1657 
1658 #define __si_special(priv) \
1659 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1660 
1661 int
1662 send_sig(int sig, struct task_struct *p, int priv)
1663 {
1664 	return send_sig_info(sig, __si_special(priv), p);
1665 }
1666 EXPORT_SYMBOL(send_sig);
1667 
1668 void force_sig(int sig)
1669 {
1670 	struct kernel_siginfo info;
1671 
1672 	clear_siginfo(&info);
1673 	info.si_signo = sig;
1674 	info.si_errno = 0;
1675 	info.si_code = SI_KERNEL;
1676 	info.si_pid = 0;
1677 	info.si_uid = 0;
1678 	force_sig_info(&info);
1679 }
1680 EXPORT_SYMBOL(force_sig);
1681 
1682 void force_fatal_sig(int sig)
1683 {
1684 	struct kernel_siginfo info;
1685 
1686 	clear_siginfo(&info);
1687 	info.si_signo = sig;
1688 	info.si_errno = 0;
1689 	info.si_code = SI_KERNEL;
1690 	info.si_pid = 0;
1691 	info.si_uid = 0;
1692 	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1693 }
1694 
1695 void force_exit_sig(int sig)
1696 {
1697 	struct kernel_siginfo info;
1698 
1699 	clear_siginfo(&info);
1700 	info.si_signo = sig;
1701 	info.si_errno = 0;
1702 	info.si_code = SI_KERNEL;
1703 	info.si_pid = 0;
1704 	info.si_uid = 0;
1705 	force_sig_info_to_task(&info, current, HANDLER_EXIT);
1706 }
1707 
1708 /*
1709  * When things go south during signal handling, we
1710  * will force a SIGSEGV. And if the signal that caused
1711  * the problem was already a SIGSEGV, we'll want to
1712  * make sure we don't even try to deliver the signal..
1713  */
1714 void force_sigsegv(int sig)
1715 {
1716 	if (sig == SIGSEGV)
1717 		force_fatal_sig(SIGSEGV);
1718 	else
1719 		force_sig(SIGSEGV);
1720 }
1721 
1722 int force_sig_fault_to_task(int sig, int code, void __user *addr
1723 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1724 	, struct task_struct *t)
1725 {
1726 	struct kernel_siginfo info;
1727 
1728 	clear_siginfo(&info);
1729 	info.si_signo = sig;
1730 	info.si_errno = 0;
1731 	info.si_code  = code;
1732 	info.si_addr  = addr;
1733 #ifdef __ia64__
1734 	info.si_imm = imm;
1735 	info.si_flags = flags;
1736 	info.si_isr = isr;
1737 #endif
1738 	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1739 }
1740 
1741 int force_sig_fault(int sig, int code, void __user *addr
1742 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1743 {
1744 	return force_sig_fault_to_task(sig, code, addr
1745 				       ___ARCH_SI_IA64(imm, flags, isr), current);
1746 }
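/*
 * Illustrative sketch (editorial): architecture page-fault handlers are the
 * typical callers; on a bad user access they usually end up doing something
 * like
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)fault_address);
 *
 * where fault_address is whatever the arch recorded for the faulting access
 * and the exact si_code (SEGV_MAPERR vs SEGV_ACCERR, etc.) is arch policy.
 */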
1747 
send_sig_fault(int sig,int code,void __user * addr ___ARCH_SI_IA64 (int imm,unsigned int flags,unsigned long isr),struct task_struct * t)1748 int send_sig_fault(int sig, int code, void __user *addr
1749 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1750 	, struct task_struct *t)
1751 {
1752 	struct kernel_siginfo info;
1753 
1754 	clear_siginfo(&info);
1755 	info.si_signo = sig;
1756 	info.si_errno = 0;
1757 	info.si_code  = code;
1758 	info.si_addr  = addr;
1759 #ifdef __ia64__
1760 	info.si_imm = imm;
1761 	info.si_flags = flags;
1762 	info.si_isr = isr;
1763 #endif
1764 	return send_sig_info(info.si_signo, &info, t);
1765 }
1766 
force_sig_mceerr(int code,void __user * addr,short lsb)1767 int force_sig_mceerr(int code, void __user *addr, short lsb)
1768 {
1769 	struct kernel_siginfo info;
1770 
1771 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1772 	clear_siginfo(&info);
1773 	info.si_signo = SIGBUS;
1774 	info.si_errno = 0;
1775 	info.si_code = code;
1776 	info.si_addr = addr;
1777 	info.si_addr_lsb = lsb;
1778 	return force_sig_info(&info);
1779 }
1780 
send_sig_mceerr(int code,void __user * addr,short lsb,struct task_struct * t)1781 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1782 {
1783 	struct kernel_siginfo info;
1784 
1785 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1786 	clear_siginfo(&info);
1787 	info.si_signo = SIGBUS;
1788 	info.si_errno = 0;
1789 	info.si_code = code;
1790 	info.si_addr = addr;
1791 	info.si_addr_lsb = lsb;
1792 	return send_sig_info(info.si_signo, &info, t);
1793 }
1794 EXPORT_SYMBOL(send_sig_mceerr);
1795 
force_sig_bnderr(void __user * addr,void __user * lower,void __user * upper)1796 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1797 {
1798 	struct kernel_siginfo info;
1799 
1800 	clear_siginfo(&info);
1801 	info.si_signo = SIGSEGV;
1802 	info.si_errno = 0;
1803 	info.si_code  = SEGV_BNDERR;
1804 	info.si_addr  = addr;
1805 	info.si_lower = lower;
1806 	info.si_upper = upper;
1807 	return force_sig_info(&info);
1808 }
1809 
1810 #ifdef SEGV_PKUERR
force_sig_pkuerr(void __user * addr,u32 pkey)1811 int force_sig_pkuerr(void __user *addr, u32 pkey)
1812 {
1813 	struct kernel_siginfo info;
1814 
1815 	clear_siginfo(&info);
1816 	info.si_signo = SIGSEGV;
1817 	info.si_errno = 0;
1818 	info.si_code  = SEGV_PKUERR;
1819 	info.si_addr  = addr;
1820 	info.si_pkey  = pkey;
1821 	return force_sig_info(&info);
1822 }
1823 #endif
1824 
send_sig_perf(void __user * addr,u32 type,u64 sig_data)1825 int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1826 {
1827 	struct kernel_siginfo info;
1828 
1829 	clear_siginfo(&info);
1830 	info.si_signo     = SIGTRAP;
1831 	info.si_errno     = 0;
1832 	info.si_code      = TRAP_PERF;
1833 	info.si_addr      = addr;
1834 	info.si_perf_data = sig_data;
1835 	info.si_perf_type = type;
1836 
1837 	/*
1838 	 * Signals generated by perf events should not terminate the whole
1839 	 * process if SIGTRAP is blocked; however, delivering the signal
1840 	 * asynchronously is better than not delivering at all. But tell user
1841 	 * space if the signal was asynchronous, so it can clearly be
1842 	 * distinguished from normal synchronous ones.
1843 	 */
1844 	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
1845 				     TRAP_PERF_FLAG_ASYNC :
1846 				     0;
1847 
1848 	return send_sig_info(info.si_signo, &info, current);
1849 }
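/*
 * Illustrative sketch (editorial): user space can tell the asynchronous case
 * apart in its SIGTRAP handler roughly like this (assuming a libc that
 * exposes the si_perf_* siginfo members):
 *
 *	void handler(int sig, siginfo_t *info, void *uc)
 *	{
 *		if (info->si_code == TRAP_PERF &&
 *		    (info->si_perf_flags & TRAP_PERF_FLAG_ASYNC))
 *			;	// delivered late because SIGTRAP was blocked
 *	}
 */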
1850 
1851 /**
1852  * force_sig_seccomp - signals the task to allow in-process syscall emulation
1853  * @syscall: syscall number to send to userland
1854  * @reason: filter-supplied reason code to send to userland (via si_errno)
1855  * @force_coredump: true to trigger a coredump
1856  *
1857  * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1858  */
force_sig_seccomp(int syscall,int reason,bool force_coredump)1859 int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1860 {
1861 	struct kernel_siginfo info;
1862 
1863 	clear_siginfo(&info);
1864 	info.si_signo = SIGSYS;
1865 	info.si_code = SYS_SECCOMP;
1866 	info.si_call_addr = (void __user *)KSTK_EIP(current);
1867 	info.si_errno = reason;
1868 	info.si_arch = syscall_get_arch(current);
1869 	info.si_syscall = syscall;
1870 	return force_sig_info_to_task(&info, current,
1871 		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1872 }
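/*
 * Illustrative sketch (editorial, simplified): the seccomp SECCOMP_RET_TRAP
 * action in kernel/seccomp.c resolves to roughly
 *
 *	force_sig_seccomp(this_syscall, ret & SECCOMP_RET_DATA, false);
 *
 * while the kill-style actions pass force_coredump = true, turning the
 * SIGSYS into a terminating, core-dumping signal.
 */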
1873 
1874 /* For the crazy architectures that include trap information in
1875  * the errno field, instead of an actual errno value.
1876  */
force_sig_ptrace_errno_trap(int errno,void __user * addr)1877 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1878 {
1879 	struct kernel_siginfo info;
1880 
1881 	clear_siginfo(&info);
1882 	info.si_signo = SIGTRAP;
1883 	info.si_errno = errno;
1884 	info.si_code  = TRAP_HWBKPT;
1885 	info.si_addr  = addr;
1886 	return force_sig_info(&info);
1887 }
1888 
1889 /* For the rare architectures that include trap information using
1890  * si_trapno.
1891  */
force_sig_fault_trapno(int sig,int code,void __user * addr,int trapno)1892 int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1893 {
1894 	struct kernel_siginfo info;
1895 
1896 	clear_siginfo(&info);
1897 	info.si_signo = sig;
1898 	info.si_errno = 0;
1899 	info.si_code  = code;
1900 	info.si_addr  = addr;
1901 	info.si_trapno = trapno;
1902 	return force_sig_info(&info);
1903 }
1904 
1905 /* For the rare architectures that include trap information using
1906  * si_trapno.
1907  */
send_sig_fault_trapno(int sig,int code,void __user * addr,int trapno,struct task_struct * t)1908 int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1909 			  struct task_struct *t)
1910 {
1911 	struct kernel_siginfo info;
1912 
1913 	clear_siginfo(&info);
1914 	info.si_signo = sig;
1915 	info.si_errno = 0;
1916 	info.si_code  = code;
1917 	info.si_addr  = addr;
1918 	info.si_trapno = trapno;
1919 	return send_sig_info(info.si_signo, &info, t);
1920 }
1921 
kill_pgrp(struct pid * pid,int sig,int priv)1922 int kill_pgrp(struct pid *pid, int sig, int priv)
1923 {
1924 	int ret;
1925 
1926 	read_lock(&tasklist_lock);
1927 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1928 	read_unlock(&tasklist_lock);
1929 
1930 	return ret;
1931 }
1932 EXPORT_SYMBOL(kill_pgrp);
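/*
 * Illustrative sketch (editorial): tty-style hangup processing is the
 * classic user of this helper - wake the foreground process group with
 * SIGHUP and a follow-up SIGCONT (priv = 1, i.e. kernel-generated):
 *
 *	kill_pgrp(pgrp, SIGHUP, 1);
 *	kill_pgrp(pgrp, SIGCONT, 1);
 *
 * where pgrp is a struct pid * for the process group.
 */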
1933 
kill_pid(struct pid * pid,int sig,int priv)1934 int kill_pid(struct pid *pid, int sig, int priv)
1935 {
1936 	return kill_pid_info(sig, __si_special(priv), pid);
1937 }
1938 EXPORT_SYMBOL(kill_pid);
1939 
1940 /*
1941  * These functions support sending signals using preallocated sigqueue
1942  * structures.  This is needed "because realtime applications cannot
1943  * afford to lose notifications of asynchronous events, like timer
1944  * expirations or I/O completions".  In the case of POSIX Timers
1945  * we allocate the sigqueue structure at timer_create() time.  If this
1946  * allocation fails we are able to report the failure to the application
1947  * with an EAGAIN error.
1948  */
sigqueue_alloc(void)1949 struct sigqueue *sigqueue_alloc(void)
1950 {
1951 	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1952 }
1953 
sigqueue_free(struct sigqueue * q)1954 void sigqueue_free(struct sigqueue *q)
1955 {
1956 	unsigned long flags;
1957 	spinlock_t *lock = &current->sighand->siglock;
1958 
1959 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1960 	/*
1961 	 * We must hold ->siglock while testing q->list
1962 	 * to serialize with collect_signal() or with
1963 	 * __exit_signal()->flush_sigqueue().
1964 	 */
1965 	spin_lock_irqsave(lock, flags);
1966 	q->flags &= ~SIGQUEUE_PREALLOC;
1967 	/*
1968 	 * If it is queued it will be freed when dequeued,
1969 	 * like the "regular" sigqueue.
1970 	 */
1971 	if (!list_empty(&q->list))
1972 		q = NULL;
1973 	spin_unlock_irqrestore(lock, flags);
1974 
1975 	if (q)
1976 		__sigqueue_free(q);
1977 }
1978 
send_sigqueue(struct sigqueue * q,struct pid * pid,enum pid_type type)1979 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1980 {
1981 	int sig = q->info.si_signo;
1982 	struct sigpending *pending;
1983 	struct task_struct *t;
1984 	unsigned long flags;
1985 	int ret, result;
1986 
1987 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1988 
1989 	ret = -1;
1990 	rcu_read_lock();
1991 
1992 	/*
1993 	 * This function is used by POSIX timers to deliver a timer signal.
1994 	 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
1995 	 * set), the signal must be delivered to the specific thread (queues
1996 	 * into t->pending).
1997 	 *
1998 	 * Where type is not PIDTYPE_PID, signals must be delivered to the
1999 	 * process. In this case, prefer to deliver to current if it is in the
2000 	 * same thread group as the target process and its sighand is stable,
2001 	 * which avoids unnecessarily waking up a potentially idle task.
2002 	 */
2003 	t = pid_task(pid, type);
2004 	if (!t)
2005 		goto ret;
2006 	if (type != PIDTYPE_PID &&
2007 	    same_thread_group(t, current) && !current->exit_state)
2008 		t = current;
2009 	if (!likely(lock_task_sighand(t, &flags)))
2010 		goto ret;
2011 
2012 	ret = 1; /* the signal is ignored */
2013 	result = TRACE_SIGNAL_IGNORED;
2014 	if (!prepare_signal(sig, t, false))
2015 		goto out;
2016 
2017 	ret = 0;
2018 	if (unlikely(!list_empty(&q->list))) {
2019 		/*
2020 		 * If an SI_TIMER entry is already queue just increment
2021 		 * If an SI_TIMER entry is already queued, just increment
2022 		 */
2023 		BUG_ON(q->info.si_code != SI_TIMER);
2024 		q->info.si_overrun++;
2025 		result = TRACE_SIGNAL_ALREADY_PENDING;
2026 		goto out;
2027 	}
2028 	q->info.si_overrun = 0;
2029 
2030 	signalfd_notify(t, sig);
2031 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
2032 	list_add_tail(&q->list, &pending->list);
2033 	sigaddset(&pending->signal, sig);
2034 	complete_signal(sig, t, type);
2035 	result = TRACE_SIGNAL_DELIVERED;
2036 out:
2037 	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2038 	unlock_task_sighand(t, &flags);
2039 ret:
2040 	rcu_read_unlock();
2041 	return ret;
2042 }
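/*
 * Illustrative sketch (editorial) of the preallocated sigqueue life cycle as
 * used by POSIX timers (simplified; the real code lives in
 * kernel/time/posix-timers.c):
 *
 *	// timer_create(): reserve the queue entry up front
 *	struct sigqueue *q = sigqueue_alloc();
 *	if (!q)
 *		return -EAGAIN;
 *
 *	// timer expiry: fill in the siginfo and (re)queue it
 *	q->info.si_signo = sig;
 *	q->info.si_code  = SI_TIMER;
 *	send_sigqueue(q, pid, PIDTYPE_TGID);	// 0 queued, 1 ignored, -1 gone
 *
 *	// timer_delete(): release the entry
 *	sigqueue_free(q);
 */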
2043 
do_notify_pidfd(struct task_struct * task)2044 static void do_notify_pidfd(struct task_struct *task)
2045 {
2046 	struct pid *pid;
2047 
2048 	WARN_ON(task->exit_state == 0);
2049 	pid = task_pid(task);
2050 	wake_up_all(&pid->wait_pidfd);
2051 }
2052 
2053 /*
2054  * Let a parent know about the death of a child.
2055  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2056  *
2057  * Returns true if our parent ignored us and so we've switched to
2058  * self-reaping.
2059  */
do_notify_parent(struct task_struct * tsk,int sig)2060 bool do_notify_parent(struct task_struct *tsk, int sig)
2061 {
2062 	struct kernel_siginfo info;
2063 	unsigned long flags;
2064 	struct sighand_struct *psig;
2065 	bool autoreap = false;
2066 	u64 utime, stime;
2067 
2068 	WARN_ON_ONCE(sig == -1);
2069 
2070 	/* do_notify_parent_cldstop should have been called instead.  */
2071 	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2072 
2073 	WARN_ON_ONCE(!tsk->ptrace &&
2074 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2075 
2076 	/* Wake up all pidfd waiters */
2077 	do_notify_pidfd(tsk);
2078 
2079 	if (sig != SIGCHLD) {
2080 		/*
2081 		 * This is only possible if parent == real_parent.
2082 		 * Check if it has changed security domain.
2083 		 */
2084 		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2085 			sig = SIGCHLD;
2086 	}
2087 
2088 	clear_siginfo(&info);
2089 	info.si_signo = sig;
2090 	info.si_errno = 0;
2091 	/*
2092 	 * We are under tasklist_lock here so our parent is tied to
2093 	 * us and cannot change.
2094 	 *
2095 	 * task_active_pid_ns will always return the same pid namespace
2096 	 * until a task passes through release_task.
2097 	 *
2098 	 * write_lock() currently calls preempt_disable() which is the
2099 	 * same as rcu_read_lock(), but according to Oleg it is not
2100 	 * correct to rely on this.
2101 	 */
2102 	rcu_read_lock();
2103 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2104 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2105 				       task_uid(tsk));
2106 	rcu_read_unlock();
2107 
2108 	task_cputime(tsk, &utime, &stime);
2109 	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2110 	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2111 
2112 	info.si_status = tsk->exit_code & 0x7f;
2113 	if (tsk->exit_code & 0x80)
2114 		info.si_code = CLD_DUMPED;
2115 	else if (tsk->exit_code & 0x7f)
2116 		info.si_code = CLD_KILLED;
2117 	else {
2118 		info.si_code = CLD_EXITED;
2119 		info.si_status = tsk->exit_code >> 8;
2120 	}
2121 
2122 	psig = tsk->parent->sighand;
2123 	spin_lock_irqsave(&psig->siglock, flags);
2124 	if (!tsk->ptrace && sig == SIGCHLD &&
2125 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2126 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2127 		/*
2128 		 * We are exiting and our parent doesn't care.  POSIX.1
2129 		 * defines special semantics for setting SIGCHLD to SIG_IGN
2130 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2131 		 * automatically and not left for our parent's wait4 call.
2132 		 * Rather than having the parent do it as a magic kind of
2133 		 * signal handler, we just set this to tell do_exit that we
2134 		 * can be cleaned up without becoming a zombie.  Note that
2135 		 * we still call __wake_up_parent in this case, because a
2136 		 * blocked sys_wait4 might now return -ECHILD.
2137 		 *
2138 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2139 		 * is implementation-defined: we do (if you don't want
2140 		 * it, just use SIG_IGN instead).
2141 		 */
2142 		autoreap = true;
2143 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2144 			sig = 0;
2145 	}
2146 	/*
2147 	 * Send with __send_signal as si_pid and si_uid are in the
2148 	 * parent's namespaces.
2149 	 */
2150 	if (valid_signal(sig) && sig)
2151 		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2152 	__wake_up_parent(tsk, tsk->parent);
2153 	spin_unlock_irqrestore(&psig->siglock, flags);
2154 
2155 	return autoreap;
2156 }
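/*
 * Illustrative sketch (editorial, simplified): exit_notify() in
 * kernel/exit.c consumes the return value roughly like this -
 *
 *	autoreap = do_notify_parent(tsk, tsk->exit_signal);
 *	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
 *	if (autoreap)
 *		release_task(tsk);	// no parent is going to wait() for us
 *
 * i.e. a true return means the task reaps itself instead of lingering as a
 * zombie for the parent's wait4().
 */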
2157 
2158 /**
2159  * do_notify_parent_cldstop - notify parent of stopped/continued state change
2160  * @tsk: task reporting the state change
2161  * @for_ptracer: the notification is for ptracer
2162  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2163  *
2164  * Notify @tsk's parent that the stopped/continued state has changed.  If
2165  * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
2166  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2167  *
2168  * CONTEXT:
2169  * Must be called with tasklist_lock at least read locked.
2170  */
do_notify_parent_cldstop(struct task_struct * tsk,bool for_ptracer,int why)2171 static void do_notify_parent_cldstop(struct task_struct *tsk,
2172 				     bool for_ptracer, int why)
2173 {
2174 	struct kernel_siginfo info;
2175 	unsigned long flags;
2176 	struct task_struct *parent;
2177 	struct sighand_struct *sighand;
2178 	u64 utime, stime;
2179 
2180 	if (for_ptracer) {
2181 		parent = tsk->parent;
2182 	} else {
2183 		tsk = tsk->group_leader;
2184 		parent = tsk->real_parent;
2185 	}
2186 
2187 	clear_siginfo(&info);
2188 	info.si_signo = SIGCHLD;
2189 	info.si_errno = 0;
2190 	/*
2191 	 * see comment in do_notify_parent() about the following 4 lines
2192 	 */
2193 	rcu_read_lock();
2194 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2195 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2196 	rcu_read_unlock();
2197 
2198 	task_cputime(tsk, &utime, &stime);
2199 	info.si_utime = nsec_to_clock_t(utime);
2200 	info.si_stime = nsec_to_clock_t(stime);
2201 
2202 	info.si_code = why;
2203 	switch (why) {
2204 	case CLD_CONTINUED:
2205 		info.si_status = SIGCONT;
2206 		break;
2207 	case CLD_STOPPED:
2208 		info.si_status = tsk->signal->group_exit_code & 0x7f;
2209 		break;
2210 	case CLD_TRAPPED:
2211 		info.si_status = tsk->exit_code & 0x7f;
2212 		break;
2213 	default:
2214 		BUG();
2215 	}
2216 
2217 	sighand = parent->sighand;
2218 	spin_lock_irqsave(&sighand->siglock, flags);
2219 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2220 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2221 		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2222 	/*
2223 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2224 	 */
2225 	__wake_up_parent(tsk, parent);
2226 	spin_unlock_irqrestore(&sighand->siglock, flags);
2227 }
2228 
2229 /*
2230  * This must be called with current->sighand->siglock held.
2231  *
2232  * This should be the path for all ptrace stops.
2233  * We always set current->last_siginfo while stopped here.
2234  * That makes it a way to test a stopped process for
2235  * being ptrace-stopped vs being job-control-stopped.
2236  *
2237  * Returns the signal the ptracer requested the code resume
2238  * with.  If the code did not stop because the tracer is gone,
2239  * the stop signal remains unchanged unless clear_code.
2240  */
ptrace_stop(int exit_code,int why,unsigned long message,kernel_siginfo_t * info)2241 static int ptrace_stop(int exit_code, int why, unsigned long message,
2242 		       kernel_siginfo_t *info)
2243 	__releases(&current->sighand->siglock)
2244 	__acquires(&current->sighand->siglock)
2245 {
2246 	bool gstop_done = false;
2247 
2248 	if (arch_ptrace_stop_needed()) {
2249 		/*
2250 		 * The arch code has something special to do before a
2251 		 * ptrace stop.  This is allowed to block, e.g. for faults
2252 		 * on user stack pages.  We can't keep the siglock while
2253 		 * calling arch_ptrace_stop, so we must release it now.
2254 		 * To preserve proper semantics, we must do this before
2255 		 * any signal bookkeeping like checking group_stop_count.
2256 		 */
2257 		spin_unlock_irq(&current->sighand->siglock);
2258 		arch_ptrace_stop();
2259 		spin_lock_irq(&current->sighand->siglock);
2260 	}
2261 
2262 	/*
2263 	 * After this point ptrace_signal_wake_up or signal_wake_up
2264 	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2265 	 * signal comes in.  Handle previous ptrace_unlinks and fatal
2266 	 * signals here to prevent ptrace_stop sleeping in schedule.
2267 	 */
2268 	if (!current->ptrace || __fatal_signal_pending(current))
2269 		return exit_code;
2270 
2271 	set_special_state(TASK_TRACED);
2272 	current->jobctl |= JOBCTL_TRACED;
2273 
2274 	/*
2275 	 * We're committing to trapping.  TRACED should be visible before
2276 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2277 	 * Also, transition to TRACED and updates to ->jobctl should be
2278 	 * atomic with respect to siglock and should be done after the arch
2279 	 * hook as siglock is released and regrabbed across it.
2280 	 *
2281 	 *     TRACER				    TRACEE
2282 	 *
2283 	 *     ptrace_attach()
2284 	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2285 	 *     do_wait()
2286 	 *       set_current_state()                smp_wmb();
2287 	 *       ptrace_do_wait()
2288 	 *         wait_task_stopped()
2289 	 *           task_stopped_code()
2290 	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2291 	 */
2292 	smp_wmb();
2293 
2294 	current->ptrace_message = message;
2295 	current->last_siginfo = info;
2296 	current->exit_code = exit_code;
2297 
2298 	/*
2299 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2300 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2301 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2302 	 * could be clear now.  We act as if SIGCONT is received after
2303 	 * TASK_TRACED is entered - ignore it.
2304 	 */
2305 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2306 		gstop_done = task_participate_group_stop(current);
2307 
2308 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2309 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2310 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2311 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2312 
2313 	/* entering a trap, clear TRAPPING */
2314 	task_clear_jobctl_trapping(current);
2315 
2316 	spin_unlock_irq(&current->sighand->siglock);
2317 	read_lock(&tasklist_lock);
2318 	/*
2319 	 * Notify parents of the stop.
2320 	 *
2321 	 * While ptraced, there are two parents - the ptracer and
2322 	 * the real_parent of the group_leader.  The ptracer should
2323 	 * know about every stop while the real parent is only
2324 	 * interested in the completion of group stop.  The states
2325 	 * for the two don't interact with each other.  Notify
2326 	 * separately unless they're gonna be duplicates.
2327 	 */
2328 	if (current->ptrace)
2329 		do_notify_parent_cldstop(current, true, why);
2330 	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2331 		do_notify_parent_cldstop(current, false, why);
2332 
2333 	/*
2334 	 * Don't want to allow preemption here, because
2335 	 * sys_ptrace() needs this task to be inactive.
2336 	 *
2337 	 * XXX: implement read_unlock_no_resched().
2338 	 */
2339 	preempt_disable();
2340 	read_unlock(&tasklist_lock);
2341 	cgroup_enter_frozen();
2342 	preempt_enable_no_resched();
2343 	schedule();
2344 	cgroup_leave_frozen(true);
2345 
2346 	/*
2347 	 * We are back.  Now reacquire the siglock before touching
2348 	 * last_siginfo, so that we are sure to have synchronized with
2349 	 * any signal-sending on another CPU that wants to examine it.
2350 	 */
2351 	spin_lock_irq(&current->sighand->siglock);
2352 	exit_code = current->exit_code;
2353 	current->last_siginfo = NULL;
2354 	current->ptrace_message = 0;
2355 	current->exit_code = 0;
2356 
2357 	/* LISTENING can be set only during STOP traps, clear it */
2358 	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2359 
2360 	/*
2361 	 * Queued signals ignored us while we were stopped for tracing.
2362 	 * So check for any that we should take before resuming user mode.
2363 	 * This sets TIF_SIGPENDING, but never clears it.
2364 	 */
2365 	recalc_sigpending_tsk(current);
2366 	return exit_code;
2367 }
2368 
ptrace_do_notify(int signr,int exit_code,int why,unsigned long message)2369 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2370 {
2371 	kernel_siginfo_t info;
2372 
2373 	clear_siginfo(&info);
2374 	info.si_signo = signr;
2375 	info.si_code = exit_code;
2376 	info.si_pid = task_pid_vnr(current);
2377 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2378 
2379 	/* Let the debugger run.  */
2380 	return ptrace_stop(exit_code, why, message, &info);
2381 }
2382 
ptrace_notify(int exit_code,unsigned long message)2383 int ptrace_notify(int exit_code, unsigned long message)
2384 {
2385 	int signr;
2386 
2387 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2388 	if (unlikely(task_work_pending(current)))
2389 		task_work_run();
2390 
2391 	spin_lock_irq(&current->sighand->siglock);
2392 	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2393 	spin_unlock_irq(&current->sighand->siglock);
2394 	return signr;
2395 }
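/*
 * Illustrative sketch (editorial): ptrace event reporting (see
 * ptrace_event() in include/linux/ptrace.h) funnels into this helper with
 * the event number packed above the signal, e.g. for exec:
 *
 *	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP, 0);
 *
 * which satisfies the BUG_ON() above: the low bits carry SIGTRAP and the
 * event number lives in bits 8-15.
 */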
2396 
2397 /**
2398  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2399  * @signr: signr causing group stop if initiating
2400  *
2401  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2402  * and participate in it.  If already set, participate in the existing
2403  * group stop.  If participated in a group stop (and thus slept), %true is
2404  * returned with siglock released.
2405  *
2406  * If ptraced, this function doesn't handle stop itself.  Instead,
2407  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2408  * untouched.  The caller must ensure that INTERRUPT trap handling takes
2409  * places afterwards.
2410  *
2411  * CONTEXT:
2412  * Must be called with @current->sighand->siglock held, which is released
2413  * on %true return.
2414  *
2415  * RETURNS:
2416  * %false if group stop is already cancelled or ptrace trap is scheduled.
2417  * %true if participated in group stop.
2418  */
do_signal_stop(int signr)2419 static bool do_signal_stop(int signr)
2420 	__releases(&current->sighand->siglock)
2421 {
2422 	struct signal_struct *sig = current->signal;
2423 
2424 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2425 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2426 		struct task_struct *t;
2427 
2428 		/* signr will be recorded in task->jobctl for retries */
2429 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2430 
2431 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2432 		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2433 		    unlikely(sig->group_exec_task))
2434 			return false;
2435 		/*
2436 		 * There is no group stop already in progress.  We must
2437 		 * initiate one now.
2438 		 *
2439 		 * While ptraced, a task may be resumed while group stop is
2440 		 * still in effect and then receive a stop signal and
2441 		 * initiate another group stop.  This deviates from the
2442 		 * usual behavior as two consecutive stop signals can't
2443 		 * cause two group stops when !ptraced.  That is why we
2444 		 * also check !task_is_stopped(t) below.
2445 		 *
2446 		 * The condition can be distinguished by testing whether
2447 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2448 		 * group_exit_code in such case.
2449 		 *
2450 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2451 		 * an intervening stop signal is required to cause two
2452 		 * continued events regardless of ptrace.
2453 		 */
2454 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2455 			sig->group_exit_code = signr;
2456 
2457 		sig->group_stop_count = 0;
2458 
2459 		if (task_set_jobctl_pending(current, signr | gstop))
2460 			sig->group_stop_count++;
2461 
2462 		t = current;
2463 		while_each_thread(current, t) {
2464 			/*
2465 			 * Setting state to TASK_STOPPED for a group
2466 			 * stop is always done with the siglock held,
2467 			 * so this check has no races.
2468 			 */
2469 			if (!task_is_stopped(t) &&
2470 			    task_set_jobctl_pending(t, signr | gstop)) {
2471 				sig->group_stop_count++;
2472 				if (likely(!(t->ptrace & PT_SEIZED)))
2473 					signal_wake_up(t, 0);
2474 				else
2475 					ptrace_trap_notify(t);
2476 			}
2477 		}
2478 	}
2479 
2480 	if (likely(!current->ptrace)) {
2481 		int notify = 0;
2482 
2483 		/*
2484 		 * If there are no other threads in the group, or if there
2485 		 * is a group stop in progress and we are the last to stop,
2486 		 * report to the parent.
2487 		 */
2488 		if (task_participate_group_stop(current))
2489 			notify = CLD_STOPPED;
2490 
2491 		current->jobctl |= JOBCTL_STOPPED;
2492 		set_special_state(TASK_STOPPED);
2493 		spin_unlock_irq(&current->sighand->siglock);
2494 
2495 		/*
2496 		 * Notify the parent of the group stop completion.  Because
2497 		 * we're not holding either the siglock or tasklist_lock
2498 		 * here, a ptracer may attach in between; however, this is for
2499 		 * group stop and should always be delivered to the real
2500 		 * parent of the group leader.  The new ptracer will get
2501 		 * its notification when this task transitions into
2502 		 * TASK_TRACED.
2503 		 */
2504 		if (notify) {
2505 			read_lock(&tasklist_lock);
2506 			do_notify_parent_cldstop(current, false, notify);
2507 			read_unlock(&tasklist_lock);
2508 		}
2509 
2510 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2511 		cgroup_enter_frozen();
2512 		schedule();
2513 		return true;
2514 	} else {
2515 		/*
2516 		 * While ptraced, group stop is handled by STOP trap.
2517 		 * Schedule it and let the caller deal with it.
2518 		 */
2519 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2520 		return false;
2521 	}
2522 }
2523 
2524 /**
2525  * do_jobctl_trap - take care of ptrace jobctl traps
2526  *
2527  * When PT_SEIZED, it's used for both group stop and explicit
2528  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2529  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2530  * the stop signal; otherwise, %SIGTRAP.
2531  *
2532  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2533  * number as exit_code and no siginfo.
2534  *
2535  * CONTEXT:
2536  * Must be called with @current->sighand->siglock held, which may be
2537  * released and re-acquired before returning with intervening sleep.
2538  */
do_jobctl_trap(void)2539 static void do_jobctl_trap(void)
2540 {
2541 	struct signal_struct *signal = current->signal;
2542 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2543 
2544 	if (current->ptrace & PT_SEIZED) {
2545 		if (!signal->group_stop_count &&
2546 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2547 			signr = SIGTRAP;
2548 		WARN_ON_ONCE(!signr);
2549 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2550 				 CLD_STOPPED, 0);
2551 	} else {
2552 		WARN_ON_ONCE(!signr);
2553 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2554 	}
2555 }
2556 
2557 /**
2558  * do_freezer_trap - handle the freezer jobctl trap
2559  *
2560  * Puts the task into the frozen state, unless the task is about to quit.
2561  * In that case it just drops JOBCTL_TRAP_FREEZE.
2562  *
2563  * CONTEXT:
2564  * Must be called with @current->sighand->siglock held,
2565  * which is always released before returning.
2566  */
do_freezer_trap(void)2567 static void do_freezer_trap(void)
2568 	__releases(&current->sighand->siglock)
2569 {
2570 	/*
2571 	 * If there are trap bits pending other than JOBCTL_TRAP_FREEZE,
2572 	 * make another loop to give them a chance to be handled.
2573 	 * In any case, we'll return.
2574 	 */
2575 	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2576 	     JOBCTL_TRAP_FREEZE) {
2577 		spin_unlock_irq(&current->sighand->siglock);
2578 		return;
2579 	}
2580 
2581 	/*
2582 	 * Now we're sure that there is no pending fatal signal and no
2583 	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2584 	 * pending traps. Clear TIF_SIGPENDING so we do not get out of schedule()
2585 	 * immediately (if there is a non-fatal signal pending), and
2586 	 * put the task to sleep.
2587 	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2588 	clear_thread_flag(TIF_SIGPENDING);
2589 	spin_unlock_irq(&current->sighand->siglock);
2590 	cgroup_enter_frozen();
2591 	schedule();
2592 
2593 	/*
2594 	 * We could've been woken by task_work, run it to clear
2595 	 * TIF_NOTIFY_SIGNAL. The caller will retry if necessary.
2596 	 */
2597 	clear_notify_signal();
2598 	if (unlikely(task_work_pending(current)))
2599 		task_work_run();
2600 }
2601 
ptrace_signal(int signr,kernel_siginfo_t * info,enum pid_type type)2602 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2603 {
2604 	/*
2605 	 * We do not check sig_kernel_stop(signr) but set this marker
2606 	 * unconditionally because we do not know whether debugger will
2607 	 * change signr. This flag has no meaning unless we are going
2608 	 * to stop after return from ptrace_stop(). In this case it will
2609 	 * be checked in do_signal_stop(), we should only stop if it was
2610 	 * not cleared by SIGCONT while we were sleeping. See also the
2611 	 * comment in dequeue_signal().
2612 	 */
2613 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2614 	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2615 
2616 	/* We're back.  Did the debugger cancel the sig?  */
2617 	if (signr == 0)
2618 		return signr;
2619 
2620 	/*
2621 	 * Update the siginfo structure if the signal has
2622 	 * changed.  If the debugger wanted something
2623 	 * specific in the siginfo structure then it should
2624 	 * have updated *info via PTRACE_SETSIGINFO.
2625 	 */
2626 	if (signr != info->si_signo) {
2627 		clear_siginfo(info);
2628 		info->si_signo = signr;
2629 		info->si_errno = 0;
2630 		info->si_code = SI_USER;
2631 		rcu_read_lock();
2632 		info->si_pid = task_pid_vnr(current->parent);
2633 		info->si_uid = from_kuid_munged(current_user_ns(),
2634 						task_uid(current->parent));
2635 		rcu_read_unlock();
2636 	}
2637 
2638 	/* If the (new) signal is now blocked, requeue it.  */
2639 	if (sigismember(&current->blocked, signr) ||
2640 	    fatal_signal_pending(current)) {
2641 		send_signal_locked(signr, info, current, type);
2642 		signr = 0;
2643 	}
2644 
2645 	return signr;
2646 }
2647 
hide_si_addr_tag_bits(struct ksignal * ksig)2648 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2649 {
2650 	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2651 	case SIL_FAULT:
2652 	case SIL_FAULT_TRAPNO:
2653 	case SIL_FAULT_MCEERR:
2654 	case SIL_FAULT_BNDERR:
2655 	case SIL_FAULT_PKUERR:
2656 	case SIL_FAULT_PERF_EVENT:
2657 		ksig->info.si_addr = arch_untagged_si_addr(
2658 			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2659 		break;
2660 	case SIL_KILL:
2661 	case SIL_TIMER:
2662 	case SIL_POLL:
2663 	case SIL_CHLD:
2664 	case SIL_RT:
2665 	case SIL_SYS:
2666 		break;
2667 	}
2668 }
2669 
get_signal(struct ksignal * ksig)2670 bool get_signal(struct ksignal *ksig)
2671 {
2672 	struct sighand_struct *sighand = current->sighand;
2673 	struct signal_struct *signal = current->signal;
2674 	int signr;
2675 
2676 	clear_notify_signal();
2677 	if (unlikely(task_work_pending(current)))
2678 		task_work_run();
2679 
2680 	if (!task_sigpending(current))
2681 		return false;
2682 
2683 	if (unlikely(uprobe_deny_signal()))
2684 		return false;
2685 
2686 	/*
2687 	 * Do this once, we can't return to user-mode if freezing() == T.
2688 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2689 	 * thus do not need another check after return.
2690 	 */
2691 	try_to_freeze();
2692 
2693 relock:
2694 	spin_lock_irq(&sighand->siglock);
2695 
2696 	/*
2697 	 * Every stopped thread goes here after wakeup. Check to see if
2698 	 * we should notify the parent; prepare_signal(SIGCONT) encodes
2699 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2700 	 */
2701 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2702 		int why;
2703 
2704 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2705 			why = CLD_CONTINUED;
2706 		else
2707 			why = CLD_STOPPED;
2708 
2709 		signal->flags &= ~SIGNAL_CLD_MASK;
2710 
2711 		spin_unlock_irq(&sighand->siglock);
2712 
2713 		/*
2714 		 * Notify the parent that we're continuing.  This event is
2715 	 * always per-process and doesn't make a whole lot of sense
2716 		 * for ptracers, who shouldn't consume the state via
2717 		 * wait(2) either, but, for backward compatibility, notify
2718 		 * the ptracer of the group leader too unless it's gonna be
2719 		 * a duplicate.
2720 		 */
2721 		read_lock(&tasklist_lock);
2722 		do_notify_parent_cldstop(current, false, why);
2723 
2724 		if (ptrace_reparented(current->group_leader))
2725 			do_notify_parent_cldstop(current->group_leader,
2726 						true, why);
2727 		read_unlock(&tasklist_lock);
2728 
2729 		goto relock;
2730 	}
2731 
2732 	for (;;) {
2733 		struct k_sigaction *ka;
2734 		enum pid_type type;
2735 
2736 		/* Has this task already been marked for death? */
2737 		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2738 		     signal->group_exec_task) {
2739 			clear_siginfo(&ksig->info);
2740 			ksig->info.si_signo = signr = SIGKILL;
2741 			sigdelset(&current->pending.signal, SIGKILL);
2742 			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2743 				&sighand->action[SIGKILL - 1]);
2744 			recalc_sigpending();
2745 			goto fatal;
2746 		}
2747 
2748 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2749 		    do_signal_stop(0))
2750 			goto relock;
2751 
2752 		if (unlikely(current->jobctl &
2753 			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2754 			if (current->jobctl & JOBCTL_TRAP_MASK) {
2755 				do_jobctl_trap();
2756 				spin_unlock_irq(&sighand->siglock);
2757 			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2758 				do_freezer_trap();
2759 
2760 			goto relock;
2761 		}
2762 
2763 		/*
2764 		 * If the task is leaving the frozen state, let's update
2765 		 * cgroup counters and reset the frozen bit.
2766 		 */
2767 		if (unlikely(cgroup_task_frozen(current))) {
2768 			spin_unlock_irq(&sighand->siglock);
2769 			cgroup_leave_frozen(false);
2770 			goto relock;
2771 		}
2772 
2773 		/*
2774 		 * Signals generated by the execution of an instruction
2775 		 * need to be delivered before any other pending signals
2776 		 * so that the instruction pointer in the signal stack
2777 		 * frame points to the faulting instruction.
2778 		 */
2779 		type = PIDTYPE_PID;
2780 		signr = dequeue_synchronous_signal(&ksig->info);
2781 		if (!signr)
2782 			signr = dequeue_signal(current, &current->blocked,
2783 					       &ksig->info, &type);
2784 
2785 		if (!signr)
2786 			break; /* will return 0 */
2787 
2788 		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2789 		    !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
2790 			signr = ptrace_signal(signr, &ksig->info, type);
2791 			if (!signr)
2792 				continue;
2793 		}
2794 
2795 		ka = &sighand->action[signr-1];
2796 
2797 		/* Trace actually delivered signals. */
2798 		trace_signal_deliver(signr, &ksig->info, ka);
2799 
2800 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2801 			continue;
2802 		if (ka->sa.sa_handler != SIG_DFL) {
2803 			/* Run the handler.  */
2804 			ksig->ka = *ka;
2805 
2806 			if (ka->sa.sa_flags & SA_ONESHOT)
2807 				ka->sa.sa_handler = SIG_DFL;
2808 
2809 			break; /* will return non-zero "signr" value */
2810 		}
2811 
2812 		/*
2813 		 * Now we are doing the default action for this signal.
2814 		 */
2815 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2816 			continue;
2817 
2818 		/*
2819 		 * Global init gets no signals it doesn't want.
2820 		 * Container-init gets no signals it doesn't want from same
2821 		 * container.
2822 		 *
2823 		 * Note that if global/container-init sees a sig_kernel_only()
2824 		 * signal here, the signal must have been generated internally
2825 		 * or must have come from an ancestor namespace. In either
2826 		 * case, the signal cannot be dropped.
2827 		 */
2828 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2829 				!sig_kernel_only(signr))
2830 			continue;
2831 
2832 		if (sig_kernel_stop(signr)) {
2833 			/*
2834 			 * The default action is to stop all threads in
2835 			 * the thread group.  The job control signals
2836 			 * do nothing in an orphaned pgrp, but SIGSTOP
2837 			 * always works.  Note that siglock needs to be
2838 			 * dropped during the call to is_orphaned_pgrp()
2839 			 * because of lock ordering with tasklist_lock.
2840 			 * This allows an intervening SIGCONT to be posted.
2841 			 * We need to check for that and bail out if necessary.
2842 			 */
2843 			if (signr != SIGSTOP) {
2844 				spin_unlock_irq(&sighand->siglock);
2845 
2846 				/* signals can be posted during this window */
2847 
2848 				if (is_current_pgrp_orphaned())
2849 					goto relock;
2850 
2851 				spin_lock_irq(&sighand->siglock);
2852 			}
2853 
2854 			if (likely(do_signal_stop(ksig->info.si_signo))) {
2855 				/* It released the siglock.  */
2856 				goto relock;
2857 			}
2858 
2859 			/*
2860 			 * We didn't actually stop, due to a race
2861 			 * with SIGCONT or something like that.
2862 			 */
2863 			continue;
2864 		}
2865 
2866 	fatal:
2867 		spin_unlock_irq(&sighand->siglock);
2868 		if (unlikely(cgroup_task_frozen(current)))
2869 			cgroup_leave_frozen(true);
2870 
2871 		/*
2872 		 * Anything else is fatal, maybe with a core dump.
2873 		 */
2874 		current->flags |= PF_SIGNALED;
2875 
2876 		if (sig_kernel_coredump(signr)) {
2877 			if (print_fatal_signals)
2878 				print_fatal_signal(ksig->info.si_signo);
2879 			proc_coredump_connector(current);
2880 			/*
2881 			 * If it was able to dump core, this kills all
2882 			 * other threads in the group and synchronizes with
2883 			 * their demise.  If we lost the race with another
2884 			 * thread getting here, it set group_exit_code
2885 			 * first and our do_group_exit call below will use
2886 			 * that value and ignore the one we pass it.
2887 			 */
2888 			do_coredump(&ksig->info);
2889 		}
2890 
2891 		/*
2892 		 * PF_USER_WORKER threads will catch and exit on fatal signals
2893 		 * themselves. They have cleanup that must be performed, so
2894 		 * we cannot call do_exit() on their behalf.
2895 		 */
2896 		if (current->flags & PF_USER_WORKER)
2897 			goto out;
2898 
2899 		/*
2900 		 * Death signals, no core dump.
2901 		 */
2902 		do_group_exit(ksig->info.si_signo);
2903 		/* NOTREACHED */
2904 	}
2905 	spin_unlock_irq(&sighand->siglock);
2906 out:
2907 	ksig->sig = signr;
2908 
2909 	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2910 		hide_si_addr_tag_bits(ksig);
2911 
2912 	return ksig->sig > 0;
2913 }
2914 
2915 /**
2916  * signal_delivered - called after signal delivery to update blocked signals
2917  * @ksig:		kernel signal struct
2918  * @stepping:		nonzero if debugger single-step or block-step in use
2919  *
2920  * This function should be called when a signal has successfully been
2921  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2922  * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2923  * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
2924  */
signal_delivered(struct ksignal * ksig,int stepping)2925 static void signal_delivered(struct ksignal *ksig, int stepping)
2926 {
2927 	sigset_t blocked;
2928 
2929 	/* A signal was successfully delivered, and the
2930 	   saved sigmask was stored on the signal frame,
2931 	   and will be restored by sigreturn.  So we can
2932 	   simply clear the restore sigmask flag.  */
2933 	clear_restore_sigmask();
2934 
2935 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2936 	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2937 		sigaddset(&blocked, ksig->sig);
2938 	set_current_blocked(&blocked);
2939 	if (current->sas_ss_flags & SS_AUTODISARM)
2940 		sas_ss_reset(current);
2941 	if (stepping)
2942 		ptrace_notify(SIGTRAP, 0);
2943 }
2944 
signal_setup_done(int failed,struct ksignal * ksig,int stepping)2945 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2946 {
2947 	if (failed)
2948 		force_sigsegv(ksig->sig);
2949 	else
2950 		signal_delivered(ksig, stepping);
2951 }
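/*
 * Illustrative sketch (editorial): an architecture's signal-delivery path
 * typically pairs get_signal() with signal_setup_done() roughly like this
 * (setup_rt_frame(), regs and stepping stand in for arch-specific pieces):
 *
 *	struct ksignal ksig;
 *
 *	if (get_signal(&ksig)) {
 *		int failed = setup_rt_frame(&ksig, regs);
 *		signal_setup_done(failed, &ksig, stepping);
 *	} else {
 *		restore_saved_sigmask();
 *	}
 */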
2952 
2953 /*
2954  * It could be that complete_signal() picked us to notify about the
2955  * group-wide signal. Other threads should be notified now to take
2956  * the shared signals in @which since we will not.
2957  */
retarget_shared_pending(struct task_struct * tsk,sigset_t * which)2958 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2959 {
2960 	sigset_t retarget;
2961 	struct task_struct *t;
2962 
2963 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2964 	if (sigisemptyset(&retarget))
2965 		return;
2966 
2967 	t = tsk;
2968 	while_each_thread(tsk, t) {
2969 		if (t->flags & PF_EXITING)
2970 			continue;
2971 
2972 		if (!has_pending_signals(&retarget, &t->blocked))
2973 			continue;
2974 		/* Remove the signals this thread can handle. */
2975 		sigandsets(&retarget, &retarget, &t->blocked);
2976 
2977 		if (!task_sigpending(t))
2978 			signal_wake_up(t, 0);
2979 
2980 		if (sigisemptyset(&retarget))
2981 			break;
2982 	}
2983 }
2984 
exit_signals(struct task_struct * tsk)2985 void exit_signals(struct task_struct *tsk)
2986 {
2987 	int group_stop = 0;
2988 	sigset_t unblocked;
2989 
2990 	/*
2991 	 * @tsk is about to have PF_EXITING set - lock out users which
2992 	 * expect stable threadgroup.
2993 	 */
2994 	cgroup_threadgroup_change_begin(tsk);
2995 
2996 	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2997 		sched_mm_cid_exit_signals(tsk);
2998 		tsk->flags |= PF_EXITING;
2999 		cgroup_threadgroup_change_end(tsk);
3000 		return;
3001 	}
3002 
3003 	spin_lock_irq(&tsk->sighand->siglock);
3004 	/*
3005 	 * From now this task is not visible for group-wide signals,
3006 	 * see wants_signal(), do_signal_stop().
3007 	 */
3008 	sched_mm_cid_exit_signals(tsk);
3009 	tsk->flags |= PF_EXITING;
3010 
3011 	cgroup_threadgroup_change_end(tsk);
3012 
3013 	if (!task_sigpending(tsk))
3014 		goto out;
3015 
3016 	unblocked = tsk->blocked;
3017 	signotset(&unblocked);
3018 	retarget_shared_pending(tsk, &unblocked);
3019 
3020 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3021 	    task_participate_group_stop(tsk))
3022 		group_stop = CLD_STOPPED;
3023 out:
3024 	spin_unlock_irq(&tsk->sighand->siglock);
3025 
3026 	/*
3027 	 * If group stop has completed, deliver the notification.  This
3028 	 * should always go to the real parent of the group leader.
3029 	 */
3030 	if (unlikely(group_stop)) {
3031 		read_lock(&tasklist_lock);
3032 		do_notify_parent_cldstop(tsk, false, group_stop);
3033 		read_unlock(&tasklist_lock);
3034 	}
3035 }
3036 
3037 /*
3038  * System call entry points.
3039  */
3040 
3041 /**
3042  *  sys_restart_syscall - restart a system call
3043  */
SYSCALL_DEFINE0(restart_syscall)3044 SYSCALL_DEFINE0(restart_syscall)
3045 {
3046 	struct restart_block *restart = &current->restart_block;
3047 	return restart->fn(restart);
3048 }
3049 
do_no_restart_syscall(struct restart_block * param)3050 long do_no_restart_syscall(struct restart_block *param)
3051 {
3052 	return -EINTR;
3053 }
3054 
__set_task_blocked(struct task_struct * tsk,const sigset_t * newset)3055 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3056 {
3057 	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3058 		sigset_t newblocked;
3059 		/* A set of now blocked but previously unblocked signals. */
3060 		sigandnsets(&newblocked, newset, &current->blocked);
3061 		retarget_shared_pending(tsk, &newblocked);
3062 	}
3063 	tsk->blocked = *newset;
3064 	recalc_sigpending();
3065 }
3066 
3067 /**
3068  * set_current_blocked - change current->blocked mask
3069  * @newset: new mask
3070  *
3071  * It is wrong to change ->blocked directly; this helper should be used
3072  * to ensure the process can't miss a shared signal we are going to block.
3073  */
set_current_blocked(sigset_t * newset)3074 void set_current_blocked(sigset_t *newset)
3075 {
3076 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3077 	__set_current_blocked(newset);
3078 }
3079 
__set_current_blocked(const sigset_t * newset)3080 void __set_current_blocked(const sigset_t *newset)
3081 {
3082 	struct task_struct *tsk = current;
3083 
3084 	/*
3085 	 * In case the signal mask hasn't changed, there is nothing we need
3086 	 * to do. The current->blocked shouldn't be modified by any other task.
3087 	 */
3088 	if (sigequalsets(&tsk->blocked, newset))
3089 		return;
3090 
3091 	spin_lock_irq(&tsk->sighand->siglock);
3092 	__set_task_blocked(tsk, newset);
3093 	spin_unlock_irq(&tsk->sighand->siglock);
3094 }
3095 
3096 /*
3097  * This is also useful for kernel threads that want to temporarily
3098  * (or permanently) block certain signals.
3099  *
3100  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3101  * interface happily blocks "unblockable" signals like SIGKILL
3102  * and friends.
3103  */
sigprocmask(int how,sigset_t * set,sigset_t * oldset)3104 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3105 {
3106 	struct task_struct *tsk = current;
3107 	sigset_t newset;
3108 
3109 	/* Lockless, only current can change ->blocked, never from irq */
3110 	if (oldset)
3111 		*oldset = tsk->blocked;
3112 
3113 	switch (how) {
3114 	case SIG_BLOCK:
3115 		sigorsets(&newset, &tsk->blocked, set);
3116 		break;
3117 	case SIG_UNBLOCK:
3118 		sigandnsets(&newset, &tsk->blocked, set);
3119 		break;
3120 	case SIG_SETMASK:
3121 		newset = *set;
3122 		break;
3123 	default:
3124 		return -EINVAL;
3125 	}
3126 
3127 	__set_current_blocked(&newset);
3128 	return 0;
3129 }
3130 EXPORT_SYMBOL(sigprocmask);
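/*
 * Illustrative sketch (editorial): a kernel thread that wants to receive
 * only SIGKILL could block everything else with this interface:
 *
 *	sigset_t only_kill;
 *
 *	siginitsetinv(&only_kill, sigmask(SIGKILL));
 *	sigprocmask(SIG_SETMASK, &only_kill, NULL);
 *
 * Note that, unlike the userspace syscall, nothing here stops a caller from
 * blocking SIGKILL/SIGSTOP as well.
 */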
3131 
3132 /*
3133  * This API installs an application-provided sigmask.
3134  *
3135  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3136  * epoll_pwait, where a new sigmask is passed from userland for the syscall.
3137  *
3138  * Note that it calls set_restore_sigmask() in advance, so it must always be
3139  * paired with restore_saved_sigmask_unless() before returning from the syscall.
3140  */
set_user_sigmask(const sigset_t __user * umask,size_t sigsetsize)3141 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3142 {
3143 	sigset_t kmask;
3144 
3145 	if (!umask)
3146 		return 0;
3147 	if (sigsetsize != sizeof(sigset_t))
3148 		return -EINVAL;
3149 	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3150 		return -EFAULT;
3151 
3152 	set_restore_sigmask();
3153 	current->saved_sigmask = current->blocked;
3154 	set_current_blocked(&kmask);
3155 
3156 	return 0;
3157 }
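/*
 * Illustrative sketch (editorial): the ppoll()/pselect() style of caller
 * pairs this helper with restore_saved_sigmask_unless(), roughly:
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_the_blocking_work(...);	// e.g. the poll/select core
 *
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *	return ret;
 *
 * so the saved mask is restored immediately unless the call was interrupted,
 * in which case the signal-delivery code restores it after the handler runs.
 */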
3158 
3159 #ifdef CONFIG_COMPAT
set_compat_user_sigmask(const compat_sigset_t __user * umask,size_t sigsetsize)3160 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3161 			    size_t sigsetsize)
3162 {
3163 	sigset_t kmask;
3164 
3165 	if (!umask)
3166 		return 0;
3167 	if (sigsetsize != sizeof(compat_sigset_t))
3168 		return -EINVAL;
3169 	if (get_compat_sigset(&kmask, umask))
3170 		return -EFAULT;
3171 
3172 	set_restore_sigmask();
3173 	current->saved_sigmask = current->blocked;
3174 	set_current_blocked(&kmask);
3175 
3176 	return 0;
3177 }
3178 #endif
3179 
3180 /**
3181  *  sys_rt_sigprocmask - change the list of currently blocked signals
3182  *  @how: whether to add, remove, or set signals
3183  *  @nset: new set of signals to block, unblock, or set (may be NULL)
3184  *  @oset: previous value of signal mask if non-null
3185  *  @sigsetsize: size of sigset_t type
3186  */
SYSCALL_DEFINE4(rt_sigprocmask,int,how,sigset_t __user *,nset,sigset_t __user *,oset,size_t,sigsetsize)3187 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3188 		sigset_t __user *, oset, size_t, sigsetsize)
3189 {
3190 	sigset_t old_set, new_set;
3191 	int error;
3192 
3193 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3194 	if (sigsetsize != sizeof(sigset_t))
3195 		return -EINVAL;
3196 
3197 	old_set = current->blocked;
3198 
3199 	if (nset) {
3200 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3201 			return -EFAULT;
3202 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3203 
3204 		error = sigprocmask(how, &new_set, NULL);
3205 		if (error)
3206 			return error;
3207 	}
3208 
3209 	if (oset) {
3210 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3211 			return -EFAULT;
3212 	}
3213 
3214 	return 0;
3215 }
3216 
3217 #ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask,int,how,compat_sigset_t __user *,nset,compat_sigset_t __user *,oset,compat_size_t,sigsetsize)3218 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3219 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3220 {
3221 	sigset_t old_set = current->blocked;
3222 
3223 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3224 	if (sigsetsize != sizeof(sigset_t))
3225 		return -EINVAL;
3226 
3227 	if (nset) {
3228 		sigset_t new_set;
3229 		int error;
3230 		if (get_compat_sigset(&new_set, nset))
3231 			return -EFAULT;
3232 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3233 
3234 		error = sigprocmask(how, &new_set, NULL);
3235 		if (error)
3236 			return error;
3237 	}
3238 	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3239 }
3240 #endif
3241 
do_sigpending(sigset_t * set)3242 static void do_sigpending(sigset_t *set)
3243 {
3244 	spin_lock_irq(&current->sighand->siglock);
3245 	sigorsets(set, &current->pending.signal,
3246 		  &current->signal->shared_pending.signal);
3247 	spin_unlock_irq(&current->sighand->siglock);
3248 
3249 	/* Outside the lock because only this thread touches it.  */
3250 	sigandsets(set, &current->blocked, set);
3251 }
3252 
3253 /**
3254  *  sys_rt_sigpending - examine a pending signal that has been raised
3255  *			while blocked
3256  *  @uset: stores pending signals
3257  *  @sigsetsize: size of sigset_t type or larger
3258  */
SYSCALL_DEFINE2(rt_sigpending,sigset_t __user *,uset,size_t,sigsetsize)3259 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3260 {
3261 	sigset_t set;
3262 
3263 	if (sigsetsize > sizeof(*uset))
3264 		return -EINVAL;
3265 
3266 	do_sigpending(&set);
3267 
3268 	if (copy_to_user(uset, &set, sigsetsize))
3269 		return -EFAULT;
3270 
3271 	return 0;
3272 }
3273 
3274 #ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending,compat_sigset_t __user *,uset,compat_size_t,sigsetsize)3275 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3276 		compat_size_t, sigsetsize)
3277 {
3278 	sigset_t set;
3279 
3280 	if (sigsetsize > sizeof(*uset))
3281 		return -EINVAL;
3282 
3283 	do_sigpending(&set);
3284 
3285 	return put_compat_sigset(uset, &set, sigsetsize);
3286 }
3287 #endif
3288 
3289 static const struct {
3290 	unsigned char limit, layout;
3291 } sig_sicodes[] = {
3292 	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3293 	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3294 	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3295 	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3296 	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3297 #if defined(SIGEMT)
3298 	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3299 #endif
3300 	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3301 	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3302 	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3303 };
3304 
known_siginfo_layout(unsigned sig,int si_code)3305 static bool known_siginfo_layout(unsigned sig, int si_code)
3306 {
3307 	if (si_code == SI_KERNEL)
3308 		return true;
3309 	else if ((si_code > SI_USER)) {
3310 		if (sig_specific_sicodes(sig)) {
3311 			if (si_code <= sig_sicodes[sig].limit)
3312 				return true;
3313 		}
3314 		else if (si_code <= NSIGPOLL)
3315 			return true;
3316 	}
3317 	else if (si_code >= SI_DETHREAD)
3318 		return true;
3319 	else if (si_code == SI_ASYNCNL)
3320 		return true;
3321 	return false;
3322 }
3323 
siginfo_layout(unsigned sig,int si_code)3324 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3325 {
3326 	enum siginfo_layout layout = SIL_KILL;
3327 	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3328 		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3329 		    (si_code <= sig_sicodes[sig].limit)) {
3330 			layout = sig_sicodes[sig].layout;
3331 			/* Handle the exceptions */
3332 			if ((sig == SIGBUS) &&
3333 			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3334 				layout = SIL_FAULT_MCEERR;
3335 			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3336 				layout = SIL_FAULT_BNDERR;
3337 #ifdef SEGV_PKUERR
3338 			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3339 				layout = SIL_FAULT_PKUERR;
3340 #endif
3341 			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3342 				layout = SIL_FAULT_PERF_EVENT;
3343 			else if (IS_ENABLED(CONFIG_SPARC) &&
3344 				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3345 				layout = SIL_FAULT_TRAPNO;
3346 			else if (IS_ENABLED(CONFIG_ALPHA) &&
3347 				 ((sig == SIGFPE) ||
3348 				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3349 				layout = SIL_FAULT_TRAPNO;
3350 		}
3351 		else if (si_code <= NSIGPOLL)
3352 			layout = SIL_POLL;
3353 	} else {
3354 		if (si_code == SI_TIMER)
3355 			layout = SIL_TIMER;
3356 		else if (si_code == SI_SIGIO)
3357 			layout = SIL_POLL;
3358 		else if (si_code < 0)
3359 			layout = SIL_RT;
3360 	}
3361 	return layout;
3362 }
3363 
si_expansion(const siginfo_t __user * info)3364 static inline char __user *si_expansion(const siginfo_t __user *info)
3365 {
3366 	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3367 }
3368 
copy_siginfo_to_user(siginfo_t __user * to,const kernel_siginfo_t * from)3369 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3370 {
3371 	char __user *expansion = si_expansion(to);
3372 	if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3373 		return -EFAULT;
3374 	if (clear_user(expansion, SI_EXPANSION_SIZE))
3375 		return -EFAULT;
3376 	return 0;
3377 }
3378 
post_copy_siginfo_from_user(kernel_siginfo_t * info,const siginfo_t __user * from)3379 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3380 				       const siginfo_t __user *from)
3381 {
3382 	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3383 		char __user *expansion = si_expansion(from);
3384 		char buf[SI_EXPANSION_SIZE];
3385 		int i;
3386 		/*
3387 		 * An unknown si_code might need more than
3388 		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3389 		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3390 		 * will return this data to userspace exactly.
3391 		 */
3392 		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3393 			return -EFAULT;
3394 		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3395 			if (buf[i] != 0)
3396 				return -E2BIG;
3397 		}
3398 	}
3399 	return 0;
3400 }
3401 
__copy_siginfo_from_user(int signo,kernel_siginfo_t * to,const siginfo_t __user * from)3402 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3403 				    const siginfo_t __user *from)
3404 {
3405 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3406 		return -EFAULT;
3407 	to->si_signo = signo;
3408 	return post_copy_siginfo_from_user(to, from);
3409 }
3410 
copy_siginfo_from_user(kernel_siginfo_t * to,const siginfo_t __user * from)3411 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3412 {
3413 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3414 		return -EFAULT;
3415 	return post_copy_siginfo_from_user(to, from);
3416 }
3417 
3418 #ifdef CONFIG_COMPAT
3419 /**
3420  * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3421  * @to: compat siginfo destination
3422  * @from: kernel siginfo source
3423  *
3424  * Note: This function does not work properly for the SIGCHLD on x32, but
3425  * fortunately it doesn't have to.  The only valid callers for this function are
3426  * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3427  * The latter does not care because SIGCHLD will never cause a coredump.
3428  */
copy_siginfo_to_external32(struct compat_siginfo * to,const struct kernel_siginfo * from)3429 void copy_siginfo_to_external32(struct compat_siginfo *to,
3430 		const struct kernel_siginfo *from)
3431 {
3432 	memset(to, 0, sizeof(*to));
3433 
3434 	to->si_signo = from->si_signo;
3435 	to->si_errno = from->si_errno;
3436 	to->si_code  = from->si_code;
3437 	switch(siginfo_layout(from->si_signo, from->si_code)) {
3438 	case SIL_KILL:
3439 		to->si_pid = from->si_pid;
3440 		to->si_uid = from->si_uid;
3441 		break;
3442 	case SIL_TIMER:
3443 		to->si_tid     = from->si_tid;
3444 		to->si_overrun = from->si_overrun;
3445 		to->si_int     = from->si_int;
3446 		break;
3447 	case SIL_POLL:
3448 		to->si_band = from->si_band;
3449 		to->si_fd   = from->si_fd;
3450 		break;
3451 	case SIL_FAULT:
3452 		to->si_addr = ptr_to_compat(from->si_addr);
3453 		break;
3454 	case SIL_FAULT_TRAPNO:
3455 		to->si_addr = ptr_to_compat(from->si_addr);
3456 		to->si_trapno = from->si_trapno;
3457 		break;
3458 	case SIL_FAULT_MCEERR:
3459 		to->si_addr = ptr_to_compat(from->si_addr);
3460 		to->si_addr_lsb = from->si_addr_lsb;
3461 		break;
3462 	case SIL_FAULT_BNDERR:
3463 		to->si_addr = ptr_to_compat(from->si_addr);
3464 		to->si_lower = ptr_to_compat(from->si_lower);
3465 		to->si_upper = ptr_to_compat(from->si_upper);
3466 		break;
3467 	case SIL_FAULT_PKUERR:
3468 		to->si_addr = ptr_to_compat(from->si_addr);
3469 		to->si_pkey = from->si_pkey;
3470 		break;
3471 	case SIL_FAULT_PERF_EVENT:
3472 		to->si_addr = ptr_to_compat(from->si_addr);
3473 		to->si_perf_data = from->si_perf_data;
3474 		to->si_perf_type = from->si_perf_type;
3475 		to->si_perf_flags = from->si_perf_flags;
3476 		break;
3477 	case SIL_CHLD:
3478 		to->si_pid = from->si_pid;
3479 		to->si_uid = from->si_uid;
3480 		to->si_status = from->si_status;
3481 		to->si_utime = from->si_utime;
3482 		to->si_stime = from->si_stime;
3483 		break;
3484 	case SIL_RT:
3485 		to->si_pid = from->si_pid;
3486 		to->si_uid = from->si_uid;
3487 		to->si_int = from->si_int;
3488 		break;
3489 	case SIL_SYS:
3490 		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3491 		to->si_syscall   = from->si_syscall;
3492 		to->si_arch      = from->si_arch;
3493 		break;
3494 	}
3495 }
3496 
__copy_siginfo_to_user32(struct compat_siginfo __user * to,const struct kernel_siginfo * from)3497 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3498 			   const struct kernel_siginfo *from)
3499 {
3500 	struct compat_siginfo new;
3501 
3502 	copy_siginfo_to_external32(&new, from);
3503 	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3504 		return -EFAULT;
3505 	return 0;
3506 }
3507 
post_copy_siginfo_from_user32(kernel_siginfo_t * to,const struct compat_siginfo * from)3508 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3509 					 const struct compat_siginfo *from)
3510 {
3511 	clear_siginfo(to);
3512 	to->si_signo = from->si_signo;
3513 	to->si_errno = from->si_errno;
3514 	to->si_code  = from->si_code;
3515 	switch(siginfo_layout(from->si_signo, from->si_code)) {
3516 	case SIL_KILL:
3517 		to->si_pid = from->si_pid;
3518 		to->si_uid = from->si_uid;
3519 		break;
3520 	case SIL_TIMER:
3521 		to->si_tid     = from->si_tid;
3522 		to->si_overrun = from->si_overrun;
3523 		to->si_int     = from->si_int;
3524 		break;
3525 	case SIL_POLL:
3526 		to->si_band = from->si_band;
3527 		to->si_fd   = from->si_fd;
3528 		break;
3529 	case SIL_FAULT:
3530 		to->si_addr = compat_ptr(from->si_addr);
3531 		break;
3532 	case SIL_FAULT_TRAPNO:
3533 		to->si_addr = compat_ptr(from->si_addr);
3534 		to->si_trapno = from->si_trapno;
3535 		break;
3536 	case SIL_FAULT_MCEERR:
3537 		to->si_addr = compat_ptr(from->si_addr);
3538 		to->si_addr_lsb = from->si_addr_lsb;
3539 		break;
3540 	case SIL_FAULT_BNDERR:
3541 		to->si_addr = compat_ptr(from->si_addr);
3542 		to->si_lower = compat_ptr(from->si_lower);
3543 		to->si_upper = compat_ptr(from->si_upper);
3544 		break;
3545 	case SIL_FAULT_PKUERR:
3546 		to->si_addr = compat_ptr(from->si_addr);
3547 		to->si_pkey = from->si_pkey;
3548 		break;
3549 	case SIL_FAULT_PERF_EVENT:
3550 		to->si_addr = compat_ptr(from->si_addr);
3551 		to->si_perf_data = from->si_perf_data;
3552 		to->si_perf_type = from->si_perf_type;
3553 		to->si_perf_flags = from->si_perf_flags;
3554 		break;
3555 	case SIL_CHLD:
3556 		to->si_pid    = from->si_pid;
3557 		to->si_uid    = from->si_uid;
3558 		to->si_status = from->si_status;
3559 #ifdef CONFIG_X86_X32_ABI
3560 		if (in_x32_syscall()) {
3561 			to->si_utime = from->_sifields._sigchld_x32._utime;
3562 			to->si_stime = from->_sifields._sigchld_x32._stime;
3563 		} else
3564 #endif
3565 		{
3566 			to->si_utime = from->si_utime;
3567 			to->si_stime = from->si_stime;
3568 		}
3569 		break;
3570 	case SIL_RT:
3571 		to->si_pid = from->si_pid;
3572 		to->si_uid = from->si_uid;
3573 		to->si_int = from->si_int;
3574 		break;
3575 	case SIL_SYS:
3576 		to->si_call_addr = compat_ptr(from->si_call_addr);
3577 		to->si_syscall   = from->si_syscall;
3578 		to->si_arch      = from->si_arch;
3579 		break;
3580 	}
3581 	return 0;
3582 }
3583 
__copy_siginfo_from_user32(int signo,struct kernel_siginfo * to,const struct compat_siginfo __user * ufrom)3584 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3585 				      const struct compat_siginfo __user *ufrom)
3586 {
3587 	struct compat_siginfo from;
3588 
3589 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3590 		return -EFAULT;
3591 
3592 	from.si_signo = signo;
3593 	return post_copy_siginfo_from_user32(to, &from);
3594 }
3595 
copy_siginfo_from_user32(struct kernel_siginfo * to,const struct compat_siginfo __user * ufrom)3596 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3597 			     const struct compat_siginfo __user *ufrom)
3598 {
3599 	struct compat_siginfo from;
3600 
3601 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3602 		return -EFAULT;
3603 
3604 	return post_copy_siginfo_from_user32(to, &from);
3605 }
3606 #endif /* CONFIG_COMPAT */
3607 
3608 /**
3609  *  do_sigtimedwait - wait for queued signals specified in @which
3610  *  @which: queued signals to wait for
3611  *  @info: if non-null, the signal's siginfo is returned here
3612  *  @ts: upper bound on process time suspension
3613  */
do_sigtimedwait(const sigset_t * which,kernel_siginfo_t * info,const struct timespec64 * ts)3614 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3615 		    const struct timespec64 *ts)
3616 {
3617 	ktime_t *to = NULL, timeout = KTIME_MAX;
3618 	struct task_struct *tsk = current;
3619 	sigset_t mask = *which;
3620 	enum pid_type type;
3621 	int sig, ret = 0;
3622 
3623 	if (ts) {
3624 		if (!timespec64_valid(ts))
3625 			return -EINVAL;
3626 		timeout = timespec64_to_ktime(*ts);
3627 		to = &timeout;
3628 	}
3629 
3630 	/*
3631 	 * Invert the set of allowed signals to get those we want to block.
3632 	 */
3633 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3634 	signotset(&mask);
3635 
3636 	spin_lock_irq(&tsk->sighand->siglock);
3637 	sig = dequeue_signal(tsk, &mask, info, &type);
3638 	if (!sig && timeout) {
3639 		/*
3640 		 * None ready, so temporarily unblock the signals we're
3641 		 * interested in while we sleep, so that we'll be woken when
3642 		 * they arrive.  Unblocking is always safe, so we can avoid
3643 		 * set_current_blocked().
3644 		 */
3645 		tsk->real_blocked = tsk->blocked;
3646 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3647 		recalc_sigpending();
3648 		spin_unlock_irq(&tsk->sighand->siglock);
3649 
3650 		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3651 		ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3652 					       HRTIMER_MODE_REL);
3653 		spin_lock_irq(&tsk->sighand->siglock);
3654 		__set_task_blocked(tsk, &tsk->real_blocked);
3655 		sigemptyset(&tsk->real_blocked);
3656 		sig = dequeue_signal(tsk, &mask, info, &type);
3657 	}
3658 	spin_unlock_irq(&tsk->sighand->siglock);
3659 
3660 	if (sig)
3661 		return sig;
3662 	return ret ? -EINTR : -EAGAIN;
3663 }
3664 
3665 /**
3666  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3667  *			in @uthese
3668  *  @uthese: queued signals to wait for
3669  *  @uinfo: if non-null, the signal's siginfo is returned here
3670  *  @uts: upper bound on process time suspension
3671  *  @sigsetsize: size of sigset_t type
3672  */
SYSCALL_DEFINE4(rt_sigtimedwait,const sigset_t __user *,uthese,siginfo_t __user *,uinfo,const struct __kernel_timespec __user *,uts,size_t,sigsetsize)3673 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3674 		siginfo_t __user *, uinfo,
3675 		const struct __kernel_timespec __user *, uts,
3676 		size_t, sigsetsize)
3677 {
3678 	sigset_t these;
3679 	struct timespec64 ts;
3680 	kernel_siginfo_t info;
3681 	int ret;
3682 
3683 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3684 	if (sigsetsize != sizeof(sigset_t))
3685 		return -EINVAL;
3686 
3687 	if (copy_from_user(&these, uthese, sizeof(these)))
3688 		return -EFAULT;
3689 
3690 	if (uts) {
3691 		if (get_timespec64(&ts, uts))
3692 			return -EFAULT;
3693 	}
3694 
3695 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3696 
3697 	if (ret > 0 && uinfo) {
3698 		if (copy_siginfo_to_user(uinfo, &info))
3699 			ret = -EFAULT;
3700 	}
3701 
3702 	return ret;
3703 }
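
/*
 * Illustrative userspace sketch (using the glibc sigtimedwait(3) wrapper,
 * which ends up in this syscall): block SIGUSR1 so it stays queued instead
 * of invoking a handler, then wait for it with a five second timeout.
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		printf("SIGUSR1 from pid %d\n", (int)si.si_pid);
 *	else if (errno == EAGAIN)
 *		printf("timed out\n");
 *
 * EAGAIN corresponds to the timeout path of do_sigtimedwait() above,
 * EINTR to being woken by a signal outside the waited-for set.
 */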
3704 
3705 #ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE4(rt_sigtimedwait_time32,const sigset_t __user *,uthese,siginfo_t __user *,uinfo,const struct old_timespec32 __user *,uts,size_t,sigsetsize)3706 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3707 		siginfo_t __user *, uinfo,
3708 		const struct old_timespec32 __user *, uts,
3709 		size_t, sigsetsize)
3710 {
3711 	sigset_t these;
3712 	struct timespec64 ts;
3713 	kernel_siginfo_t info;
3714 	int ret;
3715 
3716 	if (sigsetsize != sizeof(sigset_t))
3717 		return -EINVAL;
3718 
3719 	if (copy_from_user(&these, uthese, sizeof(these)))
3720 		return -EFAULT;
3721 
3722 	if (uts) {
3723 		if (get_old_timespec32(&ts, uts))
3724 			return -EFAULT;
3725 	}
3726 
3727 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3728 
3729 	if (ret > 0 && uinfo) {
3730 		if (copy_siginfo_to_user(uinfo, &info))
3731 			ret = -EFAULT;
3732 	}
3733 
3734 	return ret;
3735 }
3736 #endif
3737 
3738 #ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64,compat_sigset_t __user *,uthese,struct compat_siginfo __user *,uinfo,struct __kernel_timespec __user *,uts,compat_size_t,sigsetsize)3739 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3740 		struct compat_siginfo __user *, uinfo,
3741 		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3742 {
3743 	sigset_t s;
3744 	struct timespec64 t;
3745 	kernel_siginfo_t info;
3746 	long ret;
3747 
3748 	if (sigsetsize != sizeof(sigset_t))
3749 		return -EINVAL;
3750 
3751 	if (get_compat_sigset(&s, uthese))
3752 		return -EFAULT;
3753 
3754 	if (uts) {
3755 		if (get_timespec64(&t, uts))
3756 			return -EFAULT;
3757 	}
3758 
3759 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3760 
3761 	if (ret > 0 && uinfo) {
3762 		if (copy_siginfo_to_user32(uinfo, &info))
3763 			ret = -EFAULT;
3764 	}
3765 
3766 	return ret;
3767 }
3768 
3769 #ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32,compat_sigset_t __user *,uthese,struct compat_siginfo __user *,uinfo,struct old_timespec32 __user *,uts,compat_size_t,sigsetsize)3770 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3771 		struct compat_siginfo __user *, uinfo,
3772 		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3773 {
3774 	sigset_t s;
3775 	struct timespec64 t;
3776 	kernel_siginfo_t info;
3777 	long ret;
3778 
3779 	if (sigsetsize != sizeof(sigset_t))
3780 		return -EINVAL;
3781 
3782 	if (get_compat_sigset(&s, uthese))
3783 		return -EFAULT;
3784 
3785 	if (uts) {
3786 		if (get_old_timespec32(&t, uts))
3787 			return -EFAULT;
3788 	}
3789 
3790 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3791 
3792 	if (ret > 0 && uinfo) {
3793 		if (copy_siginfo_to_user32(uinfo, &info))
3794 			ret = -EFAULT;
3795 	}
3796 
3797 	return ret;
3798 }
3799 #endif
3800 #endif
3801 
prepare_kill_siginfo(int sig,struct kernel_siginfo * info)3802 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3803 {
3804 	clear_siginfo(info);
3805 	info->si_signo = sig;
3806 	info->si_errno = 0;
3807 	info->si_code = SI_USER;
3808 	info->si_pid = task_tgid_vnr(current);
3809 	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3810 }
3811 
3812 /**
3813  *  sys_kill - send a signal to a process
3814  *  @pid: the PID of the process
3815  *  @sig: signal to be sent
3816  */
SYSCALL_DEFINE2(kill,pid_t,pid,int,sig)3817 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3818 {
3819 	struct kernel_siginfo info;
3820 
3821 	prepare_kill_siginfo(sig, &info);
3822 
3823 	return kill_something_info(sig, &info, pid);
3824 }
3825 
3826 /*
3827  * Verify that the signaler and signalee either are in the same pid namespace
3828  * or that the signaler's pid namespace is an ancestor of the signalee's pid
3829  * namespace.
3830  */
access_pidfd_pidns(struct pid * pid)3831 static bool access_pidfd_pidns(struct pid *pid)
3832 {
3833 	struct pid_namespace *active = task_active_pid_ns(current);
3834 	struct pid_namespace *p = ns_of_pid(pid);
3835 
3836 	for (;;) {
3837 		if (!p)
3838 			return false;
3839 		if (p == active)
3840 			break;
3841 		p = p->parent;
3842 	}
3843 
3844 	return true;
3845 }
3846 
copy_siginfo_from_user_any(kernel_siginfo_t * kinfo,siginfo_t __user * info)3847 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3848 		siginfo_t __user *info)
3849 {
3850 #ifdef CONFIG_COMPAT
3851 	/*
3852 	 * Avoid hooking up compat syscalls and instead handle necessary
3853 	 * conversions here. Note, this is a stop-gap measure and should not be
3854 	 * considered a generic solution.
3855 	 */
3856 	if (in_compat_syscall())
3857 		return copy_siginfo_from_user32(
3858 			kinfo, (struct compat_siginfo __user *)info);
3859 #endif
3860 	return copy_siginfo_from_user(kinfo, info);
3861 }
3862 
pidfd_to_pid(const struct file * file)3863 static struct pid *pidfd_to_pid(const struct file *file)
3864 {
3865 	struct pid *pid;
3866 
3867 	pid = pidfd_pid(file);
3868 	if (!IS_ERR(pid))
3869 		return pid;
3870 
3871 	return tgid_pidfd_to_pid(file);
3872 }
3873 
3874 /**
3875  * sys_pidfd_send_signal - Signal a process through a pidfd
3876  * @pidfd:  file descriptor of the process
3877  * @sig:    signal to send
3878  * @info:   signal info
3879  * @flags:  future flags
3880  *
3881  * The syscall currently only signals via PIDTYPE_PID which covers
3882  * kill(<positive-pid>, <signal>. It does not signal threads or process
3883  * groups.
3884  * In order to extend the syscall to threads and process groups the @flags
3885  * argument should be used. In essence, the @flags argument will determine
3886  * what is signaled and not the file descriptor itself. Put in other words,
3887  * grouping is a property of the flags argument not a property of the file
3888  * descriptor.
3889  *
3890  * Return: 0 on success, negative errno on failure
3891  */
SYSCALL_DEFINE4(pidfd_send_signal,int,pidfd,int,sig,siginfo_t __user *,info,unsigned int,flags)3892 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3893 		siginfo_t __user *, info, unsigned int, flags)
3894 {
3895 	int ret;
3896 	struct fd f;
3897 	struct pid *pid;
3898 	kernel_siginfo_t kinfo;
3899 
3900 	/* Enforce that flags are 0 until we add an extension. */
3901 	if (flags)
3902 		return -EINVAL;
3903 
3904 	f = fdget(pidfd);
3905 	if (!f.file)
3906 		return -EBADF;
3907 
3908 	/* Is this a pidfd? */
3909 	pid = pidfd_to_pid(f.file);
3910 	if (IS_ERR(pid)) {
3911 		ret = PTR_ERR(pid);
3912 		goto err;
3913 	}
3914 
3915 	ret = -EINVAL;
3916 	if (!access_pidfd_pidns(pid))
3917 		goto err;
3918 
3919 	if (info) {
3920 		ret = copy_siginfo_from_user_any(&kinfo, info);
3921 		if (unlikely(ret))
3922 			goto err;
3923 
3924 		ret = -EINVAL;
3925 		if (unlikely(sig != kinfo.si_signo))
3926 			goto err;
3927 
3928 		/* Only allow sending arbitrary signals to yourself. */
3929 		ret = -EPERM;
3930 		if ((task_pid(current) != pid) &&
3931 		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3932 			goto err;
3933 	} else {
3934 		prepare_kill_siginfo(sig, &kinfo);
3935 	}
3936 
3937 	ret = kill_pid_info(sig, &kinfo, pid);
3938 
3939 err:
3940 	fdput(f);
3941 	return ret;
3942 }
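
/*
 * Illustrative userspace sketch (raw syscalls, numbers from
 * <sys/syscall.h>; "pid" is a hypothetical target): signal a process
 * through a pidfd, which is immune to PID reuse in a way kill(2) is not.
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *	if (pidfd >= 0) {
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 *
 * Passing info == NULL takes the prepare_kill_siginfo() branch above, so
 * the target sees the signal as a plain kill() from this task.
 */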
3943 
3944 static int
do_send_specific(pid_t tgid,pid_t pid,int sig,struct kernel_siginfo * info)3945 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3946 {
3947 	struct task_struct *p;
3948 	int error = -ESRCH;
3949 
3950 	rcu_read_lock();
3951 	p = find_task_by_vpid(pid);
3952 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3953 		error = check_kill_permission(sig, info, p);
3954 		/*
3955 		 * The null signal is a permissions and process existence
3956 		 * probe.  No signal is actually delivered.
3957 		 */
3958 		if (!error && sig) {
3959 			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3960 			/*
3961 			 * If lock_task_sighand() failed we pretend the task
3962 			 * dies after receiving the signal. The window is tiny,
3963 			 * and the signal is private anyway.
3964 			 */
3965 			if (unlikely(error == -ESRCH))
3966 				error = 0;
3967 		}
3968 	}
3969 	rcu_read_unlock();
3970 
3971 	return error;
3972 }
3973 
do_tkill(pid_t tgid,pid_t pid,int sig)3974 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3975 {
3976 	struct kernel_siginfo info;
3977 
3978 	clear_siginfo(&info);
3979 	info.si_signo = sig;
3980 	info.si_errno = 0;
3981 	info.si_code = SI_TKILL;
3982 	info.si_pid = task_tgid_vnr(current);
3983 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3984 
3985 	return do_send_specific(tgid, pid, sig, &info);
3986 }
3987 
3988 /**
3989  *  sys_tgkill - send signal to one specific thread
3990  *  @tgid: the thread group ID of the thread
3991  *  @pid: the PID of the thread
3992  *  @sig: signal to be sent
3993  *
3994  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3995  *  exists but it's not belonging to the target process anymore. This
3996  *  method solves the problem of threads exiting and PIDs getting reused.
3997  */
SYSCALL_DEFINE3(tgkill,pid_t,tgid,pid_t,pid,int,sig)3998 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3999 {
4000 	/* This is only valid for single tasks */
4001 	if (pid <= 0 || tgid <= 0)
4002 		return -EINVAL;
4003 
4004 	return do_tkill(tgid, pid, sig);
4005 }
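
/*
 * Illustrative userspace sketch (raw syscalls; glibc's pthread_kill() is
 * built on this syscall): direct a signal at one specific thread, with the
 * thread group ID guarding against the TID being recycled.
 *
 *	pid_t tgid = getpid();
 *	pid_t tid  = syscall(SYS_gettid);
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */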
4006 
4007 /**
4008  *  sys_tkill - send signal to one specific task
4009  *  @pid: the PID of the task
4010  *  @sig: signal to be sent
4011  *
4012  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
4013  */
SYSCALL_DEFINE2(tkill,pid_t,pid,int,sig)4014 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4015 {
4016 	/* This is only valid for single tasks */
4017 	if (pid <= 0)
4018 		return -EINVAL;
4019 
4020 	return do_tkill(0, pid, sig);
4021 }
4022 
do_rt_sigqueueinfo(pid_t pid,int sig,kernel_siginfo_t * info)4023 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4024 {
4025 	/* Not even root can pretend to send signals from the kernel.
4026 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4027 	 */
4028 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4029 	    (task_pid_vnr(current) != pid))
4030 		return -EPERM;
4031 
4032 	/* POSIX.1b doesn't mention process groups.  */
4033 	return kill_proc_info(sig, info, pid);
4034 }
4035 
4036 /**
4037  *  sys_rt_sigqueueinfo - send signal information to a process
4038  *  @pid: the PID of the target process
4039  *  @sig: signal to be sent
4040  *  @uinfo: signal info to be sent
4041  */
SYSCALL_DEFINE3(rt_sigqueueinfo,pid_t,pid,int,sig,siginfo_t __user *,uinfo)4042 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4043 		siginfo_t __user *, uinfo)
4044 {
4045 	kernel_siginfo_t info;
4046 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4047 	if (unlikely(ret))
4048 		return ret;
4049 	return do_rt_sigqueueinfo(pid, sig, &info);
4050 }
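
/*
 * Illustrative userspace sketch: the usual route into this syscall is the
 * sigqueue(3) wrapper, which sets si_code = SI_QUEUE (negative, so the
 * check in do_rt_sigqueueinfo() above passes) and attaches a caller-chosen
 * value.  "target" below is a hypothetical destination PID.
 *
 *	union sigval val = { .sival_int = 42 };
 *
 *	sigqueue(target, SIGRTMIN, val);
 *
 * The receiver finds the value again in si_value/si_int when it dequeues
 * the signal with an SA_SIGINFO handler or sigwaitinfo(2).
 */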
4051 
4052 #ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,compat_pid_t,pid,int,sig,struct compat_siginfo __user *,uinfo)4053 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4054 			compat_pid_t, pid,
4055 			int, sig,
4056 			struct compat_siginfo __user *, uinfo)
4057 {
4058 	kernel_siginfo_t info;
4059 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4060 	if (unlikely(ret))
4061 		return ret;
4062 	return do_rt_sigqueueinfo(pid, sig, &info);
4063 }
4064 #endif
4065 
do_rt_tgsigqueueinfo(pid_t tgid,pid_t pid,int sig,kernel_siginfo_t * info)4066 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4067 {
4068 	/* This is only valid for single tasks */
4069 	if (pid <= 0 || tgid <= 0)
4070 		return -EINVAL;
4071 
4072 	/* Not even root can pretend to send signals from the kernel.
4073 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4074 	 */
4075 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4076 	    (task_pid_vnr(current) != pid))
4077 		return -EPERM;
4078 
4079 	return do_send_specific(tgid, pid, sig, info);
4080 }
4081 
SYSCALL_DEFINE4(rt_tgsigqueueinfo,pid_t,tgid,pid_t,pid,int,sig,siginfo_t __user *,uinfo)4082 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4083 		siginfo_t __user *, uinfo)
4084 {
4085 	kernel_siginfo_t info;
4086 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4087 	if (unlikely(ret))
4088 		return ret;
4089 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4090 }
4091 
4092 #ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,compat_pid_t,tgid,compat_pid_t,pid,int,sig,struct compat_siginfo __user *,uinfo)4093 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4094 			compat_pid_t, tgid,
4095 			compat_pid_t, pid,
4096 			int, sig,
4097 			struct compat_siginfo __user *, uinfo)
4098 {
4099 	kernel_siginfo_t info;
4100 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4101 	if (unlikely(ret))
4102 		return ret;
4103 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4104 }
4105 #endif
4106 
4107 /*
4108  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4109  */
kernel_sigaction(int sig,__sighandler_t action)4110 void kernel_sigaction(int sig, __sighandler_t action)
4111 {
4112 	spin_lock_irq(&current->sighand->siglock);
4113 	current->sighand->action[sig - 1].sa.sa_handler = action;
4114 	if (action == SIG_IGN) {
4115 		sigset_t mask;
4116 
4117 		sigemptyset(&mask);
4118 		sigaddset(&mask, sig);
4119 
4120 		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4121 		flush_sigqueue_mask(&mask, &current->pending);
4122 		recalc_sigpending();
4123 	}
4124 	spin_unlock_irq(&current->sighand->siglock);
4125 }
4126 EXPORT_SYMBOL(kernel_sigaction);
4127 
sigaction_compat_abi(struct k_sigaction * act,struct k_sigaction * oact)4128 void __weak sigaction_compat_abi(struct k_sigaction *act,
4129 		struct k_sigaction *oact)
4130 {
4131 }
4132 
do_sigaction(int sig,struct k_sigaction * act,struct k_sigaction * oact)4133 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4134 {
4135 	struct task_struct *p = current, *t;
4136 	struct k_sigaction *k;
4137 	sigset_t mask;
4138 
4139 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4140 		return -EINVAL;
4141 
4142 	k = &p->sighand->action[sig-1];
4143 
4144 	spin_lock_irq(&p->sighand->siglock);
4145 	if (k->sa.sa_flags & SA_IMMUTABLE) {
4146 		spin_unlock_irq(&p->sighand->siglock);
4147 		return -EINVAL;
4148 	}
4149 	if (oact)
4150 		*oact = *k;
4151 
4152 	/*
4153 	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4154 	 * e.g. by having an architecture use the bit in their uapi.
4155 	 */
4156 	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4157 
4158 	/*
4159 	 * Clear unknown flag bits in order to allow userspace to detect missing
4160 	 * support for flag bits and to allow the kernel to use non-uapi bits
4161 	 * internally.
4162 	 */
4163 	if (act)
4164 		act->sa.sa_flags &= UAPI_SA_FLAGS;
4165 	if (oact)
4166 		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4167 
4168 	sigaction_compat_abi(act, oact);
4169 
4170 	if (act) {
4171 		sigdelsetmask(&act->sa.sa_mask,
4172 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4173 		*k = *act;
4174 		/*
4175 		 * POSIX 3.3.1.3:
4176 		 *  "Setting a signal action to SIG_IGN for a signal that is
4177 		 *   pending shall cause the pending signal to be discarded,
4178 		 *   whether or not it is blocked."
4179 		 *
4180 		 *  "Setting a signal action to SIG_DFL for a signal that is
4181 		 *   pending and whose default action is to ignore the signal
4182 		 *   (for example, SIGCHLD), shall cause the pending signal to
4183 		 *   be discarded, whether or not it is blocked"
4184 		 */
4185 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4186 			sigemptyset(&mask);
4187 			sigaddset(&mask, sig);
4188 			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4189 			for_each_thread(p, t)
4190 				flush_sigqueue_mask(&mask, &t->pending);
4191 		}
4192 	}
4193 
4194 	spin_unlock_irq(&p->sighand->siglock);
4195 	return 0;
4196 }
4197 
4198 #ifdef CONFIG_DYNAMIC_SIGFRAME
sigaltstack_lock(void)4199 static inline void sigaltstack_lock(void)
4200 	__acquires(&current->sighand->siglock)
4201 {
4202 	spin_lock_irq(&current->sighand->siglock);
4203 }
4204 
sigaltstack_unlock(void)4205 static inline void sigaltstack_unlock(void)
4206 	__releases(&current->sighand->siglock)
4207 {
4208 	spin_unlock_irq(&current->sighand->siglock);
4209 }
4210 #else
sigaltstack_lock(void)4211 static inline void sigaltstack_lock(void) { }
sigaltstack_unlock(void)4212 static inline void sigaltstack_unlock(void) { }
4213 #endif
4214 
4215 static int
do_sigaltstack(const stack_t * ss,stack_t * oss,unsigned long sp,size_t min_ss_size)4216 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4217 		size_t min_ss_size)
4218 {
4219 	struct task_struct *t = current;
4220 	int ret = 0;
4221 
4222 	if (oss) {
4223 		memset(oss, 0, sizeof(stack_t));
4224 		oss->ss_sp = (void __user *) t->sas_ss_sp;
4225 		oss->ss_size = t->sas_ss_size;
4226 		oss->ss_flags = sas_ss_flags(sp) |
4227 			(current->sas_ss_flags & SS_FLAG_BITS);
4228 	}
4229 
4230 	if (ss) {
4231 		void __user *ss_sp = ss->ss_sp;
4232 		size_t ss_size = ss->ss_size;
4233 		unsigned ss_flags = ss->ss_flags;
4234 		int ss_mode;
4235 
4236 		if (unlikely(on_sig_stack(sp)))
4237 			return -EPERM;
4238 
4239 		ss_mode = ss_flags & ~SS_FLAG_BITS;
4240 		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4241 				ss_mode != 0))
4242 			return -EINVAL;
4243 
4244 		/*
4245 		 * Return before taking any locks if no actual
4246 		 * sigaltstack changes were requested.
4247 		 */
4248 		if (t->sas_ss_sp == (unsigned long)ss_sp &&
4249 		    t->sas_ss_size == ss_size &&
4250 		    t->sas_ss_flags == ss_flags)
4251 			return 0;
4252 
4253 		sigaltstack_lock();
4254 		if (ss_mode == SS_DISABLE) {
4255 			ss_size = 0;
4256 			ss_sp = NULL;
4257 		} else {
4258 			if (unlikely(ss_size < min_ss_size))
4259 				ret = -ENOMEM;
4260 			if (!sigaltstack_size_valid(ss_size))
4261 				ret = -ENOMEM;
4262 		}
4263 		if (!ret) {
4264 			t->sas_ss_sp = (unsigned long) ss_sp;
4265 			t->sas_ss_size = ss_size;
4266 			t->sas_ss_flags = ss_flags;
4267 		}
4268 		sigaltstack_unlock();
4269 	}
4270 	return ret;
4271 }
4272 
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss,stack_t __user *,uoss)4273 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4274 {
4275 	stack_t new, old;
4276 	int err;
4277 	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4278 		return -EFAULT;
4279 	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4280 			      current_user_stack_pointer(),
4281 			      MINSIGSTKSZ);
4282 	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4283 		err = -EFAULT;
4284 	return err;
4285 }
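
/*
 * Illustrative userspace sketch: set up an alternate stack so a SIGSEGV
 * handler can still run after the normal stack has overflowed.  The
 * handler ("segv_handler", hypothetical) must be installed with
 * SA_ONSTACK; the stack must be at least MINSIGSTKSZ bytes or
 * do_sigaltstack() above returns -ENOMEM.
 *
 *	static char altstack[64 * 1024];
 *
 *	stack_t ss = {
 *		.ss_sp    = altstack,
 *		.ss_size  = sizeof(altstack),
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_sigaction = segv_handler,
 *		.sa_flags     = SA_SIGINFO | SA_ONSTACK,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */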
4286 
restore_altstack(const stack_t __user * uss)4287 int restore_altstack(const stack_t __user *uss)
4288 {
4289 	stack_t new;
4290 	if (copy_from_user(&new, uss, sizeof(stack_t)))
4291 		return -EFAULT;
4292 	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4293 			     MINSIGSTKSZ);
4294 	/* squash all but EFAULT for now */
4295 	return 0;
4296 }
4297 
__save_altstack(stack_t __user * uss,unsigned long sp)4298 int __save_altstack(stack_t __user *uss, unsigned long sp)
4299 {
4300 	struct task_struct *t = current;
4301 	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4302 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4303 		__put_user(t->sas_ss_size, &uss->ss_size);
4304 	return err;
4305 }
4306 
4307 #ifdef CONFIG_COMPAT
do_compat_sigaltstack(const compat_stack_t __user * uss_ptr,compat_stack_t __user * uoss_ptr)4308 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4309 				 compat_stack_t __user *uoss_ptr)
4310 {
4311 	stack_t uss, uoss;
4312 	int ret;
4313 
4314 	if (uss_ptr) {
4315 		compat_stack_t uss32;
4316 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4317 			return -EFAULT;
4318 		uss.ss_sp = compat_ptr(uss32.ss_sp);
4319 		uss.ss_flags = uss32.ss_flags;
4320 		uss.ss_size = uss32.ss_size;
4321 	}
4322 	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4323 			     compat_user_stack_pointer(),
4324 			     COMPAT_MINSIGSTKSZ);
4325 	if (ret >= 0 && uoss_ptr)  {
4326 		compat_stack_t old;
4327 		memset(&old, 0, sizeof(old));
4328 		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4329 		old.ss_flags = uoss.ss_flags;
4330 		old.ss_size = uoss.ss_size;
4331 		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4332 			ret = -EFAULT;
4333 	}
4334 	return ret;
4335 }
4336 
COMPAT_SYSCALL_DEFINE2(sigaltstack,const compat_stack_t __user *,uss_ptr,compat_stack_t __user *,uoss_ptr)4337 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4338 			const compat_stack_t __user *, uss_ptr,
4339 			compat_stack_t __user *, uoss_ptr)
4340 {
4341 	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4342 }
4343 
compat_restore_altstack(const compat_stack_t __user * uss)4344 int compat_restore_altstack(const compat_stack_t __user *uss)
4345 {
4346 	int err = do_compat_sigaltstack(uss, NULL);
4347 	/* squash all but -EFAULT for now */
4348 	return err == -EFAULT ? err : 0;
4349 }
4350 
__compat_save_altstack(compat_stack_t __user * uss,unsigned long sp)4351 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4352 {
4353 	int err;
4354 	struct task_struct *t = current;
4355 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4356 			 &uss->ss_sp) |
4357 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4358 		__put_user(t->sas_ss_size, &uss->ss_size);
4359 	return err;
4360 }
4361 #endif
4362 
4363 #ifdef __ARCH_WANT_SYS_SIGPENDING
4364 
4365 /**
4366  *  sys_sigpending - examine pending signals
4367  *  @uset: where the mask of pending signals is returned
4368  */
SYSCALL_DEFINE1(sigpending,old_sigset_t __user *,uset)4369 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4370 {
4371 	sigset_t set;
4372 
4373 	if (sizeof(old_sigset_t) > sizeof(*uset))
4374 		return -EINVAL;
4375 
4376 	do_sigpending(&set);
4377 
4378 	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4379 		return -EFAULT;
4380 
4381 	return 0;
4382 }
4383 
4384 #ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending,compat_old_sigset_t __user *,set32)4385 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4386 {
4387 	sigset_t set;
4388 
4389 	do_sigpending(&set);
4390 
4391 	return put_user(set.sig[0], set32);
4392 }
4393 #endif
4394 
4395 #endif
4396 
4397 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4398 /**
4399  *  sys_sigprocmask - examine and change blocked signals
4400  *  @how: whether to add, remove, or set signals
4401  *  @nset: signals to add or remove (if non-null)
4402  *  @oset: previous value of signal mask if non-null
4403  *
4404  * Some platforms have their own version with special arguments;
4405  * others support only sys_rt_sigprocmask.
4406  */
4407 
SYSCALL_DEFINE3(sigprocmask,int,how,old_sigset_t __user *,nset,old_sigset_t __user *,oset)4408 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4409 		old_sigset_t __user *, oset)
4410 {
4411 	old_sigset_t old_set, new_set;
4412 	sigset_t new_blocked;
4413 
4414 	old_set = current->blocked.sig[0];
4415 
4416 	if (nset) {
4417 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4418 			return -EFAULT;
4419 
4420 		new_blocked = current->blocked;
4421 
4422 		switch (how) {
4423 		case SIG_BLOCK:
4424 			sigaddsetmask(&new_blocked, new_set);
4425 			break;
4426 		case SIG_UNBLOCK:
4427 			sigdelsetmask(&new_blocked, new_set);
4428 			break;
4429 		case SIG_SETMASK:
4430 			new_blocked.sig[0] = new_set;
4431 			break;
4432 		default:
4433 			return -EINVAL;
4434 		}
4435 
4436 		set_current_blocked(&new_blocked);
4437 	}
4438 
4439 	if (oset) {
4440 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4441 			return -EFAULT;
4442 	}
4443 
4444 	return 0;
4445 }
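
/*
 * Illustrative userspace sketch of the @how semantics (via the glibc
 * sigprocmask(3) wrapper; most ports reach rt_sigprocmask instead, but
 * SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK behave the same way):
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGINT);
 *
 *	sigprocmask(SIG_BLOCK, &block, &old);	add SIGINT to the mask
 *	do_critical_work();			hypothetical critical section
 *	sigprocmask(SIG_SETMASK, &old, NULL);	restore the saved mask
 */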
4446 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4447 
4448 #ifndef CONFIG_ODD_RT_SIGACTION
4449 /**
4450  *  sys_rt_sigaction - alter an action taken by a process
4451  *  @sig: signal whose action is to be changed
4452  *  @act: new sigaction
4453  *  @oact: used to save the previous sigaction
4454  *  @sigsetsize: size of sigset_t type
4455  */
SYSCALL_DEFINE4(rt_sigaction,int,sig,const struct sigaction __user *,act,struct sigaction __user *,oact,size_t,sigsetsize)4456 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4457 		const struct sigaction __user *, act,
4458 		struct sigaction __user *, oact,
4459 		size_t, sigsetsize)
4460 {
4461 	struct k_sigaction new_sa, old_sa;
4462 	int ret;
4463 
4464 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4465 	if (sigsetsize != sizeof(sigset_t))
4466 		return -EINVAL;
4467 
4468 	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4469 		return -EFAULT;
4470 
4471 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4472 	if (ret)
4473 		return ret;
4474 
4475 	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4476 		return -EFAULT;
4477 
4478 	return 0;
4479 }
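
/*
 * Illustrative userspace sketch (via the glibc sigaction(2) wrapper, which
 * ends up here): install a SIGCHLD handler ("chld_handler", hypothetical),
 * then later switch to SIG_IGN; per the POSIX rule quoted in
 * do_sigaction(), that switch also discards any SIGCHLD still pending.
 *
 *	struct sigaction sa = { .sa_handler = chld_handler };
 *
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGCHLD, &sa, NULL);
 *
 *	sa.sa_handler = SIG_IGN;
 *	sigaction(SIGCHLD, &sa, NULL);
 */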
4480 #ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction,int,sig,const struct compat_sigaction __user *,act,struct compat_sigaction __user *,oact,compat_size_t,sigsetsize)4481 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4482 		const struct compat_sigaction __user *, act,
4483 		struct compat_sigaction __user *, oact,
4484 		compat_size_t, sigsetsize)
4485 {
4486 	struct k_sigaction new_ka, old_ka;
4487 #ifdef __ARCH_HAS_SA_RESTORER
4488 	compat_uptr_t restorer;
4489 #endif
4490 	int ret;
4491 
4492 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4493 	if (sigsetsize != sizeof(compat_sigset_t))
4494 		return -EINVAL;
4495 
4496 	if (act) {
4497 		compat_uptr_t handler;
4498 		ret = get_user(handler, &act->sa_handler);
4499 		new_ka.sa.sa_handler = compat_ptr(handler);
4500 #ifdef __ARCH_HAS_SA_RESTORER
4501 		ret |= get_user(restorer, &act->sa_restorer);
4502 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4503 #endif
4504 		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4505 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4506 		if (ret)
4507 			return -EFAULT;
4508 	}
4509 
4510 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4511 	if (!ret && oact) {
4512 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4513 			       &oact->sa_handler);
4514 		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4515 					 sizeof(oact->sa_mask));
4516 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4517 #ifdef __ARCH_HAS_SA_RESTORER
4518 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4519 				&oact->sa_restorer);
4520 #endif
4521 	}
4522 	return ret;
4523 }
4524 #endif
4525 #endif /* !CONFIG_ODD_RT_SIGACTION */
4526 
4527 #ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction,int,sig,const struct old_sigaction __user *,act,struct old_sigaction __user *,oact)4528 SYSCALL_DEFINE3(sigaction, int, sig,
4529 		const struct old_sigaction __user *, act,
4530 	        struct old_sigaction __user *, oact)
4531 {
4532 	struct k_sigaction new_ka, old_ka;
4533 	int ret;
4534 
4535 	if (act) {
4536 		old_sigset_t mask;
4537 		if (!access_ok(act, sizeof(*act)) ||
4538 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4539 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4540 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4541 		    __get_user(mask, &act->sa_mask))
4542 			return -EFAULT;
4543 #ifdef __ARCH_HAS_KA_RESTORER
4544 		new_ka.ka_restorer = NULL;
4545 #endif
4546 		siginitset(&new_ka.sa.sa_mask, mask);
4547 	}
4548 
4549 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4550 
4551 	if (!ret && oact) {
4552 		if (!access_ok(oact, sizeof(*oact)) ||
4553 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4554 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4555 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4556 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4557 			return -EFAULT;
4558 	}
4559 
4560 	return ret;
4561 }
4562 #endif
4563 #ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction,int,sig,const struct compat_old_sigaction __user *,act,struct compat_old_sigaction __user *,oact)4564 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4565 		const struct compat_old_sigaction __user *, act,
4566 	        struct compat_old_sigaction __user *, oact)
4567 {
4568 	struct k_sigaction new_ka, old_ka;
4569 	int ret;
4570 	compat_old_sigset_t mask;
4571 	compat_uptr_t handler, restorer;
4572 
4573 	if (act) {
4574 		if (!access_ok(act, sizeof(*act)) ||
4575 		    __get_user(handler, &act->sa_handler) ||
4576 		    __get_user(restorer, &act->sa_restorer) ||
4577 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4578 		    __get_user(mask, &act->sa_mask))
4579 			return -EFAULT;
4580 
4581 #ifdef __ARCH_HAS_KA_RESTORER
4582 		new_ka.ka_restorer = NULL;
4583 #endif
4584 		new_ka.sa.sa_handler = compat_ptr(handler);
4585 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4586 		siginitset(&new_ka.sa.sa_mask, mask);
4587 	}
4588 
4589 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4590 
4591 	if (!ret && oact) {
4592 		if (!access_ok(oact, sizeof(*oact)) ||
4593 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4594 			       &oact->sa_handler) ||
4595 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4596 			       &oact->sa_restorer) ||
4597 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4598 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4599 			return -EFAULT;
4600 	}
4601 	return ret;
4602 }
4603 #endif
4604 
4605 #ifdef CONFIG_SGETMASK_SYSCALL
4606 
4607 /*
4608  * For backwards compatibility.  Functionality superseded by sigprocmask.
4609  */
SYSCALL_DEFINE0(sgetmask)4610 SYSCALL_DEFINE0(sgetmask)
4611 {
4612 	/* SMP safe */
4613 	return current->blocked.sig[0];
4614 }
4615 
SYSCALL_DEFINE1(ssetmask,int,newmask)4616 SYSCALL_DEFINE1(ssetmask, int, newmask)
4617 {
4618 	int old = current->blocked.sig[0];
4619 	sigset_t newset;
4620 
4621 	siginitset(&newset, newmask);
4622 	set_current_blocked(&newset);
4623 
4624 	return old;
4625 }
4626 #endif /* CONFIG_SGETMASK_SYSCALL */
4627 
4628 #ifdef __ARCH_WANT_SYS_SIGNAL
4629 /*
4630  * For backwards compatibility.  Functionality superseded by sigaction.
4631  */
SYSCALL_DEFINE2(signal,int,sig,__sighandler_t,handler)4632 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4633 {
4634 	struct k_sigaction new_sa, old_sa;
4635 	int ret;
4636 
4637 	new_sa.sa.sa_handler = handler;
4638 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4639 	sigemptyset(&new_sa.sa.sa_mask);
4640 
4641 	ret = do_sigaction(sig, &new_sa, &old_sa);
4642 
4643 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4644 }
4645 #endif /* __ARCH_WANT_SYS_SIGNAL */
4646 
4647 #ifdef __ARCH_WANT_SYS_PAUSE
4648 
SYSCALL_DEFINE0(pause)4649 SYSCALL_DEFINE0(pause)
4650 {
4651 	while (!signal_pending(current)) {
4652 		__set_current_state(TASK_INTERRUPTIBLE);
4653 		schedule();
4654 	}
4655 	return -ERESTARTNOHAND;
4656 }
4657 
4658 #endif
4659 
sigsuspend(sigset_t * set)4660 static int sigsuspend(sigset_t *set)
4661 {
4662 	current->saved_sigmask = current->blocked;
4663 	set_current_blocked(set);
4664 
4665 	while (!signal_pending(current)) {
4666 		__set_current_state(TASK_INTERRUPTIBLE);
4667 		schedule();
4668 	}
4669 	set_restore_sigmask();
4670 	return -ERESTARTNOHAND;
4671 }
4672 
4673 /**
4674  *  sys_rt_sigsuspend - replace the signal mask with the
4675  *	@unewset value until a signal is received
4676  *  @unewset: new signal mask value
4677  *  @sigsetsize: size of sigset_t type
4678  */
SYSCALL_DEFINE2(rt_sigsuspend,sigset_t __user *,unewset,size_t,sigsetsize)4679 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4680 {
4681 	sigset_t newset;
4682 
4683 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4684 	if (sigsetsize != sizeof(sigset_t))
4685 		return -EINVAL;
4686 
4687 	if (copy_from_user(&newset, unewset, sizeof(newset)))
4688 		return -EFAULT;
4689 	return sigsuspend(&newset);
4690 }
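
/*
 * Illustrative userspace sketch (via the glibc sigsuspend(3) wrapper): the
 * classic race-free wait.  SIGCHLD stays blocked while the condition is
 * checked; sigsuspend() atomically installs the old mask (SIGCHLD allowed)
 * for the duration of the sleep, mirroring what the kernel-side
 * sigsuspend() above does with current->blocked and saved_sigmask.
 * "child_exited" is a hypothetical flag set by the caller's SIGCHLD
 * handler.
 *
 *	sigset_t block, waitmask;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);
 *
 *	while (!child_exited)
 *		sigsuspend(&waitmask);
 *
 *	sigprocmask(SIG_SETMASK, &waitmask, NULL);
 */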
4691 
4692 #ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend,compat_sigset_t __user *,unewset,compat_size_t,sigsetsize)4693 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4694 {
4695 	sigset_t newset;
4696 
4697 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4698 	if (sigsetsize != sizeof(sigset_t))
4699 		return -EINVAL;
4700 
4701 	if (get_compat_sigset(&newset, unewset))
4702 		return -EFAULT;
4703 	return sigsuspend(&newset);
4704 }
4705 #endif
4706 
4707 #ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend,old_sigset_t,mask)4708 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4709 {
4710 	sigset_t blocked;
4711 	siginitset(&blocked, mask);
4712 	return sigsuspend(&blocked);
4713 }
4714 #endif
4715 #ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend,int,unused1,int,unused2,old_sigset_t,mask)4716 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4717 {
4718 	sigset_t blocked;
4719 	siginitset(&blocked, mask);
4720 	return sigsuspend(&blocked);
4721 }
4722 #endif
4723 
arch_vma_name(struct vm_area_struct * vma)4724 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4725 {
4726 	return NULL;
4727 }
4728 
siginfo_buildtime_checks(void)4729 static inline void siginfo_buildtime_checks(void)
4730 {
4731 	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4732 
4733 	/* Verify the offsets in the two siginfos match */
4734 #define CHECK_OFFSET(field) \
4735 	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4736 
4737 	/* kill */
4738 	CHECK_OFFSET(si_pid);
4739 	CHECK_OFFSET(si_uid);
4740 
4741 	/* timer */
4742 	CHECK_OFFSET(si_tid);
4743 	CHECK_OFFSET(si_overrun);
4744 	CHECK_OFFSET(si_value);
4745 
4746 	/* rt */
4747 	CHECK_OFFSET(si_pid);
4748 	CHECK_OFFSET(si_uid);
4749 	CHECK_OFFSET(si_value);
4750 
4751 	/* sigchld */
4752 	CHECK_OFFSET(si_pid);
4753 	CHECK_OFFSET(si_uid);
4754 	CHECK_OFFSET(si_status);
4755 	CHECK_OFFSET(si_utime);
4756 	CHECK_OFFSET(si_stime);
4757 
4758 	/* sigfault */
4759 	CHECK_OFFSET(si_addr);
4760 	CHECK_OFFSET(si_trapno);
4761 	CHECK_OFFSET(si_addr_lsb);
4762 	CHECK_OFFSET(si_lower);
4763 	CHECK_OFFSET(si_upper);
4764 	CHECK_OFFSET(si_pkey);
4765 	CHECK_OFFSET(si_perf_data);
4766 	CHECK_OFFSET(si_perf_type);
4767 	CHECK_OFFSET(si_perf_flags);
4768 
4769 	/* sigpoll */
4770 	CHECK_OFFSET(si_band);
4771 	CHECK_OFFSET(si_fd);
4772 
4773 	/* sigsys */
4774 	CHECK_OFFSET(si_call_addr);
4775 	CHECK_OFFSET(si_syscall);
4776 	CHECK_OFFSET(si_arch);
4777 #undef CHECK_OFFSET
4778 
4779 	/* usb asyncio */
4780 	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4781 		     offsetof(struct siginfo, si_addr));
4782 	if (sizeof(int) == sizeof(void __user *)) {
4783 		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4784 			     sizeof(void __user *));
4785 	} else {
4786 		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4787 			      sizeof_field(struct siginfo, si_uid)) !=
4788 			     sizeof(void __user *));
4789 		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4790 			     offsetof(struct siginfo, si_uid));
4791 	}
4792 #ifdef CONFIG_COMPAT
4793 	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4794 		     offsetof(struct compat_siginfo, si_addr));
4795 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4796 		     sizeof(compat_uptr_t));
4797 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4798 		     sizeof_field(struct siginfo, si_pid));
4799 #endif
4800 }
4801 
4802 #if defined(CONFIG_SYSCTL)
4803 static struct ctl_table signal_debug_table[] = {
4804 #ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
4805 	{
4806 		.procname	= "exception-trace",
4807 		.data		= &show_unhandled_signals,
4808 		.maxlen		= sizeof(int),
4809 		.mode		= 0644,
4810 		.proc_handler	= proc_dointvec
4811 	},
4812 #endif
4813 	{ }
4814 };
4815 
init_signal_sysctls(void)4816 static int __init init_signal_sysctls(void)
4817 {
4818 	register_sysctl_init("debug", signal_debug_table);
4819 	return 0;
4820 }
4821 early_initcall(init_signal_sysctls);
4822 #endif /* CONFIG_SYSCTL */
4823 
signals_init(void)4824 void __init signals_init(void)
4825 {
4826 	siginfo_buildtime_checks();
4827 
4828 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4829 }
4830 
4831 #ifdef CONFIG_KGDB_KDB
4832 #include <linux/kdb.h>
4833 /*
4834  * kdb_send_sig - Allows kdb to send signals without exposing
4835  * signal internals.  This function checks if the required locks are
4836  * available before calling the main signal code, to avoid kdb
4837  * deadlocks.
4838  */
kdb_send_sig(struct task_struct * t,int sig)4839 void kdb_send_sig(struct task_struct *t, int sig)
4840 {
4841 	static struct task_struct *kdb_prev_t;
4842 	int new_t, ret;
4843 	if (!spin_trylock(&t->sighand->siglock)) {
4844 		kdb_printf("Can't do kill command now.\n"
4845 			   "The sigmask lock is held somewhere else in "
4846 			   "kernel, try again later\n");
4847 		return;
4848 	}
4849 	new_t = kdb_prev_t != t;
4850 	kdb_prev_t = t;
4851 	if (!task_is_running(t) && new_t) {
4852 		spin_unlock(&t->sighand->siglock);
4853 		kdb_printf("Process is not RUNNING, sending a signal from "
4854 			   "kdb risks deadlock\n"
4855 			   "on the run queue locks. "
4856 			   "The signal has _not_ been sent.\n"
4857 			   "Reissue the kill command if you want to risk "
4858 			   "the deadlock.\n");
4859 		return;
4860 	}
4861 	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4862 	spin_unlock(&t->sighand->siglock);
4863 	if (ret)
4864 		kdb_printf("Fail to deliver Signal %d to process %d.\n",
4865 			   sig, t->pid);
4866 	else
4867 		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4868 }
4869 #endif	/* CONFIG_KGDB_KDB */
4870