xref: /openbmc/linux/kernel/signal.c (revision 21278aea)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/coredump.h>
21 #include <linux/security.h>
22 #include <linux/syscalls.h>
23 #include <linux/ptrace.h>
24 #include <linux/signal.h>
25 #include <linux/signalfd.h>
26 #include <linux/ratelimit.h>
27 #include <linux/tracehook.h>
28 #include <linux/capability.h>
29 #include <linux/freezer.h>
30 #include <linux/pid_namespace.h>
31 #include <linux/nsproxy.h>
32 #include <linux/user_namespace.h>
33 #include <linux/uprobes.h>
34 #include <linux/compat.h>
35 #include <linux/cn_proc.h>
36 #include <linux/compiler.h>
37 
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/signal.h>
40 
41 #include <asm/param.h>
42 #include <asm/uaccess.h>
43 #include <asm/unistd.h>
44 #include <asm/siginfo.h>
45 #include <asm/cacheflush.h>
46 #include "audit.h"	/* audit_signal_info() */
47 
48 /*
49  * SLAB caches for signal bits.
50  */
51 
52 static struct kmem_cache *sigqueue_cachep;
53 
54 int print_fatal_signals __read_mostly;
55 
56 static void __user *sig_handler(struct task_struct *t, int sig)
57 {
58 	return t->sighand->action[sig - 1].sa.sa_handler;
59 }
60 
61 static int sig_handler_ignored(void __user *handler, int sig)
62 {
63 	/* Is it explicitly or implicitly ignored? */
64 	return handler == SIG_IGN ||
65 		(handler == SIG_DFL && sig_kernel_ignore(sig));
66 }
67 
68 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
69 {
70 	void __user *handler;
71 
72 	handler = sig_handler(t, sig);
73 
74 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
75 			handler == SIG_DFL && !force)
76 		return 1;
77 
78 	return sig_handler_ignored(handler, sig);
79 }
80 
81 static int sig_ignored(struct task_struct *t, int sig, bool force)
82 {
83 	/*
84 	 * Blocked signals are never ignored, since the
85 	 * signal handler may change by the time it is
86 	 * unblocked.
87 	 */
88 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
89 		return 0;
90 
91 	if (!sig_task_ignored(t, sig, force))
92 		return 0;
93 
94 	/*
95 	 * Tracers may want to know about even ignored signals.
96 	 */
97 	return !t->ptrace;
98 }
99 
100 /*
101  * Re-calculate pending state from the set of locally pending
102  * signals, globally pending signals, and blocked signals.
103  */
104 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
105 {
106 	unsigned long ready;
107 	long i;
108 
109 	switch (_NSIG_WORDS) {
110 	default:
111 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
112 			ready |= signal->sig[i] &~ blocked->sig[i];
113 		break;
114 
115 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
116 		ready |= signal->sig[2] &~ blocked->sig[2];
117 		ready |= signal->sig[1] &~ blocked->sig[1];
118 		ready |= signal->sig[0] &~ blocked->sig[0];
119 		break;
120 
121 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
122 		ready |= signal->sig[0] &~ blocked->sig[0];
123 		break;
124 
125 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
126 	}
127 	return ready !=	0;
128 }
129 
130 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
131 
132 static int recalc_sigpending_tsk(struct task_struct *t)
133 {
134 	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
135 	    PENDING(&t->pending, &t->blocked) ||
136 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
137 		set_tsk_thread_flag(t, TIF_SIGPENDING);
138 		return 1;
139 	}
140 	/*
141 	 * We must never clear the flag in another thread, or in current
142 	 * when it's possible the current syscall is returning -ERESTART*.
143 	 * So we don't clear it here, and only callers who know they should do.
144 	 */
145 	return 0;
146 }
147 
148 /*
149  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
150  * This is superfluous when called on current, the wakeup is a harmless no-op.
151  */
152 void recalc_sigpending_and_wake(struct task_struct *t)
153 {
154 	if (recalc_sigpending_tsk(t))
155 		signal_wake_up(t, 0);
156 }
157 
158 void recalc_sigpending(void)
159 {
160 	if (!recalc_sigpending_tsk(current) && !freezing(current))
161 		clear_thread_flag(TIF_SIGPENDING);
162 
163 }
164 
165 /* Given the mask, find the first available signal that should be serviced. */
166 
167 #define SYNCHRONOUS_MASK \
168 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
169 	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
170 
171 int next_signal(struct sigpending *pending, sigset_t *mask)
172 {
173 	unsigned long i, *s, *m, x;
174 	int sig = 0;
175 
176 	s = pending->signal.sig;
177 	m = mask->sig;
178 
179 	/*
180 	 * Handle the first word specially: it contains the
181 	 * synchronous signals that need to be dequeued first.
182 	 */
183 	x = *s &~ *m;
184 	if (x) {
185 		if (x & SYNCHRONOUS_MASK)
186 			x &= SYNCHRONOUS_MASK;
187 		sig = ffz(~x) + 1;
188 		return sig;
189 	}
190 
191 	switch (_NSIG_WORDS) {
192 	default:
193 		for (i = 1; i < _NSIG_WORDS; ++i) {
194 			x = *++s &~ *++m;
195 			if (!x)
196 				continue;
197 			sig = ffz(~x) + i*_NSIG_BPW + 1;
198 			break;
199 		}
200 		break;
201 
202 	case 2:
203 		x = s[1] &~ m[1];
204 		if (!x)
205 			break;
206 		sig = ffz(~x) + _NSIG_BPW + 1;
207 		break;
208 
209 	case 1:
210 		/* Nothing to do */
211 		break;
212 	}
213 
214 	return sig;
215 }
216 
217 static inline void print_dropped_signal(int sig)
218 {
219 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
220 
221 	if (!print_fatal_signals)
222 		return;
223 
224 	if (!__ratelimit(&ratelimit_state))
225 		return;
226 
227 	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
228 				current->comm, current->pid, sig);
229 }
230 
231 /**
232  * task_set_jobctl_pending - set jobctl pending bits
233  * @task: target task
234  * @mask: pending bits to set
235  *
236  * Clear @mask from @task->jobctl.  @mask must be subset of
237  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
238  * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
239  * cleared.  If @task is already being killed or exiting, this function
240  * becomes noop.
241  *
242  * CONTEXT:
243  * Must be called with @task->sighand->siglock held.
244  *
245  * RETURNS:
246  * %true if @mask is set, %false if made noop because @task was dying.
247  */
248 bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
249 {
250 	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
251 			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
252 	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
253 
254 	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
255 		return false;
256 
257 	if (mask & JOBCTL_STOP_SIGMASK)
258 		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
259 
260 	task->jobctl |= mask;
261 	return true;
262 }
263 
264 /**
265  * task_clear_jobctl_trapping - clear jobctl trapping bit
266  * @task: target task
267  *
268  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
269  * Clear it and wake up the ptracer.  Note that we don't need any further
270  * locking.  @task->siglock guarantees that @task->parent points to the
271  * ptracer.
272  *
273  * CONTEXT:
274  * Must be called with @task->sighand->siglock held.
275  */
276 void task_clear_jobctl_trapping(struct task_struct *task)
277 {
278 	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
279 		task->jobctl &= ~JOBCTL_TRAPPING;
280 		smp_mb();	/* advised by wake_up_bit() */
281 		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
282 	}
283 }
284 
285 /**
286  * task_clear_jobctl_pending - clear jobctl pending bits
287  * @task: target task
288  * @mask: pending bits to clear
289  *
290  * Clear @mask from @task->jobctl.  @mask must be subset of
291  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
292  * STOP bits are cleared together.
293  *
294  * If clearing of @mask leaves no stop or trap pending, this function calls
295  * task_clear_jobctl_trapping().
296  *
297  * CONTEXT:
298  * Must be called with @task->sighand->siglock held.
299  */
300 void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
301 {
302 	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
303 
304 	if (mask & JOBCTL_STOP_PENDING)
305 		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
306 
307 	task->jobctl &= ~mask;
308 
309 	if (!(task->jobctl & JOBCTL_PENDING_MASK))
310 		task_clear_jobctl_trapping(task);
311 }
312 
313 /**
314  * task_participate_group_stop - participate in a group stop
315  * @task: task participating in a group stop
316  *
317  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
318  * Group stop states are cleared and the group stop count is consumed if
319  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
320  * stop, the appropriate %SIGNAL_* flags are set.
321  *
322  * CONTEXT:
323  * Must be called with @task->sighand->siglock held.
324  *
325  * RETURNS:
326  * %true if group stop completion should be notified to the parent, %false
327  * otherwise.
328  */
329 static bool task_participate_group_stop(struct task_struct *task)
330 {
331 	struct signal_struct *sig = task->signal;
332 	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
333 
334 	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
335 
336 	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
337 
338 	if (!consume)
339 		return false;
340 
341 	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
342 		sig->group_stop_count--;
343 
344 	/*
345 	 * Tell the caller to notify completion iff we are entering into a
346 	 * fresh group stop.  Read comment in do_signal_stop() for details.
347 	 */
348 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
349 		sig->flags = SIGNAL_STOP_STOPPED;
350 		return true;
351 	}
352 	return false;
353 }
354 
355 /*
356  * allocate a new signal queue record
357  * - this may be called without locks if and only if t == current, otherwise an
358  *   appropriate lock must be held to stop the target task from exiting
359  */
360 static struct sigqueue *
361 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
362 {
363 	struct sigqueue *q = NULL;
364 	struct user_struct *user;
365 
366 	/*
367 	 * Protect access to @t credentials. This can go away when all
368 	 * callers hold rcu read lock.
369 	 */
370 	rcu_read_lock();
371 	user = get_uid(__task_cred(t)->user);
372 	atomic_inc(&user->sigpending);
373 	rcu_read_unlock();
374 
375 	if (override_rlimit ||
376 	    atomic_read(&user->sigpending) <=
377 			task_rlimit(t, RLIMIT_SIGPENDING)) {
378 		q = kmem_cache_alloc(sigqueue_cachep, flags);
379 	} else {
380 		print_dropped_signal(sig);
381 	}
382 
383 	if (unlikely(q == NULL)) {
384 		atomic_dec(&user->sigpending);
385 		free_uid(user);
386 	} else {
387 		INIT_LIST_HEAD(&q->list);
388 		q->flags = 0;
389 		q->user = user;
390 	}
391 
392 	return q;
393 }
394 
395 static void __sigqueue_free(struct sigqueue *q)
396 {
397 	if (q->flags & SIGQUEUE_PREALLOC)
398 		return;
399 	atomic_dec(&q->user->sigpending);
400 	free_uid(q->user);
401 	kmem_cache_free(sigqueue_cachep, q);
402 }
403 
404 void flush_sigqueue(struct sigpending *queue)
405 {
406 	struct sigqueue *q;
407 
408 	sigemptyset(&queue->signal);
409 	while (!list_empty(&queue->list)) {
410 		q = list_entry(queue->list.next, struct sigqueue , list);
411 		list_del_init(&q->list);
412 		__sigqueue_free(q);
413 	}
414 }
415 
416 /*
417  * Flush all pending signals for a task.
418  */
419 void __flush_signals(struct task_struct *t)
420 {
421 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
422 	flush_sigqueue(&t->pending);
423 	flush_sigqueue(&t->signal->shared_pending);
424 }
425 
426 void flush_signals(struct task_struct *t)
427 {
428 	unsigned long flags;
429 
430 	spin_lock_irqsave(&t->sighand->siglock, flags);
431 	__flush_signals(t);
432 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
433 }
434 
435 static void __flush_itimer_signals(struct sigpending *pending)
436 {
437 	sigset_t signal, retain;
438 	struct sigqueue *q, *n;
439 
440 	signal = pending->signal;
441 	sigemptyset(&retain);
442 
443 	list_for_each_entry_safe(q, n, &pending->list, list) {
444 		int sig = q->info.si_signo;
445 
446 		if (likely(q->info.si_code != SI_TIMER)) {
447 			sigaddset(&retain, sig);
448 		} else {
449 			sigdelset(&signal, sig);
450 			list_del_init(&q->list);
451 			__sigqueue_free(q);
452 		}
453 	}
454 
455 	sigorsets(&pending->signal, &signal, &retain);
456 }
457 
458 void flush_itimer_signals(void)
459 {
460 	struct task_struct *tsk = current;
461 	unsigned long flags;
462 
463 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
464 	__flush_itimer_signals(&tsk->pending);
465 	__flush_itimer_signals(&tsk->signal->shared_pending);
466 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
467 }
468 
469 void ignore_signals(struct task_struct *t)
470 {
471 	int i;
472 
473 	for (i = 0; i < _NSIG; ++i)
474 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
475 
476 	flush_signals(t);
477 }
478 
479 /*
480  * Flush all handlers for a task.
481  */
482 
483 void
484 flush_signal_handlers(struct task_struct *t, int force_default)
485 {
486 	int i;
487 	struct k_sigaction *ka = &t->sighand->action[0];
488 	for (i = _NSIG ; i != 0 ; i--) {
489 		if (force_default || ka->sa.sa_handler != SIG_IGN)
490 			ka->sa.sa_handler = SIG_DFL;
491 		ka->sa.sa_flags = 0;
492 #ifdef __ARCH_HAS_SA_RESTORER
493 		ka->sa.sa_restorer = NULL;
494 #endif
495 		sigemptyset(&ka->sa.sa_mask);
496 		ka++;
497 	}
498 }
499 
500 int unhandled_signal(struct task_struct *tsk, int sig)
501 {
502 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
503 	if (is_global_init(tsk))
504 		return 1;
505 	if (handler != SIG_IGN && handler != SIG_DFL)
506 		return 0;
507 	/* if ptraced, let the tracer determine */
508 	return !tsk->ptrace;
509 }
510 
511 /*
512  * Notify the system that a driver wants to block all signals for this
513  * process, and wants to be notified if any signals at all were to be
514  * sent/acted upon.  If the notifier routine returns non-zero, then the
515  * signal will be acted upon after all.  If the notifier routine returns 0,
516  * then then signal will be blocked.  Only one block per process is
517  * allowed.  priv is a pointer to private data that the notifier routine
518  * can use to determine if the signal should be blocked or not.
519  */
520 void
521 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
522 {
523 	unsigned long flags;
524 
525 	spin_lock_irqsave(&current->sighand->siglock, flags);
526 	current->notifier_mask = mask;
527 	current->notifier_data = priv;
528 	current->notifier = notifier;
529 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
530 }
531 
532 /* Notify the system that blocking has ended. */
533 
534 void
535 unblock_all_signals(void)
536 {
537 	unsigned long flags;
538 
539 	spin_lock_irqsave(&current->sighand->siglock, flags);
540 	current->notifier = NULL;
541 	current->notifier_data = NULL;
542 	recalc_sigpending();
543 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
544 }
545 
546 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
547 {
548 	struct sigqueue *q, *first = NULL;
549 
550 	/*
551 	 * Collect the siginfo appropriate to this signal.  Check if
552 	 * there is another siginfo for the same signal.
553 	*/
554 	list_for_each_entry(q, &list->list, list) {
555 		if (q->info.si_signo == sig) {
556 			if (first)
557 				goto still_pending;
558 			first = q;
559 		}
560 	}
561 
562 	sigdelset(&list->signal, sig);
563 
564 	if (first) {
565 still_pending:
566 		list_del_init(&first->list);
567 		copy_siginfo(info, &first->info);
568 		__sigqueue_free(first);
569 	} else {
570 		/*
571 		 * Ok, it wasn't in the queue.  This must be
572 		 * a fast-pathed signal or we must have been
573 		 * out of queue space.  So zero out the info.
574 		 */
575 		info->si_signo = sig;
576 		info->si_errno = 0;
577 		info->si_code = SI_USER;
578 		info->si_pid = 0;
579 		info->si_uid = 0;
580 	}
581 }
582 
583 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
584 			siginfo_t *info)
585 {
586 	int sig = next_signal(pending, mask);
587 
588 	if (sig) {
589 		if (current->notifier) {
590 			if (sigismember(current->notifier_mask, sig)) {
591 				if (!(current->notifier)(current->notifier_data)) {
592 					clear_thread_flag(TIF_SIGPENDING);
593 					return 0;
594 				}
595 			}
596 		}
597 
598 		collect_signal(sig, pending, info);
599 	}
600 
601 	return sig;
602 }
603 
604 /*
605  * Dequeue a signal and return the element to the caller, which is
606  * expected to free it.
607  *
608  * All callers have to hold the siglock.
609  */
610 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
611 {
612 	int signr;
613 
614 	/* We only dequeue private signals from ourselves, we don't let
615 	 * signalfd steal them
616 	 */
617 	signr = __dequeue_signal(&tsk->pending, mask, info);
618 	if (!signr) {
619 		signr = __dequeue_signal(&tsk->signal->shared_pending,
620 					 mask, info);
621 		/*
622 		 * itimer signal ?
623 		 *
624 		 * itimers are process shared and we restart periodic
625 		 * itimers in the signal delivery path to prevent DoS
626 		 * attacks in the high resolution timer case. This is
627 		 * compliant with the old way of self-restarting
628 		 * itimers, as the SIGALRM is a legacy signal and only
629 		 * queued once. Changing the restart behaviour to
630 		 * restart the timer in the signal dequeue path is
631 		 * reducing the timer noise on heavy loaded !highres
632 		 * systems too.
633 		 */
634 		if (unlikely(signr == SIGALRM)) {
635 			struct hrtimer *tmr = &tsk->signal->real_timer;
636 
637 			if (!hrtimer_is_queued(tmr) &&
638 			    tsk->signal->it_real_incr.tv64 != 0) {
639 				hrtimer_forward(tmr, tmr->base->get_time(),
640 						tsk->signal->it_real_incr);
641 				hrtimer_restart(tmr);
642 			}
643 		}
644 	}
645 
646 	recalc_sigpending();
647 	if (!signr)
648 		return 0;
649 
650 	if (unlikely(sig_kernel_stop(signr))) {
651 		/*
652 		 * Set a marker that we have dequeued a stop signal.  Our
653 		 * caller might release the siglock and then the pending
654 		 * stop signal it is about to process is no longer in the
655 		 * pending bitmasks, but must still be cleared by a SIGCONT
656 		 * (and overruled by a SIGKILL).  So those cases clear this
657 		 * shared flag after we've set it.  Note that this flag may
658 		 * remain set after the signal we return is ignored or
659 		 * handled.  That doesn't matter because its only purpose
660 		 * is to alert stop-signal processing code when another
661 		 * processor has come along and cleared the flag.
662 		 */
663 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
664 	}
665 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
666 		/*
667 		 * Release the siglock to ensure proper locking order
668 		 * of timer locks outside of siglocks.  Note, we leave
669 		 * irqs disabled here, since the posix-timers code is
670 		 * about to disable them again anyway.
671 		 */
672 		spin_unlock(&tsk->sighand->siglock);
673 		do_schedule_next_timer(info);
674 		spin_lock(&tsk->sighand->siglock);
675 	}
676 	return signr;
677 }
678 
679 /*
680  * Tell a process that it has a new active signal..
681  *
682  * NOTE! we rely on the previous spin_lock to
683  * lock interrupts for us! We can only be called with
684  * "siglock" held, and the local interrupt must
685  * have been disabled when that got acquired!
686  *
687  * No need to set need_resched since signal event passing
688  * goes through ->blocked
689  */
690 void signal_wake_up_state(struct task_struct *t, unsigned int state)
691 {
692 	set_tsk_thread_flag(t, TIF_SIGPENDING);
693 	/*
694 	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
695 	 * case. We don't check t->state here because there is a race with it
696 	 * executing another processor and just now entering stopped state.
697 	 * By using wake_up_state, we ensure the process will wake up and
698 	 * handle its death signal.
699 	 */
700 	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
701 		kick_process(t);
702 }
703 
704 /*
705  * Remove signals in mask from the pending set and queue.
706  * Returns 1 if any signals were found.
707  *
708  * All callers must be holding the siglock.
709  */
710 static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
711 {
712 	struct sigqueue *q, *n;
713 	sigset_t m;
714 
715 	sigandsets(&m, mask, &s->signal);
716 	if (sigisemptyset(&m))
717 		return 0;
718 
719 	sigandnsets(&s->signal, &s->signal, mask);
720 	list_for_each_entry_safe(q, n, &s->list, list) {
721 		if (sigismember(mask, q->info.si_signo)) {
722 			list_del_init(&q->list);
723 			__sigqueue_free(q);
724 		}
725 	}
726 	return 1;
727 }
728 
729 static inline int is_si_special(const struct siginfo *info)
730 {
731 	return info <= SEND_SIG_FORCED;
732 }
733 
734 static inline bool si_fromuser(const struct siginfo *info)
735 {
736 	return info == SEND_SIG_NOINFO ||
737 		(!is_si_special(info) && SI_FROMUSER(info));
738 }
739 
740 /*
741  * called with RCU read lock from check_kill_permission()
742  */
743 static int kill_ok_by_cred(struct task_struct *t)
744 {
745 	const struct cred *cred = current_cred();
746 	const struct cred *tcred = __task_cred(t);
747 
748 	if (uid_eq(cred->euid, tcred->suid) ||
749 	    uid_eq(cred->euid, tcred->uid)  ||
750 	    uid_eq(cred->uid,  tcred->suid) ||
751 	    uid_eq(cred->uid,  tcred->uid))
752 		return 1;
753 
754 	if (ns_capable(tcred->user_ns, CAP_KILL))
755 		return 1;
756 
757 	return 0;
758 }
759 
760 /*
761  * Bad permissions for sending the signal
762  * - the caller must hold the RCU read lock
763  */
764 static int check_kill_permission(int sig, struct siginfo *info,
765 				 struct task_struct *t)
766 {
767 	struct pid *sid;
768 	int error;
769 
770 	if (!valid_signal(sig))
771 		return -EINVAL;
772 
773 	if (!si_fromuser(info))
774 		return 0;
775 
776 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
777 	if (error)
778 		return error;
779 
780 	if (!same_thread_group(current, t) &&
781 	    !kill_ok_by_cred(t)) {
782 		switch (sig) {
783 		case SIGCONT:
784 			sid = task_session(t);
785 			/*
786 			 * We don't return the error if sid == NULL. The
787 			 * task was unhashed, the caller must notice this.
788 			 */
789 			if (!sid || sid == task_session(current))
790 				break;
791 		default:
792 			return -EPERM;
793 		}
794 	}
795 
796 	return security_task_kill(t, info, sig, 0);
797 }
798 
799 /**
800  * ptrace_trap_notify - schedule trap to notify ptracer
801  * @t: tracee wanting to notify tracer
802  *
803  * This function schedules sticky ptrace trap which is cleared on the next
804  * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
805  * ptracer.
806  *
807  * If @t is running, STOP trap will be taken.  If trapped for STOP and
808  * ptracer is listening for events, tracee is woken up so that it can
809  * re-trap for the new event.  If trapped otherwise, STOP trap will be
810  * eventually taken without returning to userland after the existing traps
811  * are finished by PTRACE_CONT.
812  *
813  * CONTEXT:
814  * Must be called with @task->sighand->siglock held.
815  */
816 static void ptrace_trap_notify(struct task_struct *t)
817 {
818 	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
819 	assert_spin_locked(&t->sighand->siglock);
820 
821 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
822 	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
823 }
824 
825 /*
826  * Handle magic process-wide effects of stop/continue signals. Unlike
827  * the signal actions, these happen immediately at signal-generation
828  * time regardless of blocking, ignoring, or handling.  This does the
829  * actual continuing for SIGCONT, but not the actual stopping for stop
830  * signals. The process stop is done as a signal action for SIG_DFL.
831  *
832  * Returns true if the signal should be actually delivered, otherwise
833  * it should be dropped.
834  */
835 static bool prepare_signal(int sig, struct task_struct *p, bool force)
836 {
837 	struct signal_struct *signal = p->signal;
838 	struct task_struct *t;
839 	sigset_t flush;
840 
841 	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
842 		if (signal->flags & SIGNAL_GROUP_COREDUMP)
843 			return sig == SIGKILL;
844 		/*
845 		 * The process is in the middle of dying, nothing to do.
846 		 */
847 	} else if (sig_kernel_stop(sig)) {
848 		/*
849 		 * This is a stop signal.  Remove SIGCONT from all queues.
850 		 */
851 		siginitset(&flush, sigmask(SIGCONT));
852 		flush_sigqueue_mask(&flush, &signal->shared_pending);
853 		for_each_thread(p, t)
854 			flush_sigqueue_mask(&flush, &t->pending);
855 	} else if (sig == SIGCONT) {
856 		unsigned int why;
857 		/*
858 		 * Remove all stop signals from all queues, wake all threads.
859 		 */
860 		siginitset(&flush, SIG_KERNEL_STOP_MASK);
861 		flush_sigqueue_mask(&flush, &signal->shared_pending);
862 		for_each_thread(p, t) {
863 			flush_sigqueue_mask(&flush, &t->pending);
864 			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
865 			if (likely(!(t->ptrace & PT_SEIZED)))
866 				wake_up_state(t, __TASK_STOPPED);
867 			else
868 				ptrace_trap_notify(t);
869 		}
870 
871 		/*
872 		 * Notify the parent with CLD_CONTINUED if we were stopped.
873 		 *
874 		 * If we were in the middle of a group stop, we pretend it
875 		 * was already finished, and then continued. Since SIGCHLD
876 		 * doesn't queue we report only CLD_STOPPED, as if the next
877 		 * CLD_CONTINUED was dropped.
878 		 */
879 		why = 0;
880 		if (signal->flags & SIGNAL_STOP_STOPPED)
881 			why |= SIGNAL_CLD_CONTINUED;
882 		else if (signal->group_stop_count)
883 			why |= SIGNAL_CLD_STOPPED;
884 
885 		if (why) {
886 			/*
887 			 * The first thread which returns from do_signal_stop()
888 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
889 			 * notify its parent. See get_signal_to_deliver().
890 			 */
891 			signal->flags = why | SIGNAL_STOP_CONTINUED;
892 			signal->group_stop_count = 0;
893 			signal->group_exit_code = 0;
894 		}
895 	}
896 
897 	return !sig_ignored(p, sig, force);
898 }
899 
900 /*
901  * Test if P wants to take SIG.  After we've checked all threads with this,
902  * it's equivalent to finding no threads not blocking SIG.  Any threads not
903  * blocking SIG were ruled out because they are not running and already
904  * have pending signals.  Such threads will dequeue from the shared queue
905  * as soon as they're available, so putting the signal on the shared queue
906  * will be equivalent to sending it to one such thread.
907  */
908 static inline int wants_signal(int sig, struct task_struct *p)
909 {
910 	if (sigismember(&p->blocked, sig))
911 		return 0;
912 	if (p->flags & PF_EXITING)
913 		return 0;
914 	if (sig == SIGKILL)
915 		return 1;
916 	if (task_is_stopped_or_traced(p))
917 		return 0;
918 	return task_curr(p) || !signal_pending(p);
919 }
920 
921 static void complete_signal(int sig, struct task_struct *p, int group)
922 {
923 	struct signal_struct *signal = p->signal;
924 	struct task_struct *t;
925 
926 	/*
927 	 * Now find a thread we can wake up to take the signal off the queue.
928 	 *
929 	 * If the main thread wants the signal, it gets first crack.
930 	 * Probably the least surprising to the average bear.
931 	 */
932 	if (wants_signal(sig, p))
933 		t = p;
934 	else if (!group || thread_group_empty(p))
935 		/*
936 		 * There is just one thread and it does not need to be woken.
937 		 * It will dequeue unblocked signals before it runs again.
938 		 */
939 		return;
940 	else {
941 		/*
942 		 * Otherwise try to find a suitable thread.
943 		 */
944 		t = signal->curr_target;
945 		while (!wants_signal(sig, t)) {
946 			t = next_thread(t);
947 			if (t == signal->curr_target)
948 				/*
949 				 * No thread needs to be woken.
950 				 * Any eligible threads will see
951 				 * the signal in the queue soon.
952 				 */
953 				return;
954 		}
955 		signal->curr_target = t;
956 	}
957 
958 	/*
959 	 * Found a killable thread.  If the signal will be fatal,
960 	 * then start taking the whole group down immediately.
961 	 */
962 	if (sig_fatal(p, sig) &&
963 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
964 	    !sigismember(&t->real_blocked, sig) &&
965 	    (sig == SIGKILL || !t->ptrace)) {
966 		/*
967 		 * This signal will be fatal to the whole group.
968 		 */
969 		if (!sig_kernel_coredump(sig)) {
970 			/*
971 			 * Start a group exit and wake everybody up.
972 			 * This way we don't have other threads
973 			 * running and doing things after a slower
974 			 * thread has the fatal signal pending.
975 			 */
976 			signal->flags = SIGNAL_GROUP_EXIT;
977 			signal->group_exit_code = sig;
978 			signal->group_stop_count = 0;
979 			t = p;
980 			do {
981 				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
982 				sigaddset(&t->pending.signal, SIGKILL);
983 				signal_wake_up(t, 1);
984 			} while_each_thread(p, t);
985 			return;
986 		}
987 	}
988 
989 	/*
990 	 * The signal is already in the shared-pending queue.
991 	 * Tell the chosen thread to wake up and dequeue it.
992 	 */
993 	signal_wake_up(t, sig == SIGKILL);
994 	return;
995 }
996 
997 static inline int legacy_queue(struct sigpending *signals, int sig)
998 {
999 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1000 }
1001 
1002 #ifdef CONFIG_USER_NS
1003 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1004 {
1005 	if (current_user_ns() == task_cred_xxx(t, user_ns))
1006 		return;
1007 
1008 	if (SI_FROMKERNEL(info))
1009 		return;
1010 
1011 	rcu_read_lock();
1012 	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1013 					make_kuid(current_user_ns(), info->si_uid));
1014 	rcu_read_unlock();
1015 }
1016 #else
1017 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1018 {
1019 	return;
1020 }
1021 #endif
1022 
1023 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1024 			int group, int from_ancestor_ns)
1025 {
1026 	struct sigpending *pending;
1027 	struct sigqueue *q;
1028 	int override_rlimit;
1029 	int ret = 0, result;
1030 
1031 	assert_spin_locked(&t->sighand->siglock);
1032 
1033 	result = TRACE_SIGNAL_IGNORED;
1034 	if (!prepare_signal(sig, t,
1035 			from_ancestor_ns || (info == SEND_SIG_FORCED)))
1036 		goto ret;
1037 
1038 	pending = group ? &t->signal->shared_pending : &t->pending;
1039 	/*
1040 	 * Short-circuit ignored signals and support queuing
1041 	 * exactly one non-rt signal, so that we can get more
1042 	 * detailed information about the cause of the signal.
1043 	 */
1044 	result = TRACE_SIGNAL_ALREADY_PENDING;
1045 	if (legacy_queue(pending, sig))
1046 		goto ret;
1047 
1048 	result = TRACE_SIGNAL_DELIVERED;
1049 	/*
1050 	 * fast-pathed signals for kernel-internal things like SIGSTOP
1051 	 * or SIGKILL.
1052 	 */
1053 	if (info == SEND_SIG_FORCED)
1054 		goto out_set;
1055 
1056 	/*
1057 	 * Real-time signals must be queued if sent by sigqueue, or
1058 	 * some other real-time mechanism.  It is implementation
1059 	 * defined whether kill() does so.  We attempt to do so, on
1060 	 * the principle of least surprise, but since kill is not
1061 	 * allowed to fail with EAGAIN when low on memory we just
1062 	 * make sure at least one signal gets delivered and don't
1063 	 * pass on the info struct.
1064 	 */
1065 	if (sig < SIGRTMIN)
1066 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1067 	else
1068 		override_rlimit = 0;
1069 
1070 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1071 		override_rlimit);
1072 	if (q) {
1073 		list_add_tail(&q->list, &pending->list);
1074 		switch ((unsigned long) info) {
1075 		case (unsigned long) SEND_SIG_NOINFO:
1076 			q->info.si_signo = sig;
1077 			q->info.si_errno = 0;
1078 			q->info.si_code = SI_USER;
1079 			q->info.si_pid = task_tgid_nr_ns(current,
1080 							task_active_pid_ns(t));
1081 			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1082 			break;
1083 		case (unsigned long) SEND_SIG_PRIV:
1084 			q->info.si_signo = sig;
1085 			q->info.si_errno = 0;
1086 			q->info.si_code = SI_KERNEL;
1087 			q->info.si_pid = 0;
1088 			q->info.si_uid = 0;
1089 			break;
1090 		default:
1091 			copy_siginfo(&q->info, info);
1092 			if (from_ancestor_ns)
1093 				q->info.si_pid = 0;
1094 			break;
1095 		}
1096 
1097 		userns_fixup_signal_uid(&q->info, t);
1098 
1099 	} else if (!is_si_special(info)) {
1100 		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1101 			/*
1102 			 * Queue overflow, abort.  We may abort if the
1103 			 * signal was rt and sent by user using something
1104 			 * other than kill().
1105 			 */
1106 			result = TRACE_SIGNAL_OVERFLOW_FAIL;
1107 			ret = -EAGAIN;
1108 			goto ret;
1109 		} else {
1110 			/*
1111 			 * This is a silent loss of information.  We still
1112 			 * send the signal, but the *info bits are lost.
1113 			 */
1114 			result = TRACE_SIGNAL_LOSE_INFO;
1115 		}
1116 	}
1117 
1118 out_set:
1119 	signalfd_notify(t, sig);
1120 	sigaddset(&pending->signal, sig);
1121 	complete_signal(sig, t, group);
1122 ret:
1123 	trace_signal_generate(sig, info, t, group, result);
1124 	return ret;
1125 }
1126 
1127 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1128 			int group)
1129 {
1130 	int from_ancestor_ns = 0;
1131 
1132 #ifdef CONFIG_PID_NS
1133 	from_ancestor_ns = si_fromuser(info) &&
1134 			   !task_pid_nr_ns(current, task_active_pid_ns(t));
1135 #endif
1136 
1137 	return __send_signal(sig, info, t, group, from_ancestor_ns);
1138 }
1139 
1140 static void print_fatal_signal(int signr)
1141 {
1142 	struct pt_regs *regs = signal_pt_regs();
1143 	printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);
1144 
1145 #if defined(__i386__) && !defined(__arch_um__)
1146 	printk(KERN_INFO "code at %08lx: ", regs->ip);
1147 	{
1148 		int i;
1149 		for (i = 0; i < 16; i++) {
1150 			unsigned char insn;
1151 
1152 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1153 				break;
1154 			printk(KERN_CONT "%02x ", insn);
1155 		}
1156 	}
1157 	printk(KERN_CONT "\n");
1158 #endif
1159 	preempt_disable();
1160 	show_regs(regs);
1161 	preempt_enable();
1162 }
1163 
1164 static int __init setup_print_fatal_signals(char *str)
1165 {
1166 	get_option (&str, &print_fatal_signals);
1167 
1168 	return 1;
1169 }
1170 
1171 __setup("print-fatal-signals=", setup_print_fatal_signals);
1172 
1173 int
1174 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1175 {
1176 	return send_signal(sig, info, p, 1);
1177 }
1178 
1179 static int
1180 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1181 {
1182 	return send_signal(sig, info, t, 0);
1183 }
1184 
1185 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1186 			bool group)
1187 {
1188 	unsigned long flags;
1189 	int ret = -ESRCH;
1190 
1191 	if (lock_task_sighand(p, &flags)) {
1192 		ret = send_signal(sig, info, p, group);
1193 		unlock_task_sighand(p, &flags);
1194 	}
1195 
1196 	return ret;
1197 }
1198 
1199 /*
1200  * Force a signal that the process can't ignore: if necessary
1201  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1202  *
1203  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1204  * since we do not want to have a signal handler that was blocked
1205  * be invoked when user space had explicitly blocked it.
1206  *
1207  * We don't want to have recursive SIGSEGV's etc, for example,
1208  * that is why we also clear SIGNAL_UNKILLABLE.
1209  */
1210 int
1211 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1212 {
1213 	unsigned long int flags;
1214 	int ret, blocked, ignored;
1215 	struct k_sigaction *action;
1216 
1217 	spin_lock_irqsave(&t->sighand->siglock, flags);
1218 	action = &t->sighand->action[sig-1];
1219 	ignored = action->sa.sa_handler == SIG_IGN;
1220 	blocked = sigismember(&t->blocked, sig);
1221 	if (blocked || ignored) {
1222 		action->sa.sa_handler = SIG_DFL;
1223 		if (blocked) {
1224 			sigdelset(&t->blocked, sig);
1225 			recalc_sigpending_and_wake(t);
1226 		}
1227 	}
1228 	if (action->sa.sa_handler == SIG_DFL)
1229 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1230 	ret = specific_send_sig_info(sig, info, t);
1231 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1232 
1233 	return ret;
1234 }
1235 
1236 /*
1237  * Nuke all other threads in the group.
1238  */
1239 int zap_other_threads(struct task_struct *p)
1240 {
1241 	struct task_struct *t = p;
1242 	int count = 0;
1243 
1244 	p->signal->group_stop_count = 0;
1245 
1246 	while_each_thread(p, t) {
1247 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1248 		count++;
1249 
1250 		/* Don't bother with already dead threads */
1251 		if (t->exit_state)
1252 			continue;
1253 		sigaddset(&t->pending.signal, SIGKILL);
1254 		signal_wake_up(t, 1);
1255 	}
1256 
1257 	return count;
1258 }
1259 
1260 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1261 					   unsigned long *flags)
1262 {
1263 	struct sighand_struct *sighand;
1264 
1265 	for (;;) {
1266 		local_irq_save(*flags);
1267 		rcu_read_lock();
1268 		sighand = rcu_dereference(tsk->sighand);
1269 		if (unlikely(sighand == NULL)) {
1270 			rcu_read_unlock();
1271 			local_irq_restore(*flags);
1272 			break;
1273 		}
1274 
1275 		spin_lock(&sighand->siglock);
1276 		if (likely(sighand == tsk->sighand)) {
1277 			rcu_read_unlock();
1278 			break;
1279 		}
1280 		spin_unlock(&sighand->siglock);
1281 		rcu_read_unlock();
1282 		local_irq_restore(*flags);
1283 	}
1284 
1285 	return sighand;
1286 }
1287 
1288 /*
1289  * send signal info to all the members of a group
1290  */
1291 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1292 {
1293 	int ret;
1294 
1295 	rcu_read_lock();
1296 	ret = check_kill_permission(sig, info, p);
1297 	rcu_read_unlock();
1298 
1299 	if (!ret && sig)
1300 		ret = do_send_sig_info(sig, info, p, true);
1301 
1302 	return ret;
1303 }
1304 
1305 /*
1306  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1307  * control characters do (^C, ^Z etc)
1308  * - the caller must hold at least a readlock on tasklist_lock
1309  */
1310 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1311 {
1312 	struct task_struct *p = NULL;
1313 	int retval, success;
1314 
1315 	success = 0;
1316 	retval = -ESRCH;
1317 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1318 		int err = group_send_sig_info(sig, info, p);
1319 		success |= !err;
1320 		retval = err;
1321 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1322 	return success ? 0 : retval;
1323 }
1324 
1325 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1326 {
1327 	int error = -ESRCH;
1328 	struct task_struct *p;
1329 
1330 	rcu_read_lock();
1331 retry:
1332 	p = pid_task(pid, PIDTYPE_PID);
1333 	if (p) {
1334 		error = group_send_sig_info(sig, info, p);
1335 		if (unlikely(error == -ESRCH))
1336 			/*
1337 			 * The task was unhashed in between, try again.
1338 			 * If it is dead, pid_task() will return NULL,
1339 			 * if we race with de_thread() it will find the
1340 			 * new leader.
1341 			 */
1342 			goto retry;
1343 	}
1344 	rcu_read_unlock();
1345 
1346 	return error;
1347 }
1348 
1349 int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1350 {
1351 	int error;
1352 	rcu_read_lock();
1353 	error = kill_pid_info(sig, info, find_vpid(pid));
1354 	rcu_read_unlock();
1355 	return error;
1356 }
1357 
1358 static int kill_as_cred_perm(const struct cred *cred,
1359 			     struct task_struct *target)
1360 {
1361 	const struct cred *pcred = __task_cred(target);
1362 	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1363 	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
1364 		return 0;
1365 	return 1;
1366 }
1367 
1368 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1369 int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1370 			 const struct cred *cred, u32 secid)
1371 {
1372 	int ret = -EINVAL;
1373 	struct task_struct *p;
1374 	unsigned long flags;
1375 
1376 	if (!valid_signal(sig))
1377 		return ret;
1378 
1379 	rcu_read_lock();
1380 	p = pid_task(pid, PIDTYPE_PID);
1381 	if (!p) {
1382 		ret = -ESRCH;
1383 		goto out_unlock;
1384 	}
1385 	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1386 		ret = -EPERM;
1387 		goto out_unlock;
1388 	}
1389 	ret = security_task_kill(p, info, sig, secid);
1390 	if (ret)
1391 		goto out_unlock;
1392 
1393 	if (sig) {
1394 		if (lock_task_sighand(p, &flags)) {
1395 			ret = __send_signal(sig, info, p, 1, 0);
1396 			unlock_task_sighand(p, &flags);
1397 		} else
1398 			ret = -ESRCH;
1399 	}
1400 out_unlock:
1401 	rcu_read_unlock();
1402 	return ret;
1403 }
1404 EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1405 
1406 /*
1407  * kill_something_info() interprets pid in interesting ways just like kill(2).
1408  *
1409  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1410  * is probably wrong.  Should make it like BSD or SYSV.
1411  */
1412 
1413 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1414 {
1415 	int ret;
1416 
1417 	if (pid > 0) {
1418 		rcu_read_lock();
1419 		ret = kill_pid_info(sig, info, find_vpid(pid));
1420 		rcu_read_unlock();
1421 		return ret;
1422 	}
1423 
1424 	read_lock(&tasklist_lock);
1425 	if (pid != -1) {
1426 		ret = __kill_pgrp_info(sig, info,
1427 				pid ? find_vpid(-pid) : task_pgrp(current));
1428 	} else {
1429 		int retval = 0, count = 0;
1430 		struct task_struct * p;
1431 
1432 		for_each_process(p) {
1433 			if (task_pid_vnr(p) > 1 &&
1434 					!same_thread_group(p, current)) {
1435 				int err = group_send_sig_info(sig, info, p);
1436 				++count;
1437 				if (err != -EPERM)
1438 					retval = err;
1439 			}
1440 		}
1441 		ret = count ? retval : -ESRCH;
1442 	}
1443 	read_unlock(&tasklist_lock);
1444 
1445 	return ret;
1446 }
1447 
1448 /*
1449  * These are for backward compatibility with the rest of the kernel source.
1450  */
1451 
1452 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1453 {
1454 	/*
1455 	 * Make sure legacy kernel users don't send in bad values
1456 	 * (normal paths check this in check_kill_permission).
1457 	 */
1458 	if (!valid_signal(sig))
1459 		return -EINVAL;
1460 
1461 	return do_send_sig_info(sig, info, p, false);
1462 }
1463 
1464 #define __si_special(priv) \
1465 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1466 
1467 int
1468 send_sig(int sig, struct task_struct *p, int priv)
1469 {
1470 	return send_sig_info(sig, __si_special(priv), p);
1471 }
1472 
1473 void
1474 force_sig(int sig, struct task_struct *p)
1475 {
1476 	force_sig_info(sig, SEND_SIG_PRIV, p);
1477 }
1478 
1479 /*
1480  * When things go south during signal handling, we
1481  * will force a SIGSEGV. And if the signal that caused
1482  * the problem was already a SIGSEGV, we'll want to
1483  * make sure we don't even try to deliver the signal..
1484  */
1485 int
1486 force_sigsegv(int sig, struct task_struct *p)
1487 {
1488 	if (sig == SIGSEGV) {
1489 		unsigned long flags;
1490 		spin_lock_irqsave(&p->sighand->siglock, flags);
1491 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1492 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1493 	}
1494 	force_sig(SIGSEGV, p);
1495 	return 0;
1496 }
1497 
1498 int kill_pgrp(struct pid *pid, int sig, int priv)
1499 {
1500 	int ret;
1501 
1502 	read_lock(&tasklist_lock);
1503 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1504 	read_unlock(&tasklist_lock);
1505 
1506 	return ret;
1507 }
1508 EXPORT_SYMBOL(kill_pgrp);
1509 
1510 int kill_pid(struct pid *pid, int sig, int priv)
1511 {
1512 	return kill_pid_info(sig, __si_special(priv), pid);
1513 }
1514 EXPORT_SYMBOL(kill_pid);
1515 
1516 /*
1517  * These functions support sending signals using preallocated sigqueue
1518  * structures.  This is needed "because realtime applications cannot
1519  * afford to lose notifications of asynchronous events, like timer
1520  * expirations or I/O completions".  In the case of POSIX Timers
1521  * we allocate the sigqueue structure from the timer_create.  If this
1522  * allocation fails we are able to report the failure to the application
1523  * with an EAGAIN error.
1524  */
1525 struct sigqueue *sigqueue_alloc(void)
1526 {
1527 	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1528 
1529 	if (q)
1530 		q->flags |= SIGQUEUE_PREALLOC;
1531 
1532 	return q;
1533 }
1534 
1535 void sigqueue_free(struct sigqueue *q)
1536 {
1537 	unsigned long flags;
1538 	spinlock_t *lock = &current->sighand->siglock;
1539 
1540 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1541 	/*
1542 	 * We must hold ->siglock while testing q->list
1543 	 * to serialize with collect_signal() or with
1544 	 * __exit_signal()->flush_sigqueue().
1545 	 */
1546 	spin_lock_irqsave(lock, flags);
1547 	q->flags &= ~SIGQUEUE_PREALLOC;
1548 	/*
1549 	 * If it is queued it will be freed when dequeued,
1550 	 * like the "regular" sigqueue.
1551 	 */
1552 	if (!list_empty(&q->list))
1553 		q = NULL;
1554 	spin_unlock_irqrestore(lock, flags);
1555 
1556 	if (q)
1557 		__sigqueue_free(q);
1558 }
1559 
1560 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1561 {
1562 	int sig = q->info.si_signo;
1563 	struct sigpending *pending;
1564 	unsigned long flags;
1565 	int ret, result;
1566 
1567 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1568 
1569 	ret = -1;
1570 	if (!likely(lock_task_sighand(t, &flags)))
1571 		goto ret;
1572 
1573 	ret = 1; /* the signal is ignored */
1574 	result = TRACE_SIGNAL_IGNORED;
1575 	if (!prepare_signal(sig, t, false))
1576 		goto out;
1577 
1578 	ret = 0;
1579 	if (unlikely(!list_empty(&q->list))) {
1580 		/*
1581 		 * If an SI_TIMER entry is already queue just increment
1582 		 * the overrun count.
1583 		 */
1584 		BUG_ON(q->info.si_code != SI_TIMER);
1585 		q->info.si_overrun++;
1586 		result = TRACE_SIGNAL_ALREADY_PENDING;
1587 		goto out;
1588 	}
1589 	q->info.si_overrun = 0;
1590 
1591 	signalfd_notify(t, sig);
1592 	pending = group ? &t->signal->shared_pending : &t->pending;
1593 	list_add_tail(&q->list, &pending->list);
1594 	sigaddset(&pending->signal, sig);
1595 	complete_signal(sig, t, group);
1596 	result = TRACE_SIGNAL_DELIVERED;
1597 out:
1598 	trace_signal_generate(sig, &q->info, t, group, result);
1599 	unlock_task_sighand(t, &flags);
1600 ret:
1601 	return ret;
1602 }
1603 
1604 /*
1605  * Let a parent know about the death of a child.
1606  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1607  *
1608  * Returns true if our parent ignored us and so we've switched to
1609  * self-reaping.
1610  */
1611 bool do_notify_parent(struct task_struct *tsk, int sig)
1612 {
1613 	struct siginfo info;
1614 	unsigned long flags;
1615 	struct sighand_struct *psig;
1616 	bool autoreap = false;
1617 	cputime_t utime, stime;
1618 
1619 	BUG_ON(sig == -1);
1620 
1621  	/* do_notify_parent_cldstop should have been called instead.  */
1622  	BUG_ON(task_is_stopped_or_traced(tsk));
1623 
1624 	BUG_ON(!tsk->ptrace &&
1625 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1626 
1627 	if (sig != SIGCHLD) {
1628 		/*
1629 		 * This is only possible if parent == real_parent.
1630 		 * Check if it has changed security domain.
1631 		 */
1632 		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1633 			sig = SIGCHLD;
1634 	}
1635 
1636 	info.si_signo = sig;
1637 	info.si_errno = 0;
1638 	/*
1639 	 * We are under tasklist_lock here so our parent is tied to
1640 	 * us and cannot change.
1641 	 *
1642 	 * task_active_pid_ns will always return the same pid namespace
1643 	 * until a task passes through release_task.
1644 	 *
1645 	 * write_lock() currently calls preempt_disable() which is the
1646 	 * same as rcu_read_lock(), but according to Oleg, this is not
1647 	 * correct to rely on this
1648 	 */
1649 	rcu_read_lock();
1650 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1651 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1652 				       task_uid(tsk));
1653 	rcu_read_unlock();
1654 
1655 	task_cputime(tsk, &utime, &stime);
1656 	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
1657 	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
1658 
1659 	info.si_status = tsk->exit_code & 0x7f;
1660 	if (tsk->exit_code & 0x80)
1661 		info.si_code = CLD_DUMPED;
1662 	else if (tsk->exit_code & 0x7f)
1663 		info.si_code = CLD_KILLED;
1664 	else {
1665 		info.si_code = CLD_EXITED;
1666 		info.si_status = tsk->exit_code >> 8;
1667 	}
1668 
1669 	psig = tsk->parent->sighand;
1670 	spin_lock_irqsave(&psig->siglock, flags);
1671 	if (!tsk->ptrace && sig == SIGCHLD &&
1672 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1673 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1674 		/*
1675 		 * We are exiting and our parent doesn't care.  POSIX.1
1676 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1677 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1678 		 * automatically and not left for our parent's wait4 call.
1679 		 * Rather than having the parent do it as a magic kind of
1680 		 * signal handler, we just set this to tell do_exit that we
1681 		 * can be cleaned up without becoming a zombie.  Note that
1682 		 * we still call __wake_up_parent in this case, because a
1683 		 * blocked sys_wait4 might now return -ECHILD.
1684 		 *
1685 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1686 		 * is implementation-defined: we do (if you don't want
1687 		 * it, just use SIG_IGN instead).
1688 		 */
1689 		autoreap = true;
1690 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1691 			sig = 0;
1692 	}
1693 	if (valid_signal(sig) && sig)
1694 		__group_send_sig_info(sig, &info, tsk->parent);
1695 	__wake_up_parent(tsk, tsk->parent);
1696 	spin_unlock_irqrestore(&psig->siglock, flags);
1697 
1698 	return autoreap;
1699 }
1700 
1701 /**
1702  * do_notify_parent_cldstop - notify parent of stopped/continued state change
1703  * @tsk: task reporting the state change
1704  * @for_ptracer: the notification is for ptracer
1705  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1706  *
1707  * Notify @tsk's parent that the stopped/continued state has changed.  If
1708  * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1709  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1710  *
1711  * CONTEXT:
1712  * Must be called with tasklist_lock at least read locked.
1713  */
1714 static void do_notify_parent_cldstop(struct task_struct *tsk,
1715 				     bool for_ptracer, int why)
1716 {
1717 	struct siginfo info;
1718 	unsigned long flags;
1719 	struct task_struct *parent;
1720 	struct sighand_struct *sighand;
1721 	cputime_t utime, stime;
1722 
1723 	if (for_ptracer) {
1724 		parent = tsk->parent;
1725 	} else {
1726 		tsk = tsk->group_leader;
1727 		parent = tsk->real_parent;
1728 	}
1729 
1730 	info.si_signo = SIGCHLD;
1731 	info.si_errno = 0;
1732 	/*
1733 	 * see comment in do_notify_parent() about the following 4 lines
1734 	 */
1735 	rcu_read_lock();
1736 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1737 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1738 	rcu_read_unlock();
1739 
1740 	task_cputime(tsk, &utime, &stime);
1741 	info.si_utime = cputime_to_clock_t(utime);
1742 	info.si_stime = cputime_to_clock_t(stime);
1743 
1744  	info.si_code = why;
1745  	switch (why) {
1746  	case CLD_CONTINUED:
1747  		info.si_status = SIGCONT;
1748  		break;
1749  	case CLD_STOPPED:
1750  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1751  		break;
1752  	case CLD_TRAPPED:
1753  		info.si_status = tsk->exit_code & 0x7f;
1754  		break;
1755  	default:
1756  		BUG();
1757  	}
1758 
1759 	sighand = parent->sighand;
1760 	spin_lock_irqsave(&sighand->siglock, flags);
1761 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1762 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1763 		__group_send_sig_info(SIGCHLD, &info, parent);
1764 	/*
1765 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1766 	 */
1767 	__wake_up_parent(tsk, parent);
1768 	spin_unlock_irqrestore(&sighand->siglock, flags);
1769 }
1770 
1771 static inline int may_ptrace_stop(void)
1772 {
1773 	if (!likely(current->ptrace))
1774 		return 0;
1775 	/*
1776 	 * Are we in the middle of do_coredump?
1777 	 * If so and our tracer is also part of the coredump stopping
1778 	 * is a deadlock situation, and pointless because our tracer
1779 	 * is dead so don't allow us to stop.
1780 	 * If SIGKILL was already sent before the caller unlocked
1781 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1782 	 * is safe to enter schedule().
1783 	 *
1784 	 * This is almost outdated, a task with the pending SIGKILL can't
1785 	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1786 	 * after SIGKILL was already dequeued.
1787 	 */
1788 	if (unlikely(current->mm->core_state) &&
1789 	    unlikely(current->mm == current->parent->mm))
1790 		return 0;
1791 
1792 	return 1;
1793 }
1794 
1795 /*
1796  * Return non-zero if there is a SIGKILL that should be waking us up.
1797  * Called with the siglock held.
1798  */
1799 static int sigkill_pending(struct task_struct *tsk)
1800 {
1801 	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1802 		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1803 }
1804 
1805 /*
1806  * This must be called with current->sighand->siglock held.
1807  *
1808  * This should be the path for all ptrace stops.
1809  * We always set current->last_siginfo while stopped here.
1810  * That makes it a way to test a stopped process for
1811  * being ptrace-stopped vs being job-control-stopped.
1812  *
1813  * If we actually decide not to stop at all because the tracer
1814  * is gone, we keep current->exit_code unless clear_code.
1815  */
1816 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1817 	__releases(&current->sighand->siglock)
1818 	__acquires(&current->sighand->siglock)
1819 {
1820 	bool gstop_done = false;
1821 
1822 	if (arch_ptrace_stop_needed(exit_code, info)) {
1823 		/*
1824 		 * The arch code has something special to do before a
1825 		 * ptrace stop.  This is allowed to block, e.g. for faults
1826 		 * on user stack pages.  We can't keep the siglock while
1827 		 * calling arch_ptrace_stop, so we must release it now.
1828 		 * To preserve proper semantics, we must do this before
1829 		 * any signal bookkeeping like checking group_stop_count.
1830 		 * Meanwhile, a SIGKILL could come in before we retake the
1831 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1832 		 * So after regaining the lock, we must check for SIGKILL.
1833 		 */
1834 		spin_unlock_irq(&current->sighand->siglock);
1835 		arch_ptrace_stop(exit_code, info);
1836 		spin_lock_irq(&current->sighand->siglock);
1837 		if (sigkill_pending(current))
1838 			return;
1839 	}
1840 
1841 	/*
1842 	 * We're committing to trapping.  TRACED should be visible before
1843 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1844 	 * Also, transition to TRACED and updates to ->jobctl should be
1845 	 * atomic with respect to siglock and should be done after the arch
1846 	 * hook as siglock is released and regrabbed across it.
1847 	 */
1848 	set_current_state(TASK_TRACED);
1849 
1850 	current->last_siginfo = info;
1851 	current->exit_code = exit_code;
1852 
1853 	/*
1854 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
1855 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
1856 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
1857 	 * could be clear now.  We act as if SIGCONT is received after
1858 	 * TASK_TRACED is entered - ignore it.
1859 	 */
1860 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1861 		gstop_done = task_participate_group_stop(current);
1862 
1863 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1864 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1865 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1866 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1867 
1868 	/* entering a trap, clear TRAPPING */
1869 	task_clear_jobctl_trapping(current);
1870 
1871 	spin_unlock_irq(&current->sighand->siglock);
1872 	read_lock(&tasklist_lock);
1873 	if (may_ptrace_stop()) {
1874 		/*
1875 		 * Notify parents of the stop.
1876 		 *
1877 		 * While ptraced, there are two parents - the ptracer and
1878 		 * the real_parent of the group_leader.  The ptracer should
1879 		 * know about every stop while the real parent is only
1880 		 * interested in the completion of group stop.  The states
1881 		 * for the two don't interact with each other.  Notify
1882 		 * separately unless they're gonna be duplicates.
1883 		 */
1884 		do_notify_parent_cldstop(current, true, why);
1885 		if (gstop_done && ptrace_reparented(current))
1886 			do_notify_parent_cldstop(current, false, why);
1887 
1888 		/*
1889 		 * Don't want to allow preemption here, because
1890 		 * sys_ptrace() needs this task to be inactive.
1891 		 *
1892 		 * XXX: implement read_unlock_no_resched().
1893 		 */
1894 		preempt_disable();
1895 		read_unlock(&tasklist_lock);
1896 		preempt_enable_no_resched();
1897 		freezable_schedule();
1898 	} else {
1899 		/*
1900 		 * By the time we got the lock, our tracer went away.
1901 		 * Don't drop the lock yet, another tracer may come.
1902 		 *
1903 		 * If @gstop_done, the ptracer went away between group stop
1904 		 * completion and here.  During detach, it would have set
1905 		 * JOBCTL_STOP_PENDING on us and we'll re-enter
1906 		 * TASK_STOPPED in do_signal_stop() on return, so notifying
1907 		 * the real parent of the group stop completion is enough.
1908 		 */
1909 		if (gstop_done)
1910 			do_notify_parent_cldstop(current, false, why);
1911 
1912 		/* tasklist protects us from ptrace_freeze_traced() */
1913 		__set_current_state(TASK_RUNNING);
1914 		if (clear_code)
1915 			current->exit_code = 0;
1916 		read_unlock(&tasklist_lock);
1917 	}
1918 
1919 	/*
1920 	 * We are back.  Now reacquire the siglock before touching
1921 	 * last_siginfo, so that we are sure to have synchronized with
1922 	 * any signal-sending on another CPU that wants to examine it.
1923 	 */
1924 	spin_lock_irq(&current->sighand->siglock);
1925 	current->last_siginfo = NULL;
1926 
1927 	/* LISTENING can be set only during STOP traps, clear it */
1928 	current->jobctl &= ~JOBCTL_LISTENING;
1929 
1930 	/*
1931 	 * Queued signals ignored us while we were stopped for tracing.
1932 	 * So check for any that we should take before resuming user mode.
1933 	 * This sets TIF_SIGPENDING, but never clears it.
1934 	 */
1935 	recalc_sigpending_tsk(current);
1936 }
1937 
1938 static void ptrace_do_notify(int signr, int exit_code, int why)
1939 {
1940 	siginfo_t info;
1941 
1942 	memset(&info, 0, sizeof info);
1943 	info.si_signo = signr;
1944 	info.si_code = exit_code;
1945 	info.si_pid = task_pid_vnr(current);
1946 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1947 
1948 	/* Let the debugger run.  */
1949 	ptrace_stop(exit_code, why, 1, &info);
1950 }
1951 
1952 void ptrace_notify(int exit_code)
1953 {
1954 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1955 	if (unlikely(current->task_works))
1956 		task_work_run();
1957 
1958 	spin_lock_irq(&current->sighand->siglock);
1959 	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1960 	spin_unlock_irq(&current->sighand->siglock);
1961 }
1962 
1963 /**
1964  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1965  * @signr: signr causing group stop if initiating
1966  *
1967  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1968  * and participate in it.  If already set, participate in the existing
1969  * group stop.  If participated in a group stop (and thus slept), %true is
1970  * returned with siglock released.
1971  *
1972  * If ptraced, this function doesn't handle stop itself.  Instead,
1973  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1974  * untouched.  The caller must ensure that INTERRUPT trap handling takes
1975  * places afterwards.
1976  *
1977  * CONTEXT:
1978  * Must be called with @current->sighand->siglock held, which is released
1979  * on %true return.
1980  *
1981  * RETURNS:
1982  * %false if group stop is already cancelled or ptrace trap is scheduled.
1983  * %true if participated in group stop.
1984  */
1985 static bool do_signal_stop(int signr)
1986 	__releases(&current->sighand->siglock)
1987 {
1988 	struct signal_struct *sig = current->signal;
1989 
1990 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
1991 		unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
1992 		struct task_struct *t;
1993 
1994 		/* signr will be recorded in task->jobctl for retries */
1995 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
1996 
1997 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
1998 		    unlikely(signal_group_exit(sig)))
1999 			return false;
2000 		/*
2001 		 * There is no group stop already in progress.  We must
2002 		 * initiate one now.
2003 		 *
2004 		 * While ptraced, a task may be resumed while group stop is
2005 		 * still in effect and then receive a stop signal and
2006 		 * initiate another group stop.  This deviates from the
2007 		 * usual behavior as two consecutive stop signals can't
2008 		 * cause two group stops when !ptraced.  That is why we
2009 		 * also check !task_is_stopped(t) below.
2010 		 *
2011 		 * The condition can be distinguished by testing whether
2012 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2013 		 * group_exit_code in such case.
2014 		 *
2015 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2016 		 * an intervening stop signal is required to cause two
2017 		 * continued events regardless of ptrace.
2018 		 */
2019 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2020 			sig->group_exit_code = signr;
2021 
2022 		sig->group_stop_count = 0;
2023 
2024 		if (task_set_jobctl_pending(current, signr | gstop))
2025 			sig->group_stop_count++;
2026 
2027 		t = current;
2028 		while_each_thread(current, t) {
2029 			/*
2030 			 * Setting state to TASK_STOPPED for a group
2031 			 * stop is always done with the siglock held,
2032 			 * so this check has no races.
2033 			 */
2034 			if (!task_is_stopped(t) &&
2035 			    task_set_jobctl_pending(t, signr | gstop)) {
2036 				sig->group_stop_count++;
2037 				if (likely(!(t->ptrace & PT_SEIZED)))
2038 					signal_wake_up(t, 0);
2039 				else
2040 					ptrace_trap_notify(t);
2041 			}
2042 		}
2043 	}
2044 
2045 	if (likely(!current->ptrace)) {
2046 		int notify = 0;
2047 
2048 		/*
2049 		 * If there are no other threads in the group, or if there
2050 		 * is a group stop in progress and we are the last to stop,
2051 		 * report to the parent.
2052 		 */
2053 		if (task_participate_group_stop(current))
2054 			notify = CLD_STOPPED;
2055 
2056 		__set_current_state(TASK_STOPPED);
2057 		spin_unlock_irq(&current->sighand->siglock);
2058 
2059 		/*
2060 		 * Notify the parent of the group stop completion.  Because
2061 		 * we're not holding either the siglock or tasklist_lock
2062 		 * here, ptracer may attach inbetween; however, this is for
2063 		 * group stop and should always be delivered to the real
2064 		 * parent of the group leader.  The new ptracer will get
2065 		 * its notification when this task transitions into
2066 		 * TASK_TRACED.
2067 		 */
2068 		if (notify) {
2069 			read_lock(&tasklist_lock);
2070 			do_notify_parent_cldstop(current, false, notify);
2071 			read_unlock(&tasklist_lock);
2072 		}
2073 
2074 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2075 		freezable_schedule();
2076 		return true;
2077 	} else {
2078 		/*
2079 		 * While ptraced, group stop is handled by STOP trap.
2080 		 * Schedule it and let the caller deal with it.
2081 		 */
2082 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2083 		return false;
2084 	}
2085 }
2086 
2087 /**
2088  * do_jobctl_trap - take care of ptrace jobctl traps
2089  *
2090  * When PT_SEIZED, it's used for both group stop and explicit
2091  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2092  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2093  * the stop signal; otherwise, %SIGTRAP.
2094  *
2095  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2096  * number as exit_code and no siginfo.
2097  *
2098  * CONTEXT:
2099  * Must be called with @current->sighand->siglock held, which may be
2100  * released and re-acquired before returning with intervening sleep.
2101  */
2102 static void do_jobctl_trap(void)
2103 {
2104 	struct signal_struct *signal = current->signal;
2105 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2106 
2107 	if (current->ptrace & PT_SEIZED) {
2108 		if (!signal->group_stop_count &&
2109 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2110 			signr = SIGTRAP;
2111 		WARN_ON_ONCE(!signr);
2112 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2113 				 CLD_STOPPED);
2114 	} else {
2115 		WARN_ON_ONCE(!signr);
2116 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2117 		current->exit_code = 0;
2118 	}
2119 }
2120 
2121 static int ptrace_signal(int signr, siginfo_t *info)
2122 {
2123 	ptrace_signal_deliver();
2124 	/*
2125 	 * We do not check sig_kernel_stop(signr) but set this marker
2126 	 * unconditionally because we do not know whether debugger will
2127 	 * change signr. This flag has no meaning unless we are going
2128 	 * to stop after return from ptrace_stop(). In this case it will
2129 	 * be checked in do_signal_stop(), we should only stop if it was
2130 	 * not cleared by SIGCONT while we were sleeping. See also the
2131 	 * comment in dequeue_signal().
2132 	 */
2133 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2134 	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2135 
2136 	/* We're back.  Did the debugger cancel the sig?  */
2137 	signr = current->exit_code;
2138 	if (signr == 0)
2139 		return signr;
2140 
2141 	current->exit_code = 0;
2142 
2143 	/*
2144 	 * Update the siginfo structure if the signal has
2145 	 * changed.  If the debugger wanted something
2146 	 * specific in the siginfo structure then it should
2147 	 * have updated *info via PTRACE_SETSIGINFO.
2148 	 */
2149 	if (signr != info->si_signo) {
2150 		info->si_signo = signr;
2151 		info->si_errno = 0;
2152 		info->si_code = SI_USER;
2153 		rcu_read_lock();
2154 		info->si_pid = task_pid_vnr(current->parent);
2155 		info->si_uid = from_kuid_munged(current_user_ns(),
2156 						task_uid(current->parent));
2157 		rcu_read_unlock();
2158 	}
2159 
2160 	/* If the (new) signal is now blocked, requeue it.  */
2161 	if (sigismember(&current->blocked, signr)) {
2162 		specific_send_sig_info(signr, info, current);
2163 		signr = 0;
2164 	}
2165 
2166 	return signr;
2167 }
2168 
2169 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
2170 			  struct pt_regs *regs, void *cookie)
2171 {
2172 	struct sighand_struct *sighand = current->sighand;
2173 	struct signal_struct *signal = current->signal;
2174 	int signr;
2175 
2176 	if (unlikely(current->task_works))
2177 		task_work_run();
2178 
2179 	if (unlikely(uprobe_deny_signal()))
2180 		return 0;
2181 
2182 	/*
2183 	 * Do this once, we can't return to user-mode if freezing() == T.
2184 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2185 	 * thus do not need another check after return.
2186 	 */
2187 	try_to_freeze();
2188 
2189 relock:
2190 	spin_lock_irq(&sighand->siglock);
2191 	/*
2192 	 * Every stopped thread goes here after wakeup. Check to see if
2193 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2194 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2195 	 */
2196 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2197 		int why;
2198 
2199 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2200 			why = CLD_CONTINUED;
2201 		else
2202 			why = CLD_STOPPED;
2203 
2204 		signal->flags &= ~SIGNAL_CLD_MASK;
2205 
2206 		spin_unlock_irq(&sighand->siglock);
2207 
2208 		/*
2209 		 * Notify the parent that we're continuing.  This event is
2210 		 * always per-process and doesn't make whole lot of sense
2211 		 * for ptracers, who shouldn't consume the state via
2212 		 * wait(2) either, but, for backward compatibility, notify
2213 		 * the ptracer of the group leader too unless it's gonna be
2214 		 * a duplicate.
2215 		 */
2216 		read_lock(&tasklist_lock);
2217 		do_notify_parent_cldstop(current, false, why);
2218 
2219 		if (ptrace_reparented(current->group_leader))
2220 			do_notify_parent_cldstop(current->group_leader,
2221 						true, why);
2222 		read_unlock(&tasklist_lock);
2223 
2224 		goto relock;
2225 	}
2226 
2227 	for (;;) {
2228 		struct k_sigaction *ka;
2229 
2230 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2231 		    do_signal_stop(0))
2232 			goto relock;
2233 
2234 		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2235 			do_jobctl_trap();
2236 			spin_unlock_irq(&sighand->siglock);
2237 			goto relock;
2238 		}
2239 
2240 		signr = dequeue_signal(current, &current->blocked, info);
2241 
2242 		if (!signr)
2243 			break; /* will return 0 */
2244 
2245 		if (unlikely(current->ptrace) && signr != SIGKILL) {
2246 			signr = ptrace_signal(signr, info);
2247 			if (!signr)
2248 				continue;
2249 		}
2250 
2251 		ka = &sighand->action[signr-1];
2252 
2253 		/* Trace actually delivered signals. */
2254 		trace_signal_deliver(signr, info, ka);
2255 
2256 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2257 			continue;
2258 		if (ka->sa.sa_handler != SIG_DFL) {
2259 			/* Run the handler.  */
2260 			*return_ka = *ka;
2261 
2262 			if (ka->sa.sa_flags & SA_ONESHOT)
2263 				ka->sa.sa_handler = SIG_DFL;
2264 
2265 			break; /* will return non-zero "signr" value */
2266 		}
2267 
2268 		/*
2269 		 * Now we are doing the default action for this signal.
2270 		 */
2271 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2272 			continue;
2273 
2274 		/*
2275 		 * Global init gets no signals it doesn't want.
2276 		 * Container-init gets no signals it doesn't want from same
2277 		 * container.
2278 		 *
2279 		 * Note that if global/container-init sees a sig_kernel_only()
2280 		 * signal here, the signal must have been generated internally
2281 		 * or must have come from an ancestor namespace. In either
2282 		 * case, the signal cannot be dropped.
2283 		 */
2284 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2285 				!sig_kernel_only(signr))
2286 			continue;
2287 
2288 		if (sig_kernel_stop(signr)) {
2289 			/*
2290 			 * The default action is to stop all threads in
2291 			 * the thread group.  The job control signals
2292 			 * do nothing in an orphaned pgrp, but SIGSTOP
2293 			 * always works.  Note that siglock needs to be
2294 			 * dropped during the call to is_orphaned_pgrp()
2295 			 * because of lock ordering with tasklist_lock.
2296 			 * This allows an intervening SIGCONT to be posted.
2297 			 * We need to check for that and bail out if necessary.
2298 			 */
2299 			if (signr != SIGSTOP) {
2300 				spin_unlock_irq(&sighand->siglock);
2301 
2302 				/* signals can be posted during this window */
2303 
2304 				if (is_current_pgrp_orphaned())
2305 					goto relock;
2306 
2307 				spin_lock_irq(&sighand->siglock);
2308 			}
2309 
2310 			if (likely(do_signal_stop(info->si_signo))) {
2311 				/* It released the siglock.  */
2312 				goto relock;
2313 			}
2314 
2315 			/*
2316 			 * We didn't actually stop, due to a race
2317 			 * with SIGCONT or something like that.
2318 			 */
2319 			continue;
2320 		}
2321 
2322 		spin_unlock_irq(&sighand->siglock);
2323 
2324 		/*
2325 		 * Anything else is fatal, maybe with a core dump.
2326 		 */
2327 		current->flags |= PF_SIGNALED;
2328 
2329 		if (sig_kernel_coredump(signr)) {
2330 			if (print_fatal_signals)
2331 				print_fatal_signal(info->si_signo);
2332 			proc_coredump_connector(current);
2333 			/*
2334 			 * If it was able to dump core, this kills all
2335 			 * other threads in the group and synchronizes with
2336 			 * their demise.  If we lost the race with another
2337 			 * thread getting here, it set group_exit_code
2338 			 * first and our do_group_exit call below will use
2339 			 * that value and ignore the one we pass it.
2340 			 */
2341 			do_coredump(info);
2342 		}
2343 
2344 		/*
2345 		 * Death signals, no core dump.
2346 		 */
2347 		do_group_exit(info->si_signo);
2348 		/* NOTREACHED */
2349 	}
2350 	spin_unlock_irq(&sighand->siglock);
2351 	return signr;
2352 }
2353 
2354 /**
2355  * signal_delivered -
2356  * @sig:		number of signal being delivered
2357  * @info:		siginfo_t of signal being delivered
2358  * @ka:			sigaction setting that chose the handler
2359  * @regs:		user register state
2360  * @stepping:		nonzero if debugger single-step or block-step in use
2361  *
2362  * This function should be called when a signal has successfully been
2363  * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
2364  * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2365  * is set in @ka->sa.sa_flags.  Tracing is notified.
2366  */
2367 void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
2368 			struct pt_regs *regs, int stepping)
2369 {
2370 	sigset_t blocked;
2371 
2372 	/* A signal was successfully delivered, and the
2373 	   saved sigmask was stored on the signal frame,
2374 	   and will be restored by sigreturn.  So we can
2375 	   simply clear the restore sigmask flag.  */
2376 	clear_restore_sigmask();
2377 
2378 	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
2379 	if (!(ka->sa.sa_flags & SA_NODEFER))
2380 		sigaddset(&blocked, sig);
2381 	set_current_blocked(&blocked);
2382 	tracehook_signal_handler(sig, info, ka, regs, stepping);
2383 }
2384 
2385 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2386 {
2387 	if (failed)
2388 		force_sigsegv(ksig->sig, current);
2389 	else
2390 		signal_delivered(ksig->sig, &ksig->info, &ksig->ka,
2391 			signal_pt_regs(), stepping);
2392 }
2393 
2394 /*
2395  * It could be that complete_signal() picked us to notify about the
2396  * group-wide signal. Other threads should be notified now to take
2397  * the shared signals in @which since we will not.
2398  */
2399 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2400 {
2401 	sigset_t retarget;
2402 	struct task_struct *t;
2403 
2404 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2405 	if (sigisemptyset(&retarget))
2406 		return;
2407 
2408 	t = tsk;
2409 	while_each_thread(tsk, t) {
2410 		if (t->flags & PF_EXITING)
2411 			continue;
2412 
2413 		if (!has_pending_signals(&retarget, &t->blocked))
2414 			continue;
2415 		/* Remove the signals this thread can handle. */
2416 		sigandsets(&retarget, &retarget, &t->blocked);
2417 
2418 		if (!signal_pending(t))
2419 			signal_wake_up(t, 0);
2420 
2421 		if (sigisemptyset(&retarget))
2422 			break;
2423 	}
2424 }
2425 
2426 void exit_signals(struct task_struct *tsk)
2427 {
2428 	int group_stop = 0;
2429 	sigset_t unblocked;
2430 
2431 	/*
2432 	 * @tsk is about to have PF_EXITING set - lock out users which
2433 	 * expect stable threadgroup.
2434 	 */
2435 	threadgroup_change_begin(tsk);
2436 
2437 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2438 		tsk->flags |= PF_EXITING;
2439 		threadgroup_change_end(tsk);
2440 		return;
2441 	}
2442 
2443 	spin_lock_irq(&tsk->sighand->siglock);
2444 	/*
2445 	 * From now this task is not visible for group-wide signals,
2446 	 * see wants_signal(), do_signal_stop().
2447 	 */
2448 	tsk->flags |= PF_EXITING;
2449 
2450 	threadgroup_change_end(tsk);
2451 
2452 	if (!signal_pending(tsk))
2453 		goto out;
2454 
2455 	unblocked = tsk->blocked;
2456 	signotset(&unblocked);
2457 	retarget_shared_pending(tsk, &unblocked);
2458 
2459 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2460 	    task_participate_group_stop(tsk))
2461 		group_stop = CLD_STOPPED;
2462 out:
2463 	spin_unlock_irq(&tsk->sighand->siglock);
2464 
2465 	/*
2466 	 * If group stop has completed, deliver the notification.  This
2467 	 * should always go to the real parent of the group leader.
2468 	 */
2469 	if (unlikely(group_stop)) {
2470 		read_lock(&tasklist_lock);
2471 		do_notify_parent_cldstop(tsk, false, group_stop);
2472 		read_unlock(&tasklist_lock);
2473 	}
2474 }
2475 
2476 EXPORT_SYMBOL(recalc_sigpending);
2477 EXPORT_SYMBOL_GPL(dequeue_signal);
2478 EXPORT_SYMBOL(flush_signals);
2479 EXPORT_SYMBOL(force_sig);
2480 EXPORT_SYMBOL(send_sig);
2481 EXPORT_SYMBOL(send_sig_info);
2482 EXPORT_SYMBOL(sigprocmask);
2483 EXPORT_SYMBOL(block_all_signals);
2484 EXPORT_SYMBOL(unblock_all_signals);
2485 
2486 
2487 /*
2488  * System call entry points.
2489  */
2490 
2491 /**
2492  *  sys_restart_syscall - restart a system call
2493  */
2494 SYSCALL_DEFINE0(restart_syscall)
2495 {
2496 	struct restart_block *restart = &current_thread_info()->restart_block;
2497 	return restart->fn(restart);
2498 }
2499 
2500 long do_no_restart_syscall(struct restart_block *param)
2501 {
2502 	return -EINTR;
2503 }
2504 
2505 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2506 {
2507 	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2508 		sigset_t newblocked;
2509 		/* A set of now blocked but previously unblocked signals. */
2510 		sigandnsets(&newblocked, newset, &current->blocked);
2511 		retarget_shared_pending(tsk, &newblocked);
2512 	}
2513 	tsk->blocked = *newset;
2514 	recalc_sigpending();
2515 }
2516 
2517 /**
2518  * set_current_blocked - change current->blocked mask
2519  * @newset: new mask
2520  *
2521  * It is wrong to change ->blocked directly, this helper should be used
2522  * to ensure the process can't miss a shared signal we are going to block.
2523  */
2524 void set_current_blocked(sigset_t *newset)
2525 {
2526 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2527 	__set_current_blocked(newset);
2528 }
2529 
2530 void __set_current_blocked(const sigset_t *newset)
2531 {
2532 	struct task_struct *tsk = current;
2533 
2534 	spin_lock_irq(&tsk->sighand->siglock);
2535 	__set_task_blocked(tsk, newset);
2536 	spin_unlock_irq(&tsk->sighand->siglock);
2537 }
2538 
2539 /*
2540  * This is also useful for kernel threads that want to temporarily
2541  * (or permanently) block certain signals.
2542  *
2543  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2544  * interface happily blocks "unblockable" signals like SIGKILL
2545  * and friends.
2546  */
2547 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2548 {
2549 	struct task_struct *tsk = current;
2550 	sigset_t newset;
2551 
2552 	/* Lockless, only current can change ->blocked, never from irq */
2553 	if (oldset)
2554 		*oldset = tsk->blocked;
2555 
2556 	switch (how) {
2557 	case SIG_BLOCK:
2558 		sigorsets(&newset, &tsk->blocked, set);
2559 		break;
2560 	case SIG_UNBLOCK:
2561 		sigandnsets(&newset, &tsk->blocked, set);
2562 		break;
2563 	case SIG_SETMASK:
2564 		newset = *set;
2565 		break;
2566 	default:
2567 		return -EINVAL;
2568 	}
2569 
2570 	__set_current_blocked(&newset);
2571 	return 0;
2572 }
2573 
2574 /**
2575  *  sys_rt_sigprocmask - change the list of currently blocked signals
2576  *  @how: whether to add, remove, or set signals
2577  *  @nset: stores pending signals
2578  *  @oset: previous value of signal mask if non-null
2579  *  @sigsetsize: size of sigset_t type
2580  */
2581 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2582 		sigset_t __user *, oset, size_t, sigsetsize)
2583 {
2584 	sigset_t old_set, new_set;
2585 	int error;
2586 
2587 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2588 	if (sigsetsize != sizeof(sigset_t))
2589 		return -EINVAL;
2590 
2591 	old_set = current->blocked;
2592 
2593 	if (nset) {
2594 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2595 			return -EFAULT;
2596 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2597 
2598 		error = sigprocmask(how, &new_set, NULL);
2599 		if (error)
2600 			return error;
2601 	}
2602 
2603 	if (oset) {
2604 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2605 			return -EFAULT;
2606 	}
2607 
2608 	return 0;
2609 }
2610 
2611 #ifdef CONFIG_COMPAT
2612 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2613 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2614 {
2615 #ifdef __BIG_ENDIAN
2616 	sigset_t old_set = current->blocked;
2617 
2618 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2619 	if (sigsetsize != sizeof(sigset_t))
2620 		return -EINVAL;
2621 
2622 	if (nset) {
2623 		compat_sigset_t new32;
2624 		sigset_t new_set;
2625 		int error;
2626 		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2627 			return -EFAULT;
2628 
2629 		sigset_from_compat(&new_set, &new32);
2630 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2631 
2632 		error = sigprocmask(how, &new_set, NULL);
2633 		if (error)
2634 			return error;
2635 	}
2636 	if (oset) {
2637 		compat_sigset_t old32;
2638 		sigset_to_compat(&old32, &old_set);
2639 		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2640 			return -EFAULT;
2641 	}
2642 	return 0;
2643 #else
2644 	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2645 				  (sigset_t __user *)oset, sigsetsize);
2646 #endif
2647 }
2648 #endif
2649 
2650 static int do_sigpending(void *set, unsigned long sigsetsize)
2651 {
2652 	if (sigsetsize > sizeof(sigset_t))
2653 		return -EINVAL;
2654 
2655 	spin_lock_irq(&current->sighand->siglock);
2656 	sigorsets(set, &current->pending.signal,
2657 		  &current->signal->shared_pending.signal);
2658 	spin_unlock_irq(&current->sighand->siglock);
2659 
2660 	/* Outside the lock because only this thread touches it.  */
2661 	sigandsets(set, &current->blocked, set);
2662 	return 0;
2663 }
2664 
2665 /**
2666  *  sys_rt_sigpending - examine a pending signal that has been raised
2667  *			while blocked
2668  *  @uset: stores pending signals
2669  *  @sigsetsize: size of sigset_t type or larger
2670  */
2671 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2672 {
2673 	sigset_t set;
2674 	int err = do_sigpending(&set, sigsetsize);
2675 	if (!err && copy_to_user(uset, &set, sigsetsize))
2676 		err = -EFAULT;
2677 	return err;
2678 }
2679 
2680 #ifdef CONFIG_COMPAT
2681 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2682 		compat_size_t, sigsetsize)
2683 {
2684 #ifdef __BIG_ENDIAN
2685 	sigset_t set;
2686 	int err = do_sigpending(&set, sigsetsize);
2687 	if (!err) {
2688 		compat_sigset_t set32;
2689 		sigset_to_compat(&set32, &set);
2690 		/* we can get here only if sigsetsize <= sizeof(set) */
2691 		if (copy_to_user(uset, &set32, sigsetsize))
2692 			err = -EFAULT;
2693 	}
2694 	return err;
2695 #else
2696 	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2697 #endif
2698 }
2699 #endif
2700 
2701 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2702 
2703 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2704 {
2705 	int err;
2706 
2707 	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2708 		return -EFAULT;
2709 	if (from->si_code < 0)
2710 		return __copy_to_user(to, from, sizeof(siginfo_t))
2711 			? -EFAULT : 0;
2712 	/*
2713 	 * If you change siginfo_t structure, please be sure
2714 	 * this code is fixed accordingly.
2715 	 * Please remember to update the signalfd_copyinfo() function
2716 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2717 	 * It should never copy any pad contained in the structure
2718 	 * to avoid security leaks, but must copy the generic
2719 	 * 3 ints plus the relevant union member.
2720 	 */
2721 	err = __put_user(from->si_signo, &to->si_signo);
2722 	err |= __put_user(from->si_errno, &to->si_errno);
2723 	err |= __put_user((short)from->si_code, &to->si_code);
2724 	switch (from->si_code & __SI_MASK) {
2725 	case __SI_KILL:
2726 		err |= __put_user(from->si_pid, &to->si_pid);
2727 		err |= __put_user(from->si_uid, &to->si_uid);
2728 		break;
2729 	case __SI_TIMER:
2730 		 err |= __put_user(from->si_tid, &to->si_tid);
2731 		 err |= __put_user(from->si_overrun, &to->si_overrun);
2732 		 err |= __put_user(from->si_ptr, &to->si_ptr);
2733 		break;
2734 	case __SI_POLL:
2735 		err |= __put_user(from->si_band, &to->si_band);
2736 		err |= __put_user(from->si_fd, &to->si_fd);
2737 		break;
2738 	case __SI_FAULT:
2739 		err |= __put_user(from->si_addr, &to->si_addr);
2740 #ifdef __ARCH_SI_TRAPNO
2741 		err |= __put_user(from->si_trapno, &to->si_trapno);
2742 #endif
2743 #ifdef BUS_MCEERR_AO
2744 		/*
2745 		 * Other callers might not initialize the si_lsb field,
2746 		 * so check explicitly for the right codes here.
2747 		 */
2748 		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2749 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2750 #endif
2751 		break;
2752 	case __SI_CHLD:
2753 		err |= __put_user(from->si_pid, &to->si_pid);
2754 		err |= __put_user(from->si_uid, &to->si_uid);
2755 		err |= __put_user(from->si_status, &to->si_status);
2756 		err |= __put_user(from->si_utime, &to->si_utime);
2757 		err |= __put_user(from->si_stime, &to->si_stime);
2758 		break;
2759 	case __SI_RT: /* This is not generated by the kernel as of now. */
2760 	case __SI_MESGQ: /* But this is */
2761 		err |= __put_user(from->si_pid, &to->si_pid);
2762 		err |= __put_user(from->si_uid, &to->si_uid);
2763 		err |= __put_user(from->si_ptr, &to->si_ptr);
2764 		break;
2765 #ifdef __ARCH_SIGSYS
2766 	case __SI_SYS:
2767 		err |= __put_user(from->si_call_addr, &to->si_call_addr);
2768 		err |= __put_user(from->si_syscall, &to->si_syscall);
2769 		err |= __put_user(from->si_arch, &to->si_arch);
2770 		break;
2771 #endif
2772 	default: /* this is just in case for now ... */
2773 		err |= __put_user(from->si_pid, &to->si_pid);
2774 		err |= __put_user(from->si_uid, &to->si_uid);
2775 		break;
2776 	}
2777 	return err;
2778 }
2779 
2780 #endif
2781 
2782 /**
2783  *  do_sigtimedwait - wait for queued signals specified in @which
2784  *  @which: queued signals to wait for
2785  *  @info: if non-null, the signal's siginfo is returned here
2786  *  @ts: upper bound on process time suspension
2787  */
2788 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2789 			const struct timespec *ts)
2790 {
2791 	struct task_struct *tsk = current;
2792 	long timeout = MAX_SCHEDULE_TIMEOUT;
2793 	sigset_t mask = *which;
2794 	int sig;
2795 
2796 	if (ts) {
2797 		if (!timespec_valid(ts))
2798 			return -EINVAL;
2799 		timeout = timespec_to_jiffies(ts);
2800 		/*
2801 		 * We can be close to the next tick, add another one
2802 		 * to ensure we will wait at least the time asked for.
2803 		 */
2804 		if (ts->tv_sec || ts->tv_nsec)
2805 			timeout++;
2806 	}
2807 
2808 	/*
2809 	 * Invert the set of allowed signals to get those we want to block.
2810 	 */
2811 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2812 	signotset(&mask);
2813 
2814 	spin_lock_irq(&tsk->sighand->siglock);
2815 	sig = dequeue_signal(tsk, &mask, info);
2816 	if (!sig && timeout) {
2817 		/*
2818 		 * None ready, temporarily unblock those we're interested
2819 		 * while we are sleeping in so that we'll be awakened when
2820 		 * they arrive. Unblocking is always fine, we can avoid
2821 		 * set_current_blocked().
2822 		 */
2823 		tsk->real_blocked = tsk->blocked;
2824 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2825 		recalc_sigpending();
2826 		spin_unlock_irq(&tsk->sighand->siglock);
2827 
2828 		timeout = freezable_schedule_timeout_interruptible(timeout);
2829 
2830 		spin_lock_irq(&tsk->sighand->siglock);
2831 		__set_task_blocked(tsk, &tsk->real_blocked);
2832 		sigemptyset(&tsk->real_blocked);
2833 		sig = dequeue_signal(tsk, &mask, info);
2834 	}
2835 	spin_unlock_irq(&tsk->sighand->siglock);
2836 
2837 	if (sig)
2838 		return sig;
2839 	return timeout ? -EINTR : -EAGAIN;
2840 }
2841 
2842 /**
2843  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
2844  *			in @uthese
2845  *  @uthese: queued signals to wait for
2846  *  @uinfo: if non-null, the signal's siginfo is returned here
2847  *  @uts: upper bound on process time suspension
2848  *  @sigsetsize: size of sigset_t type
2849  */
2850 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2851 		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2852 		size_t, sigsetsize)
2853 {
2854 	sigset_t these;
2855 	struct timespec ts;
2856 	siginfo_t info;
2857 	int ret;
2858 
2859 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2860 	if (sigsetsize != sizeof(sigset_t))
2861 		return -EINVAL;
2862 
2863 	if (copy_from_user(&these, uthese, sizeof(these)))
2864 		return -EFAULT;
2865 
2866 	if (uts) {
2867 		if (copy_from_user(&ts, uts, sizeof(ts)))
2868 			return -EFAULT;
2869 	}
2870 
2871 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2872 
2873 	if (ret > 0 && uinfo) {
2874 		if (copy_siginfo_to_user(uinfo, &info))
2875 			ret = -EFAULT;
2876 	}
2877 
2878 	return ret;
2879 }
2880 
2881 /**
2882  *  sys_kill - send a signal to a process
2883  *  @pid: the PID of the process
2884  *  @sig: signal to be sent
2885  */
2886 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2887 {
2888 	struct siginfo info;
2889 
2890 	info.si_signo = sig;
2891 	info.si_errno = 0;
2892 	info.si_code = SI_USER;
2893 	info.si_pid = task_tgid_vnr(current);
2894 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2895 
2896 	return kill_something_info(sig, &info, pid);
2897 }
2898 
2899 static int
2900 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2901 {
2902 	struct task_struct *p;
2903 	int error = -ESRCH;
2904 
2905 	rcu_read_lock();
2906 	p = find_task_by_vpid(pid);
2907 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2908 		error = check_kill_permission(sig, info, p);
2909 		/*
2910 		 * The null signal is a permissions and process existence
2911 		 * probe.  No signal is actually delivered.
2912 		 */
2913 		if (!error && sig) {
2914 			error = do_send_sig_info(sig, info, p, false);
2915 			/*
2916 			 * If lock_task_sighand() failed we pretend the task
2917 			 * dies after receiving the signal. The window is tiny,
2918 			 * and the signal is private anyway.
2919 			 */
2920 			if (unlikely(error == -ESRCH))
2921 				error = 0;
2922 		}
2923 	}
2924 	rcu_read_unlock();
2925 
2926 	return error;
2927 }
2928 
2929 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2930 {
2931 	struct siginfo info = {};
2932 
2933 	info.si_signo = sig;
2934 	info.si_errno = 0;
2935 	info.si_code = SI_TKILL;
2936 	info.si_pid = task_tgid_vnr(current);
2937 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2938 
2939 	return do_send_specific(tgid, pid, sig, &info);
2940 }
2941 
2942 /**
2943  *  sys_tgkill - send signal to one specific thread
2944  *  @tgid: the thread group ID of the thread
2945  *  @pid: the PID of the thread
2946  *  @sig: signal to be sent
2947  *
2948  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2949  *  exists but it's not belonging to the target process anymore. This
2950  *  method solves the problem of threads exiting and PIDs getting reused.
2951  */
2952 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2953 {
2954 	/* This is only valid for single tasks */
2955 	if (pid <= 0 || tgid <= 0)
2956 		return -EINVAL;
2957 
2958 	return do_tkill(tgid, pid, sig);
2959 }
2960 
2961 /**
2962  *  sys_tkill - send signal to one specific task
2963  *  @pid: the PID of the task
2964  *  @sig: signal to be sent
2965  *
2966  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2967  */
2968 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2969 {
2970 	/* This is only valid for single tasks */
2971 	if (pid <= 0)
2972 		return -EINVAL;
2973 
2974 	return do_tkill(0, pid, sig);
2975 }
2976 
2977 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
2978 {
2979 	/* Not even root can pretend to send signals from the kernel.
2980 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2981 	 */
2982 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
2983 	    (task_pid_vnr(current) != pid)) {
2984 		/* We used to allow any < 0 si_code */
2985 		WARN_ON_ONCE(info->si_code < 0);
2986 		return -EPERM;
2987 	}
2988 	info->si_signo = sig;
2989 
2990 	/* POSIX.1b doesn't mention process groups.  */
2991 	return kill_proc_info(sig, info, pid);
2992 }
2993 
2994 /**
2995  *  sys_rt_sigqueueinfo - send signal information to a signal
2996  *  @pid: the PID of the thread
2997  *  @sig: signal to be sent
2998  *  @uinfo: signal info to be sent
2999  */
3000 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3001 		siginfo_t __user *, uinfo)
3002 {
3003 	siginfo_t info;
3004 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3005 		return -EFAULT;
3006 	return do_rt_sigqueueinfo(pid, sig, &info);
3007 }
3008 
3009 #ifdef CONFIG_COMPAT
3010 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3011 			compat_pid_t, pid,
3012 			int, sig,
3013 			struct compat_siginfo __user *, uinfo)
3014 {
3015 	siginfo_t info;
3016 	int ret = copy_siginfo_from_user32(&info, uinfo);
3017 	if (unlikely(ret))
3018 		return ret;
3019 	return do_rt_sigqueueinfo(pid, sig, &info);
3020 }
3021 #endif
3022 
3023 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3024 {
3025 	/* This is only valid for single tasks */
3026 	if (pid <= 0 || tgid <= 0)
3027 		return -EINVAL;
3028 
3029 	/* Not even root can pretend to send signals from the kernel.
3030 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3031 	 */
3032 	if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
3033 	    (task_pid_vnr(current) != pid)) {
3034 		/* We used to allow any < 0 si_code */
3035 		WARN_ON_ONCE(info->si_code < 0);
3036 		return -EPERM;
3037 	}
3038 	info->si_signo = sig;
3039 
3040 	return do_send_specific(tgid, pid, sig, info);
3041 }
3042 
3043 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3044 		siginfo_t __user *, uinfo)
3045 {
3046 	siginfo_t info;
3047 
3048 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3049 		return -EFAULT;
3050 
3051 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3052 }
3053 
3054 #ifdef CONFIG_COMPAT
3055 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3056 			compat_pid_t, tgid,
3057 			compat_pid_t, pid,
3058 			int, sig,
3059 			struct compat_siginfo __user *, uinfo)
3060 {
3061 	siginfo_t info;
3062 
3063 	if (copy_siginfo_from_user32(&info, uinfo))
3064 		return -EFAULT;
3065 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3066 }
3067 #endif
3068 
3069 /*
3070  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3071  */
3072 void kernel_sigaction(int sig, __sighandler_t action)
3073 {
3074 	spin_lock_irq(&current->sighand->siglock);
3075 	current->sighand->action[sig - 1].sa.sa_handler = action;
3076 	if (action == SIG_IGN) {
3077 		sigset_t mask;
3078 
3079 		sigemptyset(&mask);
3080 		sigaddset(&mask, sig);
3081 
3082 		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3083 		flush_sigqueue_mask(&mask, &current->pending);
3084 		recalc_sigpending();
3085 	}
3086 	spin_unlock_irq(&current->sighand->siglock);
3087 }
3088 EXPORT_SYMBOL(kernel_sigaction);
3089 
3090 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3091 {
3092 	struct task_struct *p = current, *t;
3093 	struct k_sigaction *k;
3094 	sigset_t mask;
3095 
3096 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3097 		return -EINVAL;
3098 
3099 	k = &p->sighand->action[sig-1];
3100 
3101 	spin_lock_irq(&p->sighand->siglock);
3102 	if (oact)
3103 		*oact = *k;
3104 
3105 	if (act) {
3106 		sigdelsetmask(&act->sa.sa_mask,
3107 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
3108 		*k = *act;
3109 		/*
3110 		 * POSIX 3.3.1.3:
3111 		 *  "Setting a signal action to SIG_IGN for a signal that is
3112 		 *   pending shall cause the pending signal to be discarded,
3113 		 *   whether or not it is blocked."
3114 		 *
3115 		 *  "Setting a signal action to SIG_DFL for a signal that is
3116 		 *   pending and whose default action is to ignore the signal
3117 		 *   (for example, SIGCHLD), shall cause the pending signal to
3118 		 *   be discarded, whether or not it is blocked"
3119 		 */
3120 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3121 			sigemptyset(&mask);
3122 			sigaddset(&mask, sig);
3123 			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3124 			for_each_thread(p, t)
3125 				flush_sigqueue_mask(&mask, &t->pending);
3126 		}
3127 	}
3128 
3129 	spin_unlock_irq(&p->sighand->siglock);
3130 	return 0;
3131 }
3132 
3133 static int
3134 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3135 {
3136 	stack_t oss;
3137 	int error;
3138 
3139 	oss.ss_sp = (void __user *) current->sas_ss_sp;
3140 	oss.ss_size = current->sas_ss_size;
3141 	oss.ss_flags = sas_ss_flags(sp);
3142 
3143 	if (uss) {
3144 		void __user *ss_sp;
3145 		size_t ss_size;
3146 		int ss_flags;
3147 
3148 		error = -EFAULT;
3149 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3150 			goto out;
3151 		error = __get_user(ss_sp, &uss->ss_sp) |
3152 			__get_user(ss_flags, &uss->ss_flags) |
3153 			__get_user(ss_size, &uss->ss_size);
3154 		if (error)
3155 			goto out;
3156 
3157 		error = -EPERM;
3158 		if (on_sig_stack(sp))
3159 			goto out;
3160 
3161 		error = -EINVAL;
3162 		/*
3163 		 * Note - this code used to test ss_flags incorrectly:
3164 		 *  	  old code may have been written using ss_flags==0
3165 		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
3166 		 *	  way that worked) - this fix preserves that older
3167 		 *	  mechanism.
3168 		 */
3169 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3170 			goto out;
3171 
3172 		if (ss_flags == SS_DISABLE) {
3173 			ss_size = 0;
3174 			ss_sp = NULL;
3175 		} else {
3176 			error = -ENOMEM;
3177 			if (ss_size < MINSIGSTKSZ)
3178 				goto out;
3179 		}
3180 
3181 		current->sas_ss_sp = (unsigned long) ss_sp;
3182 		current->sas_ss_size = ss_size;
3183 	}
3184 
3185 	error = 0;
3186 	if (uoss) {
3187 		error = -EFAULT;
3188 		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3189 			goto out;
3190 		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3191 			__put_user(oss.ss_size, &uoss->ss_size) |
3192 			__put_user(oss.ss_flags, &uoss->ss_flags);
3193 	}
3194 
3195 out:
3196 	return error;
3197 }
3198 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3199 {
3200 	return do_sigaltstack(uss, uoss, current_user_stack_pointer());
3201 }
3202 
3203 int restore_altstack(const stack_t __user *uss)
3204 {
3205 	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3206 	/* squash all but EFAULT for now */
3207 	return err == -EFAULT ? err : 0;
3208 }
3209 
3210 int __save_altstack(stack_t __user *uss, unsigned long sp)
3211 {
3212 	struct task_struct *t = current;
3213 	return  __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3214 		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
3215 		__put_user(t->sas_ss_size, &uss->ss_size);
3216 }
3217 
3218 #ifdef CONFIG_COMPAT
3219 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3220 			const compat_stack_t __user *, uss_ptr,
3221 			compat_stack_t __user *, uoss_ptr)
3222 {
3223 	stack_t uss, uoss;
3224 	int ret;
3225 	mm_segment_t seg;
3226 
3227 	if (uss_ptr) {
3228 		compat_stack_t uss32;
3229 
3230 		memset(&uss, 0, sizeof(stack_t));
3231 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3232 			return -EFAULT;
3233 		uss.ss_sp = compat_ptr(uss32.ss_sp);
3234 		uss.ss_flags = uss32.ss_flags;
3235 		uss.ss_size = uss32.ss_size;
3236 	}
3237 	seg = get_fs();
3238 	set_fs(KERNEL_DS);
3239 	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3240 			     (stack_t __force __user *) &uoss,
3241 			     compat_user_stack_pointer());
3242 	set_fs(seg);
3243 	if (ret >= 0 && uoss_ptr)  {
3244 		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3245 		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3246 		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3247 		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3248 			ret = -EFAULT;
3249 	}
3250 	return ret;
3251 }
3252 
3253 int compat_restore_altstack(const compat_stack_t __user *uss)
3254 {
3255 	int err = compat_sys_sigaltstack(uss, NULL);
3256 	/* squash all but -EFAULT for now */
3257 	return err == -EFAULT ? err : 0;
3258 }
3259 
3260 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3261 {
3262 	struct task_struct *t = current;
3263 	return  __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
3264 		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
3265 		__put_user(t->sas_ss_size, &uss->ss_size);
3266 }
3267 #endif
3268 
3269 #ifdef __ARCH_WANT_SYS_SIGPENDING
3270 
3271 /**
3272  *  sys_sigpending - examine pending signals
3273  *  @set: where mask of pending signal is returned
3274  */
3275 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3276 {
3277 	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3278 }
3279 
3280 #endif
3281 
3282 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3283 /**
3284  *  sys_sigprocmask - examine and change blocked signals
3285  *  @how: whether to add, remove, or set signals
3286  *  @nset: signals to add or remove (if non-null)
3287  *  @oset: previous value of signal mask if non-null
3288  *
3289  * Some platforms have their own version with special arguments;
3290  * others support only sys_rt_sigprocmask.
3291  */
3292 
3293 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3294 		old_sigset_t __user *, oset)
3295 {
3296 	old_sigset_t old_set, new_set;
3297 	sigset_t new_blocked;
3298 
3299 	old_set = current->blocked.sig[0];
3300 
3301 	if (nset) {
3302 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
3303 			return -EFAULT;
3304 
3305 		new_blocked = current->blocked;
3306 
3307 		switch (how) {
3308 		case SIG_BLOCK:
3309 			sigaddsetmask(&new_blocked, new_set);
3310 			break;
3311 		case SIG_UNBLOCK:
3312 			sigdelsetmask(&new_blocked, new_set);
3313 			break;
3314 		case SIG_SETMASK:
3315 			new_blocked.sig[0] = new_set;
3316 			break;
3317 		default:
3318 			return -EINVAL;
3319 		}
3320 
3321 		set_current_blocked(&new_blocked);
3322 	}
3323 
3324 	if (oset) {
3325 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
3326 			return -EFAULT;
3327 	}
3328 
3329 	return 0;
3330 }
3331 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3332 
3333 #ifndef CONFIG_ODD_RT_SIGACTION
3334 /**
3335  *  sys_rt_sigaction - alter an action taken by a process
3336  *  @sig: signal to be sent
3337  *  @act: new sigaction
3338  *  @oact: used to save the previous sigaction
3339  *  @sigsetsize: size of sigset_t type
3340  */
3341 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3342 		const struct sigaction __user *, act,
3343 		struct sigaction __user *, oact,
3344 		size_t, sigsetsize)
3345 {
3346 	struct k_sigaction new_sa, old_sa;
3347 	int ret = -EINVAL;
3348 
3349 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3350 	if (sigsetsize != sizeof(sigset_t))
3351 		goto out;
3352 
3353 	if (act) {
3354 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3355 			return -EFAULT;
3356 	}
3357 
3358 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3359 
3360 	if (!ret && oact) {
3361 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3362 			return -EFAULT;
3363 	}
3364 out:
3365 	return ret;
3366 }
3367 #ifdef CONFIG_COMPAT
3368 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3369 		const struct compat_sigaction __user *, act,
3370 		struct compat_sigaction __user *, oact,
3371 		compat_size_t, sigsetsize)
3372 {
3373 	struct k_sigaction new_ka, old_ka;
3374 	compat_sigset_t mask;
3375 #ifdef __ARCH_HAS_SA_RESTORER
3376 	compat_uptr_t restorer;
3377 #endif
3378 	int ret;
3379 
3380 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3381 	if (sigsetsize != sizeof(compat_sigset_t))
3382 		return -EINVAL;
3383 
3384 	if (act) {
3385 		compat_uptr_t handler;
3386 		ret = get_user(handler, &act->sa_handler);
3387 		new_ka.sa.sa_handler = compat_ptr(handler);
3388 #ifdef __ARCH_HAS_SA_RESTORER
3389 		ret |= get_user(restorer, &act->sa_restorer);
3390 		new_ka.sa.sa_restorer = compat_ptr(restorer);
3391 #endif
3392 		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3393 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3394 		if (ret)
3395 			return -EFAULT;
3396 		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3397 	}
3398 
3399 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3400 	if (!ret && oact) {
3401 		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3402 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3403 			       &oact->sa_handler);
3404 		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3405 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3406 #ifdef __ARCH_HAS_SA_RESTORER
3407 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3408 				&oact->sa_restorer);
3409 #endif
3410 	}
3411 	return ret;
3412 }
3413 #endif
3414 #endif /* !CONFIG_ODD_RT_SIGACTION */
3415 
3416 #ifdef CONFIG_OLD_SIGACTION
3417 SYSCALL_DEFINE3(sigaction, int, sig,
3418 		const struct old_sigaction __user *, act,
3419 	        struct old_sigaction __user *, oact)
3420 {
3421 	struct k_sigaction new_ka, old_ka;
3422 	int ret;
3423 
3424 	if (act) {
3425 		old_sigset_t mask;
3426 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3427 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3428 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3429 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3430 		    __get_user(mask, &act->sa_mask))
3431 			return -EFAULT;
3432 #ifdef __ARCH_HAS_KA_RESTORER
3433 		new_ka.ka_restorer = NULL;
3434 #endif
3435 		siginitset(&new_ka.sa.sa_mask, mask);
3436 	}
3437 
3438 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3439 
3440 	if (!ret && oact) {
3441 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3442 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3443 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3444 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3445 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3446 			return -EFAULT;
3447 	}
3448 
3449 	return ret;
3450 }
3451 #endif
3452 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3453 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3454 		const struct compat_old_sigaction __user *, act,
3455 	        struct compat_old_sigaction __user *, oact)
3456 {
3457 	struct k_sigaction new_ka, old_ka;
3458 	int ret;
3459 	compat_old_sigset_t mask;
3460 	compat_uptr_t handler, restorer;
3461 
3462 	if (act) {
3463 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3464 		    __get_user(handler, &act->sa_handler) ||
3465 		    __get_user(restorer, &act->sa_restorer) ||
3466 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3467 		    __get_user(mask, &act->sa_mask))
3468 			return -EFAULT;
3469 
3470 #ifdef __ARCH_HAS_KA_RESTORER
3471 		new_ka.ka_restorer = NULL;
3472 #endif
3473 		new_ka.sa.sa_handler = compat_ptr(handler);
3474 		new_ka.sa.sa_restorer = compat_ptr(restorer);
3475 		siginitset(&new_ka.sa.sa_mask, mask);
3476 	}
3477 
3478 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3479 
3480 	if (!ret && oact) {
3481 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3482 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3483 			       &oact->sa_handler) ||
3484 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3485 			       &oact->sa_restorer) ||
3486 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3487 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3488 			return -EFAULT;
3489 	}
3490 	return ret;
3491 }
3492 #endif
3493 
3494 #ifdef CONFIG_SGETMASK_SYSCALL
3495 
3496 /*
3497  * For backwards compatibility.  Functionality superseded by sigprocmask.
3498  */
3499 SYSCALL_DEFINE0(sgetmask)
3500 {
3501 	/* SMP safe */
3502 	return current->blocked.sig[0];
3503 }
3504 
3505 SYSCALL_DEFINE1(ssetmask, int, newmask)
3506 {
3507 	int old = current->blocked.sig[0];
3508 	sigset_t newset;
3509 
3510 	siginitset(&newset, newmask);
3511 	set_current_blocked(&newset);
3512 
3513 	return old;
3514 }
3515 #endif /* CONFIG_SGETMASK_SYSCALL */
3516 
3517 #ifdef __ARCH_WANT_SYS_SIGNAL
3518 /*
3519  * For backwards compatibility.  Functionality superseded by sigaction.
3520  */
3521 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3522 {
3523 	struct k_sigaction new_sa, old_sa;
3524 	int ret;
3525 
3526 	new_sa.sa.sa_handler = handler;
3527 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3528 	sigemptyset(&new_sa.sa.sa_mask);
3529 
3530 	ret = do_sigaction(sig, &new_sa, &old_sa);
3531 
3532 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3533 }
3534 #endif /* __ARCH_WANT_SYS_SIGNAL */
3535 
3536 #ifdef __ARCH_WANT_SYS_PAUSE
3537 
3538 SYSCALL_DEFINE0(pause)
3539 {
3540 	while (!signal_pending(current)) {
3541 		current->state = TASK_INTERRUPTIBLE;
3542 		schedule();
3543 	}
3544 	return -ERESTARTNOHAND;
3545 }
3546 
3547 #endif
3548 
3549 int sigsuspend(sigset_t *set)
3550 {
3551 	current->saved_sigmask = current->blocked;
3552 	set_current_blocked(set);
3553 
3554 	current->state = TASK_INTERRUPTIBLE;
3555 	schedule();
3556 	set_restore_sigmask();
3557 	return -ERESTARTNOHAND;
3558 }
3559 
3560 /**
3561  *  sys_rt_sigsuspend - replace the signal mask for a value with the
3562  *	@unewset value until a signal is received
3563  *  @unewset: new signal mask value
3564  *  @sigsetsize: size of sigset_t type
3565  */
3566 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3567 {
3568 	sigset_t newset;
3569 
3570 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3571 	if (sigsetsize != sizeof(sigset_t))
3572 		return -EINVAL;
3573 
3574 	if (copy_from_user(&newset, unewset, sizeof(newset)))
3575 		return -EFAULT;
3576 	return sigsuspend(&newset);
3577 }
3578 
3579 #ifdef CONFIG_COMPAT
3580 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3581 {
3582 #ifdef __BIG_ENDIAN
3583 	sigset_t newset;
3584 	compat_sigset_t newset32;
3585 
3586 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3587 	if (sigsetsize != sizeof(sigset_t))
3588 		return -EINVAL;
3589 
3590 	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3591 		return -EFAULT;
3592 	sigset_from_compat(&newset, &newset32);
3593 	return sigsuspend(&newset);
3594 #else
3595 	/* on little-endian bitmaps don't care about granularity */
3596 	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3597 #endif
3598 }
3599 #endif
3600 
3601 #ifdef CONFIG_OLD_SIGSUSPEND
3602 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3603 {
3604 	sigset_t blocked;
3605 	siginitset(&blocked, mask);
3606 	return sigsuspend(&blocked);
3607 }
3608 #endif
3609 #ifdef CONFIG_OLD_SIGSUSPEND3
3610 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3611 {
3612 	sigset_t blocked;
3613 	siginitset(&blocked, mask);
3614 	return sigsuspend(&blocked);
3615 }
3616 #endif
3617 
3618 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3619 {
3620 	return NULL;
3621 }
3622 
3623 void __init signals_init(void)
3624 {
3625 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3626 }
3627 
3628 #ifdef CONFIG_KGDB_KDB
3629 #include <linux/kdb.h>
3630 /*
3631  * kdb_send_sig_info - Allows kdb to send signals without exposing
3632  * signal internals.  This function checks if the required locks are
3633  * available before calling the main signal code, to avoid kdb
3634  * deadlocks.
3635  */
3636 void
3637 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3638 {
3639 	static struct task_struct *kdb_prev_t;
3640 	int sig, new_t;
3641 	if (!spin_trylock(&t->sighand->siglock)) {
3642 		kdb_printf("Can't do kill command now.\n"
3643 			   "The sigmask lock is held somewhere else in "
3644 			   "kernel, try again later\n");
3645 		return;
3646 	}
3647 	spin_unlock(&t->sighand->siglock);
3648 	new_t = kdb_prev_t != t;
3649 	kdb_prev_t = t;
3650 	if (t->state != TASK_RUNNING && new_t) {
3651 		kdb_printf("Process is not RUNNING, sending a signal from "
3652 			   "kdb risks deadlock\n"
3653 			   "on the run queue locks. "
3654 			   "The signal has _not_ been sent.\n"
3655 			   "Reissue the kill command if you want to risk "
3656 			   "the deadlock.\n");
3657 		return;
3658 	}
3659 	sig = info->si_signo;
3660 	if (send_sig_info(sig, info, t))
3661 		kdb_printf("Fail to deliver Signal %d to process %d.\n",
3662 			   sig, t->pid);
3663 	else
3664 		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3665 }
3666 #endif	/* CONFIG_KGDB_KDB */
3667