xref: /openbmc/linux/kernel/signal.c (revision e868d61272caa648214046a096e5a6bfc068dc8c)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/capability.h>
26 #include <linux/freezer.h>
27 #include <linux/pid_namespace.h>
28 #include <linux/nsproxy.h>
29 
30 #include <asm/param.h>
31 #include <asm/uaccess.h>
32 #include <asm/unistd.h>
33 #include <asm/siginfo.h>
34 #include "audit.h"	/* audit_signal_info() */
35 
36 /*
37  * SLAB caches for signal bits.
38  */
39 
40 static struct kmem_cache *sigqueue_cachep;
41 
42 
43 static int sig_ignored(struct task_struct *t, int sig)
44 {
45 	void __user * handler;
46 
47 	/*
48 	 * Tracers always want to know about signals..
49 	 */
50 	if (t->ptrace & PT_PTRACED)
51 		return 0;
52 
53 	/*
54 	 * Blocked signals are never ignored, since the
55 	 * signal handler may change by the time it is
56 	 * unblocked.
57 	 */
58 	if (sigismember(&t->blocked, sig))
59 		return 0;
60 
61 	/* Is it explicitly or implicitly ignored? */
62 	handler = t->sighand->action[sig-1].sa.sa_handler;
63 	return   handler == SIG_IGN ||
64 		(handler == SIG_DFL && sig_kernel_ignore(sig));
65 }
66 
67 /*
68  * Re-calculate pending state from the set of locally pending
69  * signals, globally pending signals, and blocked signals.
70  */
71 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
72 {
73 	unsigned long ready;
74 	long i;
75 
76 	switch (_NSIG_WORDS) {
77 	default:
78 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
79 			ready |= signal->sig[i] &~ blocked->sig[i];
80 		break;
81 
82 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
83 		ready |= signal->sig[2] &~ blocked->sig[2];
84 		ready |= signal->sig[1] &~ blocked->sig[1];
85 		ready |= signal->sig[0] &~ blocked->sig[0];
86 		break;
87 
88 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
89 		ready |= signal->sig[0] &~ blocked->sig[0];
90 		break;
91 
92 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
93 	}
94 	return ready !=	0;
95 }
96 
97 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
98 
99 fastcall void recalc_sigpending_tsk(struct task_struct *t)
100 {
101 	if (t->signal->group_stop_count > 0 ||
102 	    (freezing(t)) ||
103 	    PENDING(&t->pending, &t->blocked) ||
104 	    PENDING(&t->signal->shared_pending, &t->blocked))
105 		set_tsk_thread_flag(t, TIF_SIGPENDING);
106 	else
107 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
108 }
109 
110 void recalc_sigpending(void)
111 {
112 	recalc_sigpending_tsk(current);
113 }
114 
115 /* Given the mask, find the first available signal that should be serviced. */
116 
117 int next_signal(struct sigpending *pending, sigset_t *mask)
118 {
119 	unsigned long i, *s, *m, x;
120 	int sig = 0;
121 
122 	s = pending->signal.sig;
123 	m = mask->sig;
124 	switch (_NSIG_WORDS) {
125 	default:
126 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
127 			if ((x = *s &~ *m) != 0) {
128 				sig = ffz(~x) + i*_NSIG_BPW + 1;
129 				break;
130 			}
131 		break;
132 
133 	case 2: if ((x = s[0] &~ m[0]) != 0)
134 			sig = 1;
135 		else if ((x = s[1] &~ m[1]) != 0)
136 			sig = _NSIG_BPW + 1;
137 		else
138 			break;
139 		sig += ffz(~x);
140 		break;
141 
142 	case 1: if ((x = *s &~ *m) != 0)
143 			sig = ffz(~x) + 1;
144 		break;
145 	}
146 
147 	return sig;
148 }
149 
150 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
151 					 int override_rlimit)
152 {
153 	struct sigqueue *q = NULL;
154 	struct user_struct *user;
155 
156 	/*
157 	 * In order to avoid problems with "switch_user()", we want to make
158 	 * sure that the compiler doesn't re-load "t->user"
159 	 */
160 	user = t->user;
161 	barrier();
162 	atomic_inc(&user->sigpending);
163 	if (override_rlimit ||
164 	    atomic_read(&user->sigpending) <=
165 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
166 		q = kmem_cache_alloc(sigqueue_cachep, flags);
167 	if (unlikely(q == NULL)) {
168 		atomic_dec(&user->sigpending);
169 	} else {
170 		INIT_LIST_HEAD(&q->list);
171 		q->flags = 0;
172 		q->user = get_uid(user);
173 	}
174 	return(q);
175 }
176 
177 static void __sigqueue_free(struct sigqueue *q)
178 {
179 	if (q->flags & SIGQUEUE_PREALLOC)
180 		return;
181 	atomic_dec(&q->user->sigpending);
182 	free_uid(q->user);
183 	kmem_cache_free(sigqueue_cachep, q);
184 }
185 
186 void flush_sigqueue(struct sigpending *queue)
187 {
188 	struct sigqueue *q;
189 
190 	sigemptyset(&queue->signal);
191 	while (!list_empty(&queue->list)) {
192 		q = list_entry(queue->list.next, struct sigqueue , list);
193 		list_del_init(&q->list);
194 		__sigqueue_free(q);
195 	}
196 }
197 
198 /*
199  * Flush all pending signals for a task.
200  */
201 void flush_signals(struct task_struct *t)
202 {
203 	unsigned long flags;
204 
205 	spin_lock_irqsave(&t->sighand->siglock, flags);
206 	clear_tsk_thread_flag(t,TIF_SIGPENDING);
207 	flush_sigqueue(&t->pending);
208 	flush_sigqueue(&t->signal->shared_pending);
209 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
210 }
211 
212 void ignore_signals(struct task_struct *t)
213 {
214 	int i;
215 
216 	for (i = 0; i < _NSIG; ++i)
217 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
218 
219 	flush_signals(t);
220 }
221 
222 /*
223  * Flush all handlers for a task.
224  */
225 
226 void
227 flush_signal_handlers(struct task_struct *t, int force_default)
228 {
229 	int i;
230 	struct k_sigaction *ka = &t->sighand->action[0];
231 	for (i = _NSIG ; i != 0 ; i--) {
232 		if (force_default || ka->sa.sa_handler != SIG_IGN)
233 			ka->sa.sa_handler = SIG_DFL;
234 		ka->sa.sa_flags = 0;
235 		sigemptyset(&ka->sa.sa_mask);
236 		ka++;
237 	}
238 }
239 
240 
241 /* Notify the system that a driver wants to block all signals for this
242  * process, and wants to be notified if any signals at all were to be
243  * sent/acted upon.  If the notifier routine returns non-zero, then the
244  * signal will be acted upon after all.  If the notifier routine returns 0,
245  *		then the signal will be blocked.  Only one block per process is
246  * allowed.  priv is a pointer to private data that the notifier routine
247  * can use to determine if the signal should be blocked or not.  */
248 
249 void
250 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
251 {
252 	unsigned long flags;
253 
254 	spin_lock_irqsave(&current->sighand->siglock, flags);
255 	current->notifier_mask = mask;
256 	current->notifier_data = priv;
257 	current->notifier = notifier;
258 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
259 }
260 
261 /* Notify the system that blocking has ended. */
262 
263 void
264 unblock_all_signals(void)
265 {
266 	unsigned long flags;
267 
268 	spin_lock_irqsave(&current->sighand->siglock, flags);
269 	current->notifier = NULL;
270 	current->notifier_data = NULL;
271 	recalc_sigpending();
272 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
273 }
274 
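/*
 * Illustrative sketch (not part of this file): how a driver might use the
 * notifier pair above.  "my_notifier" and "dev" are hypothetical; the mask
 * selects which signals the notifier is consulted for, and the notifier
 * returns non-zero to let a signal through or 0 to hold it back.
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return !dev->in_critical_section;
 *	}
 *
 *	sigset_t mask;
 *
 *	siginitsetinv(&mask, sigmask(SIGKILL));	 (every signal but SIGKILL)
 *	block_all_signals(my_notifier, dev, &mask);
 *	...critical work...
 *	unblock_all_signals();
 */
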
275 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
276 {
277 	struct sigqueue *q, *first = NULL;
278 	int still_pending = 0;
279 
280 	if (unlikely(!sigismember(&list->signal, sig)))
281 		return 0;
282 
283 	/*
284 	 * Collect the siginfo appropriate to this signal.  Check if
285 	 * there is another siginfo for the same signal.
286 	 */
287 	list_for_each_entry(q, &list->list, list) {
288 		if (q->info.si_signo == sig) {
289 			if (first) {
290 				still_pending = 1;
291 				break;
292 			}
293 			first = q;
294 		}
295 	}
296 	if (first) {
297 		list_del_init(&first->list);
298 		copy_siginfo(info, &first->info);
299 		__sigqueue_free(first);
300 		if (!still_pending)
301 			sigdelset(&list->signal, sig);
302 	} else {
303 
304 		/* Ok, it wasn't in the queue.  This must be
305 		   a fast-pathed signal or we must have been
306 		   out of queue space.  So zero out the info.
307 		 */
308 		sigdelset(&list->signal, sig);
309 		info->si_signo = sig;
310 		info->si_errno = 0;
311 		info->si_code = 0;
312 		info->si_pid = 0;
313 		info->si_uid = 0;
314 	}
315 	return 1;
316 }
317 
318 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
319 			siginfo_t *info)
320 {
321 	int sig = next_signal(pending, mask);
322 
323 	if (sig) {
324 		if (current->notifier) {
325 			if (sigismember(current->notifier_mask, sig)) {
326 				if (!(current->notifier)(current->notifier_data)) {
327 					clear_thread_flag(TIF_SIGPENDING);
328 					return 0;
329 				}
330 			}
331 		}
332 
333 		if (!collect_signal(sig, pending, info))
334 			sig = 0;
335 	}
336 
337 	return sig;
338 }
339 
340 /*
341  * Dequeue a signal and return the element to the caller, which is
342  * expected to free it.
343  *
344  * All callers have to hold the siglock.
345  */
346 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
347 {
348 	int signr = __dequeue_signal(&tsk->pending, mask, info);
349 	if (!signr) {
350 		signr = __dequeue_signal(&tsk->signal->shared_pending,
351 					 mask, info);
352 		/*
353 		 * itimer signal ?
354 		 *
355 		 * itimers are process shared and we restart periodic
356 		 * itimers in the signal delivery path to prevent DoS
357 		 * attacks in the high resolution timer case. This is
358 		 * compliant with the old way of self restarting
359 		 * itimers, as the SIGALRM is a legacy signal and only
360 		 * queued once. Changing the restart behaviour to
361 		 * restart the timer in the signal dequeue path is
362 		 * reducing the timer noise on heavy loaded !highres
363 		 * systems too.
364 		 */
365 		if (unlikely(signr == SIGALRM)) {
366 			struct hrtimer *tmr = &tsk->signal->real_timer;
367 
368 			if (!hrtimer_is_queued(tmr) &&
369 			    tsk->signal->it_real_incr.tv64 != 0) {
370 				hrtimer_forward(tmr, tmr->base->get_time(),
371 						tsk->signal->it_real_incr);
372 				hrtimer_restart(tmr);
373 			}
374 		}
375 	}
376 	recalc_sigpending_tsk(tsk);
377 	if (signr && unlikely(sig_kernel_stop(signr))) {
378 		/*
379 		 * Set a marker that we have dequeued a stop signal.  Our
380 		 * caller might release the siglock and then the pending
381 		 * stop signal it is about to process is no longer in the
382 		 * pending bitmasks, but must still be cleared by a SIGCONT
383 		 * (and overruled by a SIGKILL).  So those cases clear this
384 		 * shared flag after we've set it.  Note that this flag may
385 		 * remain set after the signal we return is ignored or
386 		 * handled.  That doesn't matter because its only purpose
387 		 * is to alert stop-signal processing code when another
388 		 * processor has come along and cleared the flag.
389 		 */
390 		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
391 			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
392 	}
393 	if (signr &&
394 	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
395 	    info->si_sys_private) {
396 		/*
397 		 * Release the siglock to ensure proper locking order
398 		 * of timer locks outside of siglocks.  Note, we leave
399 		 * irqs disabled here, since the posix-timers code is
400 		 * about to disable them again anyway.
401 		 */
402 		spin_unlock(&tsk->sighand->siglock);
403 		do_schedule_next_timer(info);
404 		spin_lock(&tsk->sighand->siglock);
405 	}
406 	return signr;
407 }
408 
409 /*
410  * Tell a process that it has a new active signal..
411  *
412  * NOTE! we rely on the previous spin_lock to
413  * lock interrupts for us! We can only be called with
414  * "siglock" held, and the local interrupt must
415  * have been disabled when that got acquired!
416  *
417  * No need to set need_resched since signal event passing
418  * goes through ->blocked
419  */
420 void signal_wake_up(struct task_struct *t, int resume)
421 {
422 	unsigned int mask;
423 
424 	set_tsk_thread_flag(t, TIF_SIGPENDING);
425 
426 	/*
427 	 * For SIGKILL, we want to wake it up in the stopped/traced case.
428 	 * We don't check t->state here because there is a race with it
429 	 * executing on another processor and just now entering stopped state.
430 	 * By using wake_up_state, we ensure the process will wake up and
431 	 * handle its death signal.
432 	 */
433 	mask = TASK_INTERRUPTIBLE;
434 	if (resume)
435 		mask |= TASK_STOPPED | TASK_TRACED;
436 	if (!wake_up_state(t, mask))
437 		kick_process(t);
438 }
439 
440 /*
441  * Remove signals in mask from the pending set and queue.
442  * Returns 1 if any signals were found.
443  *
444  * All callers must be holding the siglock.
445  *
446  * This version takes a sigset mask and looks at all signals,
447  * not just those in the first mask word.
448  */
449 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
450 {
451 	struct sigqueue *q, *n;
452 	sigset_t m;
453 
454 	sigandsets(&m, mask, &s->signal);
455 	if (sigisemptyset(&m))
456 		return 0;
457 
458 	signandsets(&s->signal, &s->signal, mask);
459 	list_for_each_entry_safe(q, n, &s->list, list) {
460 		if (sigismember(mask, q->info.si_signo)) {
461 			list_del_init(&q->list);
462 			__sigqueue_free(q);
463 		}
464 	}
465 	return 1;
466 }
467 /*
468  * Remove signals in mask from the pending set and queue.
469  * Returns 1 if any signals were found.
470  *
471  * All callers must be holding the siglock.
472  */
473 static int rm_from_queue(unsigned long mask, struct sigpending *s)
474 {
475 	struct sigqueue *q, *n;
476 
477 	if (!sigtestsetmask(&s->signal, mask))
478 		return 0;
479 
480 	sigdelsetmask(&s->signal, mask);
481 	list_for_each_entry_safe(q, n, &s->list, list) {
482 		if (q->info.si_signo < SIGRTMIN &&
483 		    (mask & sigmask(q->info.si_signo))) {
484 			list_del_init(&q->list);
485 			__sigqueue_free(q);
486 		}
487 	}
488 	return 1;
489 }
490 
491 /*
492  * Bad permissions for sending the signal
493  */
494 static int check_kill_permission(int sig, struct siginfo *info,
495 				 struct task_struct *t)
496 {
497 	int error = -EINVAL;
498 	if (!valid_signal(sig))
499 		return error;
500 
501 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
502 	if (error)
503 		return error;
504 
505 	error = -EPERM;
506 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
507 	    && ((sig != SIGCONT) ||
508 		(process_session(current) != process_session(t)))
509 	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
510 	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
511 	    && !capable(CAP_KILL))
512 		return error;
513 
514 	return security_task_kill(t, info, sig, 0);
515 }
516 
517 /* forward decl */
518 static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
519 
520 /*
521  * Handle magic process-wide effects of stop/continue signals.
522  * Unlike the signal actions, these happen immediately at signal-generation
523  * time regardless of blocking, ignoring, or handling.  This does the
524  * actual continuing for SIGCONT, but not the actual stopping for stop
525  * signals.  The process stop is done as a signal action for SIG_DFL.
526  */
527 static void handle_stop_signal(int sig, struct task_struct *p)
528 {
529 	struct task_struct *t;
530 
531 	if (p->signal->flags & SIGNAL_GROUP_EXIT)
532 		/*
533 		 * The process is in the middle of dying already.
534 		 */
535 		return;
536 
537 	if (sig_kernel_stop(sig)) {
538 		/*
539 		 * This is a stop signal.  Remove SIGCONT from all queues.
540 		 */
541 		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
542 		t = p;
543 		do {
544 			rm_from_queue(sigmask(SIGCONT), &t->pending);
545 			t = next_thread(t);
546 		} while (t != p);
547 	} else if (sig == SIGCONT) {
548 		/*
549 		 * Remove all stop signals from all queues,
550 		 * and wake all threads.
551 		 */
552 		if (unlikely(p->signal->group_stop_count > 0)) {
553 			/*
554 			 * There was a group stop in progress.  We'll
555 			 * pretend it finished before we got here.  We are
556 			 * obliged to report it to the parent: if the
557 			 * SIGSTOP happened "after" this SIGCONT, then it
558 			 * would have cleared this pending SIGCONT.  If it
559 			 * happened "before" this SIGCONT, then the parent
560 			 * got the SIGCHLD about the stop finishing before
561 			 * the continue happened.  We do the notification
562 			 * now, and it's as if the stop had finished and
563 			 * the SIGCHLD was pending on entry to this kill.
564 			 */
565 			p->signal->group_stop_count = 0;
566 			p->signal->flags = SIGNAL_STOP_CONTINUED;
567 			spin_unlock(&p->sighand->siglock);
568 			do_notify_parent_cldstop(p, CLD_STOPPED);
569 			spin_lock(&p->sighand->siglock);
570 		}
571 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
572 		t = p;
573 		do {
574 			unsigned int state;
575 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
576 
577 			/*
578 			 * If there is a handler for SIGCONT, we must make
579 			 * sure that no thread returns to user mode before
580 			 * we post the signal, in case it was the only
581 			 * thread eligible to run the signal handler--then
582 			 * it must not do anything between resuming and
583 			 * running the handler.  With the TIF_SIGPENDING
584 			 * flag set, the thread will pause and acquire the
585 			 * siglock that we hold now and until we've queued
586 			 * the pending signal.
587 			 *
588 			 * Wake up the stopped thread _after_ setting
589 			 * TIF_SIGPENDING
590 			 */
591 			state = TASK_STOPPED;
592 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
593 				set_tsk_thread_flag(t, TIF_SIGPENDING);
594 				state |= TASK_INTERRUPTIBLE;
595 			}
596 			wake_up_state(t, state);
597 
598 			t = next_thread(t);
599 		} while (t != p);
600 
601 		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
602 			/*
603 			 * We were in fact stopped, and are now continued.
604 			 * Notify the parent with CLD_CONTINUED.
605 			 */
606 			p->signal->flags = SIGNAL_STOP_CONTINUED;
607 			p->signal->group_exit_code = 0;
608 			spin_unlock(&p->sighand->siglock);
609 			do_notify_parent_cldstop(p, CLD_CONTINUED);
610 			spin_lock(&p->sighand->siglock);
611 		} else {
612 			/*
613 			 * We are not stopped, but there could be a stop
614 			 * signal in the middle of being processed after
615 			 * being removed from the queue.  Clear that too.
616 			 */
617 			p->signal->flags = 0;
618 		}
619 	} else if (sig == SIGKILL) {
620 		/*
621 		 * Make sure that any pending stop signal already dequeued
622 		 * is undone by the wakeup for SIGKILL.
623 		 */
624 		p->signal->flags = 0;
625 	}
626 }
627 
628 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
629 			struct sigpending *signals)
630 {
631 	struct sigqueue * q = NULL;
632 	int ret = 0;
633 
634 	/*
635 	 * Deliver the signal to listening signalfds. This must be called
636 	 * with the sighand lock held.
637 	 */
638 	signalfd_notify(t, sig);
639 
640 	/*
641 	 * fast-pathed signals for kernel-internal things like SIGSTOP
642 	 * or SIGKILL.
643 	 */
644 	if (info == SEND_SIG_FORCED)
645 		goto out_set;
646 
647 	/* Real-time signals must be queued if sent by sigqueue, or
648 	   some other real-time mechanism.  It is implementation
649 	   defined whether kill() does so.  We attempt to do so, on
650 	   the principle of least surprise, but since kill is not
651 	   allowed to fail with EAGAIN when low on memory we just
652 	   make sure at least one signal gets delivered and don't
653 	   pass on the info struct.  */
654 
655 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
656 					     (is_si_special(info) ||
657 					      info->si_code >= 0)));
658 	if (q) {
659 		list_add_tail(&q->list, &signals->list);
660 		switch ((unsigned long) info) {
661 		case (unsigned long) SEND_SIG_NOINFO:
662 			q->info.si_signo = sig;
663 			q->info.si_errno = 0;
664 			q->info.si_code = SI_USER;
665 			q->info.si_pid = current->pid;
666 			q->info.si_uid = current->uid;
667 			break;
668 		case (unsigned long) SEND_SIG_PRIV:
669 			q->info.si_signo = sig;
670 			q->info.si_errno = 0;
671 			q->info.si_code = SI_KERNEL;
672 			q->info.si_pid = 0;
673 			q->info.si_uid = 0;
674 			break;
675 		default:
676 			copy_siginfo(&q->info, info);
677 			break;
678 		}
679 	} else if (!is_si_special(info)) {
680 		if (sig >= SIGRTMIN && info->si_code != SI_USER)
681 		/*
682 		 * Queue overflow, abort.  We may abort if the signal was rt
683 		 * and sent by user using something other than kill().
684 		 */
685 			return -EAGAIN;
686 	}
687 
688 out_set:
689 	sigaddset(&signals->signal, sig);
690 	return ret;
691 }
692 
693 #define LEGACY_QUEUE(sigptr, sig) \
694 	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
695 
696 
697 static int
698 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
699 {
700 	int ret = 0;
701 
702 	BUG_ON(!irqs_disabled());
703 	assert_spin_locked(&t->sighand->siglock);
704 
705 	/* Short-circuit ignored signals.  */
706 	if (sig_ignored(t, sig))
707 		goto out;
708 
709 	/* Support queueing exactly one non-rt signal, so that we
710 	   can get more detailed information about the cause of
711 	   the signal. */
712 	if (LEGACY_QUEUE(&t->pending, sig))
713 		goto out;
714 
715 	ret = send_signal(sig, info, t, &t->pending);
716 	if (!ret && !sigismember(&t->blocked, sig))
717 		signal_wake_up(t, sig == SIGKILL);
718 out:
719 	return ret;
720 }
721 
722 /*
723  * Force a signal that the process can't ignore: if necessary
724  * we unblock the signal and change any SIG_IGN to SIG_DFL.
725  *
726  * Note: If we unblock the signal, we always reset it to SIG_DFL,
727  * since we do not want to have a signal handler that was blocked
728  * be invoked when user space had explicitly blocked it.
729  *
730  * We don't want to have recursive SIGSEGV's etc, for example.
731  */
732 int
733 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
734 {
735 	unsigned long int flags;
736 	int ret, blocked, ignored;
737 	struct k_sigaction *action;
738 
739 	spin_lock_irqsave(&t->sighand->siglock, flags);
740 	action = &t->sighand->action[sig-1];
741 	ignored = action->sa.sa_handler == SIG_IGN;
742 	blocked = sigismember(&t->blocked, sig);
743 	if (blocked || ignored) {
744 		action->sa.sa_handler = SIG_DFL;
745 		if (blocked) {
746 			sigdelset(&t->blocked, sig);
747 			recalc_sigpending_tsk(t);
748 		}
749 	}
750 	ret = specific_send_sig_info(sig, info, t);
751 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
752 
753 	return ret;
754 }
755 
756 void
757 force_sig_specific(int sig, struct task_struct *t)
758 {
759 	force_sig_info(sig, SEND_SIG_FORCED, t);
760 }
761 
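/*
 * Illustrative sketch (not part of this file): the typical caller of
 * force_sig_info() is an architecture fault handler delivering a
 * synchronous fault to the current task.  "fault_address" is a stand-in
 * for whatever the trap frame provides.
 *
 *	siginfo_t info;
 *
 *	info.si_signo = SIGSEGV;
 *	info.si_errno = 0;
 *	info.si_code  = SEGV_MAPERR;
 *	info.si_addr  = (void __user *)fault_address;
 *	force_sig_info(SIGSEGV, &info, current);
 */
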
762 /*
763  * Test if P wants to take SIG.  After we've checked all threads with this,
764  * it's equivalent to finding no threads not blocking SIG.  Any threads not
765  * blocking SIG were ruled out because they are not running and already
766  * have pending signals.  Such threads will dequeue from the shared queue
767  * as soon as they're available, so putting the signal on the shared queue
768  * will be equivalent to sending it to one such thread.
769  */
770 static inline int wants_signal(int sig, struct task_struct *p)
771 {
772 	if (sigismember(&p->blocked, sig))
773 		return 0;
774 	if (p->flags & PF_EXITING)
775 		return 0;
776 	if (sig == SIGKILL)
777 		return 1;
778 	if (p->state & (TASK_STOPPED | TASK_TRACED))
779 		return 0;
780 	return task_curr(p) || !signal_pending(p);
781 }
782 
783 static void
784 __group_complete_signal(int sig, struct task_struct *p)
785 {
786 	struct task_struct *t;
787 
788 	/*
789 	 * Now find a thread we can wake up to take the signal off the queue.
790 	 *
791 	 * If the main thread wants the signal, it gets first crack.
792 	 * Probably the least surprising to the average bear.
793 	 */
794 	if (wants_signal(sig, p))
795 		t = p;
796 	else if (thread_group_empty(p))
797 		/*
798 		 * There is just one thread and it does not need to be woken.
799 		 * It will dequeue unblocked signals before it runs again.
800 		 */
801 		return;
802 	else {
803 		/*
804 		 * Otherwise try to find a suitable thread.
805 		 */
806 		t = p->signal->curr_target;
807 		if (t == NULL)
808 			/* restart balancing at this thread */
809 			t = p->signal->curr_target = p;
810 
811 		while (!wants_signal(sig, t)) {
812 			t = next_thread(t);
813 			if (t == p->signal->curr_target)
814 				/*
815 				 * No thread needs to be woken.
816 				 * Any eligible threads will see
817 				 * the signal in the queue soon.
818 				 */
819 				return;
820 		}
821 		p->signal->curr_target = t;
822 	}
823 
824 	/*
825 	 * Found a killable thread.  If the signal will be fatal,
826 	 * then start taking the whole group down immediately.
827 	 */
828 	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
829 	    !sigismember(&t->real_blocked, sig) &&
830 	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
831 		/*
832 		 * This signal will be fatal to the whole group.
833 		 */
834 		if (!sig_kernel_coredump(sig)) {
835 			/*
836 			 * Start a group exit and wake everybody up.
837 			 * This way we don't have other threads
838 			 * running and doing things after a slower
839 			 * thread has the fatal signal pending.
840 			 */
841 			p->signal->flags = SIGNAL_GROUP_EXIT;
842 			p->signal->group_exit_code = sig;
843 			p->signal->group_stop_count = 0;
844 			t = p;
845 			do {
846 				sigaddset(&t->pending.signal, SIGKILL);
847 				signal_wake_up(t, 1);
848 				t = next_thread(t);
849 			} while (t != p);
850 			return;
851 		}
852 
853 		/*
854 		 * There will be a core dump.  We make all threads other
855 		 * than the chosen one go into a group stop so that nothing
856 		 * happens until it gets scheduled, takes the signal off
857 		 * the shared queue, and does the core dump.  This is a
858 		 * little more complicated than strictly necessary, but it
859 		 * keeps the signal state that winds up in the core dump
860 		 * unchanged from the death state, e.g. which thread had
861 		 * the core-dump signal unblocked.
862 		 */
863 		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
864 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
865 		p->signal->group_stop_count = 0;
866 		p->signal->group_exit_task = t;
867 		t = p;
868 		do {
869 			p->signal->group_stop_count++;
870 			signal_wake_up(t, 0);
871 			t = next_thread(t);
872 		} while (t != p);
873 		wake_up_process(p->signal->group_exit_task);
874 		return;
875 	}
876 
877 	/*
878 	 * The signal is already in the shared-pending queue.
879 	 * Tell the chosen thread to wake up and dequeue it.
880 	 */
881 	signal_wake_up(t, sig == SIGKILL);
882 	return;
883 }
884 
885 int
886 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
887 {
888 	int ret = 0;
889 
890 	assert_spin_locked(&p->sighand->siglock);
891 	handle_stop_signal(sig, p);
892 
893 	/* Short-circuit ignored signals.  */
894 	if (sig_ignored(p, sig))
895 		return ret;
896 
897 	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
898 		/* This is a non-RT signal and we already have one queued.  */
899 		return ret;
900 
901 	/*
902 	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
903 	 * We always use the shared queue for process-wide signals,
904 	 * to avoid several races.
905 	 */
906 	ret = send_signal(sig, info, p, &p->signal->shared_pending);
907 	if (unlikely(ret))
908 		return ret;
909 
910 	__group_complete_signal(sig, p);
911 	return 0;
912 }
913 
914 /*
915  * Nuke all other threads in the group.
916  */
917 void zap_other_threads(struct task_struct *p)
918 {
919 	struct task_struct *t;
920 
921 	p->signal->flags = SIGNAL_GROUP_EXIT;
922 	p->signal->group_stop_count = 0;
923 
924 	if (thread_group_empty(p))
925 		return;
926 
927 	for (t = next_thread(p); t != p; t = next_thread(t)) {
928 		/*
929 		 * Don't bother with already dead threads
930 		 */
931 		if (t->exit_state)
932 			continue;
933 
934 		/* SIGKILL will be handled before any pending SIGSTOP */
935 		sigaddset(&t->pending.signal, SIGKILL);
936 		signal_wake_up(t, 1);
937 	}
938 }
939 
940 /*
941  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
942  */
943 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
944 {
945 	struct sighand_struct *sighand;
946 
947 	for (;;) {
948 		sighand = rcu_dereference(tsk->sighand);
949 		if (unlikely(sighand == NULL))
950 			break;
951 
952 		spin_lock_irqsave(&sighand->siglock, *flags);
953 		if (likely(sighand == tsk->sighand))
954 			break;
955 		spin_unlock_irqrestore(&sighand->siglock, *flags);
956 	}
957 
958 	return sighand;
959 }
960 
961 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
962 {
963 	unsigned long flags;
964 	int ret;
965 
966 	ret = check_kill_permission(sig, info, p);
967 
968 	if (!ret && sig) {
969 		ret = -ESRCH;
970 		if (lock_task_sighand(p, &flags)) {
971 			ret = __group_send_sig_info(sig, info, p);
972 			unlock_task_sighand(p, &flags);
973 		}
974 	}
975 
976 	return ret;
977 }
978 
979 /*
980  * kill_pgrp_info() sends a signal to a process group: this is what the tty
981  * control characters do (^C, ^Z etc)
982  */
983 
984 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
985 {
986 	struct task_struct *p = NULL;
987 	int retval, success;
988 
989 	success = 0;
990 	retval = -ESRCH;
991 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
992 		int err = group_send_sig_info(sig, info, p);
993 		success |= !err;
994 		retval = err;
995 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
996 	return success ? 0 : retval;
997 }
998 
999 int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1000 {
1001 	int retval;
1002 
1003 	read_lock(&tasklist_lock);
1004 	retval = __kill_pgrp_info(sig, info, pgrp);
1005 	read_unlock(&tasklist_lock);
1006 
1007 	return retval;
1008 }
1009 
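/*
 * Illustrative sketch (not part of this file): roughly what the tty ISIG
 * path does when ^C arrives.  "tty" and its pgrp field stand in for the
 * real tty layer structures.
 *
 *	kill_pgrp_info(SIGINT, SEND_SIG_PRIV, tty->pgrp);
 */
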
1010 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1011 {
1012 	int error;
1013 	struct task_struct *p;
1014 
1015 	rcu_read_lock();
1016 	if (unlikely(sig_needs_tasklist(sig)))
1017 		read_lock(&tasklist_lock);
1018 
1019 	p = pid_task(pid, PIDTYPE_PID);
1020 	error = -ESRCH;
1021 	if (p)
1022 		error = group_send_sig_info(sig, info, p);
1023 
1024 	if (unlikely(sig_needs_tasklist(sig)))
1025 		read_unlock(&tasklist_lock);
1026 	rcu_read_unlock();
1027 	return error;
1028 }
1029 
1030 int
1031 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1032 {
1033 	int error;
1034 	rcu_read_lock();
1035 	error = kill_pid_info(sig, info, find_pid(pid));
1036 	rcu_read_unlock();
1037 	return error;
1038 }
1039 
1040 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1041 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1042 		      uid_t uid, uid_t euid, u32 secid)
1043 {
1044 	int ret = -EINVAL;
1045 	struct task_struct *p;
1046 
1047 	if (!valid_signal(sig))
1048 		return ret;
1049 
1050 	read_lock(&tasklist_lock);
1051 	p = pid_task(pid, PIDTYPE_PID);
1052 	if (!p) {
1053 		ret = -ESRCH;
1054 		goto out_unlock;
1055 	}
1056 	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1057 	    && (euid != p->suid) && (euid != p->uid)
1058 	    && (uid != p->suid) && (uid != p->uid)) {
1059 		ret = -EPERM;
1060 		goto out_unlock;
1061 	}
1062 	ret = security_task_kill(p, info, sig, secid);
1063 	if (ret)
1064 		goto out_unlock;
1065 	if (sig && p->sighand) {
1066 		unsigned long flags;
1067 		spin_lock_irqsave(&p->sighand->siglock, flags);
1068 		ret = __group_send_sig_info(sig, info, p);
1069 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1070 	}
1071 out_unlock:
1072 	read_unlock(&tasklist_lock);
1073 	return ret;
1074 }
1075 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1076 
1077 /*
1078  * kill_something_info() interprets pid in interesting ways just like kill(2).
1079  *
1080  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1081  * is probably wrong.  Should make it like BSD or SYSV.
1082  */
1083 
1084 static int kill_something_info(int sig, struct siginfo *info, int pid)
1085 {
1086 	int ret;
1087 	rcu_read_lock();
1088 	if (!pid) {
1089 		ret = kill_pgrp_info(sig, info, task_pgrp(current));
1090 	} else if (pid == -1) {
1091 		int retval = 0, count = 0;
1092 		struct task_struct * p;
1093 
1094 		read_lock(&tasklist_lock);
1095 		for_each_process(p) {
1096 			if (p->pid > 1 && p->tgid != current->tgid) {
1097 				int err = group_send_sig_info(sig, info, p);
1098 				++count;
1099 				if (err != -EPERM)
1100 					retval = err;
1101 			}
1102 		}
1103 		read_unlock(&tasklist_lock);
1104 		ret = count ? retval : -ESRCH;
1105 	} else if (pid < 0) {
1106 		ret = kill_pgrp_info(sig, info, find_pid(-pid));
1107 	} else {
1108 		ret = kill_pid_info(sig, info, find_pid(pid));
1109 	}
1110 	rcu_read_unlock();
1111 	return ret;
1112 }
1113 
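/*
 * For reference, the pid interpretation implemented above mirrors kill(2):
 *
 *	pid > 0		signal the single process "pid"
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process the caller is allowed to signal,
 *			except init (pid 1) and the caller's own thread group
 *	pid < -1	signal every process in process group "-pid"
 */
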
1114 /*
1115  * These are for backward compatibility with the rest of the kernel source.
1116  */
1117 
1118 /*
1119  * These two are the most common entry points.  They send a signal
1120  * just to the specific thread.
1121  */
1122 int
1123 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1124 {
1125 	int ret;
1126 	unsigned long flags;
1127 
1128 	/*
1129 	 * Make sure legacy kernel users don't send in bad values
1130 	 * (normal paths check this in check_kill_permission).
1131 	 */
1132 	if (!valid_signal(sig))
1133 		return -EINVAL;
1134 
1135 	/*
1136 	 * We need the tasklist lock even for the specific
1137 	 * thread case (when we don't need to follow the group
1138 	 * lists) in order to avoid races with "p->sighand"
1139 	 * going away or changing from under us.
1140 	 */
1141 	read_lock(&tasklist_lock);
1142 	spin_lock_irqsave(&p->sighand->siglock, flags);
1143 	ret = specific_send_sig_info(sig, info, p);
1144 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1145 	read_unlock(&tasklist_lock);
1146 	return ret;
1147 }
1148 
1149 #define __si_special(priv) \
1150 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1151 
1152 int
1153 send_sig(int sig, struct task_struct *p, int priv)
1154 {
1155 	return send_sig_info(sig, __si_special(priv), p);
1156 }
1157 
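/*
 * Illustrative sketch (not part of this file): a kernel-internal caller
 * that holds a task reference ("p" here) and wants a kernel-originated
 * signal passes priv = 1, which __si_special() maps to SEND_SIG_PRIV:
 *
 *	send_sig(SIGTERM, p, 1);
 */
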
1158 /*
1159  * This is the entry point for "process-wide" signals.
1160  * They will go to an appropriate thread in the thread group.
1161  */
1162 int
1163 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1164 {
1165 	int ret;
1166 	read_lock(&tasklist_lock);
1167 	ret = group_send_sig_info(sig, info, p);
1168 	read_unlock(&tasklist_lock);
1169 	return ret;
1170 }
1171 
1172 void
1173 force_sig(int sig, struct task_struct *p)
1174 {
1175 	force_sig_info(sig, SEND_SIG_PRIV, p);
1176 }
1177 
1178 /*
1179  * When things go south during signal handling, we
1180  * will force a SIGSEGV. And if the signal that caused
1181  * the problem was already a SIGSEGV, we'll want to
1182  * make sure we don't even try to deliver the signal..
1183  */
1184 int
1185 force_sigsegv(int sig, struct task_struct *p)
1186 {
1187 	if (sig == SIGSEGV) {
1188 		unsigned long flags;
1189 		spin_lock_irqsave(&p->sighand->siglock, flags);
1190 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1191 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1192 	}
1193 	force_sig(SIGSEGV, p);
1194 	return 0;
1195 }
1196 
1197 int kill_pgrp(struct pid *pid, int sig, int priv)
1198 {
1199 	return kill_pgrp_info(sig, __si_special(priv), pid);
1200 }
1201 EXPORT_SYMBOL(kill_pgrp);
1202 
1203 int kill_pid(struct pid *pid, int sig, int priv)
1204 {
1205 	return kill_pid_info(sig, __si_special(priv), pid);
1206 }
1207 EXPORT_SYMBOL(kill_pid);
1208 
1209 int
1210 kill_proc(pid_t pid, int sig, int priv)
1211 {
1212 	return kill_proc_info(sig, __si_special(priv), pid);
1213 }
1214 
1215 /*
1216  * These functions support sending signals using preallocated sigqueue
1217  * structures.  This is needed "because realtime applications cannot
1218  * afford to lose notifications of asynchronous events, like timer
1219  * expirations or I/O completions".  In the case of Posix Timers
1220  * we allocate the sigqueue structure from the timer_create.  If this
1221  * allocation fails we are able to report the failure to the application
1222  * with an EAGAIN error.
1223  */
1224 
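/*
 * Illustrative sketch (not part of this file) of the posix-timers pattern
 * described above; locking and error paths are elided, and the "timer"
 * structure with its sigq/signo/owner fields is hypothetical.
 *
 *	At timer_create() time, preallocate so expiry cannot fail:
 *		timer->sigq = sigqueue_alloc();
 *		if (!timer->sigq)
 *			return -EAGAIN;
 *
 *	At expiry, fill in the siginfo and send the preallocated entry:
 *		timer->sigq->info.si_code = SI_TIMER;
 *		send_group_sigqueue(timer->signo, timer->sigq, timer->owner);
 *
 *	At timer_delete() time:
 *		sigqueue_free(timer->sigq);
 */
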
1225 struct sigqueue *sigqueue_alloc(void)
1226 {
1227 	struct sigqueue *q;
1228 
1229 	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1230 		q->flags |= SIGQUEUE_PREALLOC;
1231 	return(q);
1232 }
1233 
1234 void sigqueue_free(struct sigqueue *q)
1235 {
1236 	unsigned long flags;
1237 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1238 	/*
1239 	 * If the signal is still pending remove it from the
1240 	 * pending queue.
1241 	 */
1242 	if (unlikely(!list_empty(&q->list))) {
1243 		spinlock_t *lock = &current->sighand->siglock;
1244 		read_lock(&tasklist_lock);
1245 		spin_lock_irqsave(lock, flags);
1246 		if (!list_empty(&q->list))
1247 			list_del_init(&q->list);
1248 		spin_unlock_irqrestore(lock, flags);
1249 		read_unlock(&tasklist_lock);
1250 	}
1251 	q->flags &= ~SIGQUEUE_PREALLOC;
1252 	__sigqueue_free(q);
1253 }
1254 
1255 int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1256 {
1257 	unsigned long flags;
1258 	int ret = 0;
1259 
1260 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1261 
1262 	/*
1263 	 * The rcu based delayed sighand destroy makes it possible to
1264 	 * run this without tasklist lock held. The task struct itself
1265 	 * cannot go away as create_timer did get_task_struct().
1266 	 *
1267 	 * We return -1 when the task is marked exiting, so
1268 	 * posix_timer_event can redirect it to the group leader.
1269 	 */
1270 	rcu_read_lock();
1271 
1272 	if (!likely(lock_task_sighand(p, &flags))) {
1273 		ret = -1;
1274 		goto out_err;
1275 	}
1276 
1277 	if (unlikely(!list_empty(&q->list))) {
1278 		/*
1279 		 * If an SI_TIMER entry is already queued, just increment
1280 		 * the overrun count.
1281 		 */
1282 		BUG_ON(q->info.si_code != SI_TIMER);
1283 		q->info.si_overrun++;
1284 		goto out;
1285 	}
1286 	/* Short-circuit ignored signals.  */
1287 	if (sig_ignored(p, sig)) {
1288 		ret = 1;
1289 		goto out;
1290 	}
1291 	/*
1292 	 * Deliver the signal to listening signalfds. This must be called
1293 	 * with the sighand lock held.
1294 	 */
1295 	signalfd_notify(p, sig);
1296 
1297 	list_add_tail(&q->list, &p->pending.list);
1298 	sigaddset(&p->pending.signal, sig);
1299 	if (!sigismember(&p->blocked, sig))
1300 		signal_wake_up(p, sig == SIGKILL);
1301 
1302 out:
1303 	unlock_task_sighand(p, &flags);
1304 out_err:
1305 	rcu_read_unlock();
1306 
1307 	return ret;
1308 }
1309 
1310 int
1311 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1312 {
1313 	unsigned long flags;
1314 	int ret = 0;
1315 
1316 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1317 
1318 	read_lock(&tasklist_lock);
1319 	/* Since it_lock is held, p->sighand cannot be NULL. */
1320 	spin_lock_irqsave(&p->sighand->siglock, flags);
1321 	handle_stop_signal(sig, p);
1322 
1323 	/* Short-circuit ignored signals.  */
1324 	if (sig_ignored(p, sig)) {
1325 		ret = 1;
1326 		goto out;
1327 	}
1328 
1329 	if (unlikely(!list_empty(&q->list))) {
1330 		/*
1331 		 * If an SI_TIMER entry is already queued, just increment
1332 		 * the overrun count.  Other uses should not try to
1333 		 * send the signal multiple times.
1334 		 */
1335 		BUG_ON(q->info.si_code != SI_TIMER);
1336 		q->info.si_overrun++;
1337 		goto out;
1338 	}
1339 	/*
1340 	 * Deliver the signal to listening signalfds. This must be called
1341 	 * with the sighand lock held.
1342 	 */
1343 	signalfd_notify(p, sig);
1344 
1345 	/*
1346 	 * Put this signal on the shared-pending queue.
1347 	 * We always use the shared queue for process-wide signals,
1348 	 * to avoid several races.
1349 	 */
1350 	list_add_tail(&q->list, &p->signal->shared_pending.list);
1351 	sigaddset(&p->signal->shared_pending.signal, sig);
1352 
1353 	__group_complete_signal(sig, p);
1354 out:
1355 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
1356 	read_unlock(&tasklist_lock);
1357 	return ret;
1358 }
1359 
1360 /*
1361  * Wake up any threads in the parent blocked in wait* syscalls.
1362  */
1363 static inline void __wake_up_parent(struct task_struct *p,
1364 				    struct task_struct *parent)
1365 {
1366 	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1367 }
1368 
1369 /*
1370  * Let a parent know about the death of a child.
1371  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1372  */
1373 
1374 void do_notify_parent(struct task_struct *tsk, int sig)
1375 {
1376 	struct siginfo info;
1377 	unsigned long flags;
1378 	struct sighand_struct *psig;
1379 
1380 	BUG_ON(sig == -1);
1381 
1382  	/* do_notify_parent_cldstop should have been called instead.  */
1383  	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1384 
1385 	BUG_ON(!tsk->ptrace &&
1386 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1387 
1388 	info.si_signo = sig;
1389 	info.si_errno = 0;
1390 	info.si_pid = tsk->pid;
1391 	info.si_uid = tsk->uid;
1392 
1393 	/* FIXME: find out whether or not this is supposed to be c*time. */
1394 	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1395 						       tsk->signal->utime));
1396 	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1397 						       tsk->signal->stime));
1398 
1399 	info.si_status = tsk->exit_code & 0x7f;
1400 	if (tsk->exit_code & 0x80)
1401 		info.si_code = CLD_DUMPED;
1402 	else if (tsk->exit_code & 0x7f)
1403 		info.si_code = CLD_KILLED;
1404 	else {
1405 		info.si_code = CLD_EXITED;
1406 		info.si_status = tsk->exit_code >> 8;
1407 	}
1408 
1409 	psig = tsk->parent->sighand;
1410 	spin_lock_irqsave(&psig->siglock, flags);
1411 	if (!tsk->ptrace && sig == SIGCHLD &&
1412 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1413 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1414 		/*
1415 		 * We are exiting and our parent doesn't care.  POSIX.1
1416 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1417 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1418 		 * automatically and not left for our parent's wait4 call.
1419 		 * Rather than having the parent do it as a magic kind of
1420 		 * signal handler, we just set this to tell do_exit that we
1421 		 * can be cleaned up without becoming a zombie.  Note that
1422 		 * we still call __wake_up_parent in this case, because a
1423 		 * blocked sys_wait4 might now return -ECHILD.
1424 		 *
1425 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1426 		 * is implementation-defined: we do (if you don't want
1427 		 * it, just use SIG_IGN instead).
1428 		 */
1429 		tsk->exit_signal = -1;
1430 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1431 			sig = 0;
1432 	}
1433 	if (valid_signal(sig) && sig > 0)
1434 		__group_send_sig_info(sig, &info, tsk->parent);
1435 	__wake_up_parent(tsk, tsk->parent);
1436 	spin_unlock_irqrestore(&psig->siglock, flags);
1437 }
1438 
1439 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1440 {
1441 	struct siginfo info;
1442 	unsigned long flags;
1443 	struct task_struct *parent;
1444 	struct sighand_struct *sighand;
1445 
1446 	if (tsk->ptrace & PT_PTRACED)
1447 		parent = tsk->parent;
1448 	else {
1449 		tsk = tsk->group_leader;
1450 		parent = tsk->real_parent;
1451 	}
1452 
1453 	info.si_signo = SIGCHLD;
1454 	info.si_errno = 0;
1455 	info.si_pid = tsk->pid;
1456 	info.si_uid = tsk->uid;
1457 
1458 	/* FIXME: find out whether or not this is supposed to be c*time. */
1459 	info.si_utime = cputime_to_jiffies(tsk->utime);
1460 	info.si_stime = cputime_to_jiffies(tsk->stime);
1461 
1462  	info.si_code = why;
1463  	switch (why) {
1464  	case CLD_CONTINUED:
1465  		info.si_status = SIGCONT;
1466  		break;
1467  	case CLD_STOPPED:
1468  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1469  		break;
1470  	case CLD_TRAPPED:
1471  		info.si_status = tsk->exit_code & 0x7f;
1472  		break;
1473  	default:
1474  		BUG();
1475  	}
1476 
1477 	sighand = parent->sighand;
1478 	spin_lock_irqsave(&sighand->siglock, flags);
1479 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1480 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1481 		__group_send_sig_info(SIGCHLD, &info, parent);
1482 	/*
1483 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1484 	 */
1485 	__wake_up_parent(tsk, parent);
1486 	spin_unlock_irqrestore(&sighand->siglock, flags);
1487 }
1488 
1489 static inline int may_ptrace_stop(void)
1490 {
1491 	if (!likely(current->ptrace & PT_PTRACED))
1492 		return 0;
1493 
1494 	if (unlikely(current->parent == current->real_parent &&
1495 		    (current->ptrace & PT_ATTACHED)))
1496 		return 0;
1497 
1498 	if (unlikely(current->signal == current->parent->signal) &&
1499 	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
1500 		return 0;
1501 
1502 	/*
1503 	 * Are we in the middle of do_coredump?
1504 	 * If so and our tracer is also part of the coredump stopping
1505 	 * is a deadlock situation, and pointless because our tracer
1506 	 * is dead so don't allow us to stop.
1507 	 * If SIGKILL was already sent before the caller unlocked
1508 	 * ->siglock we must see ->core_waiters != 0. Otherwise it
1509 	 * is safe to enter schedule().
1510 	 */
1511 	if (unlikely(current->mm->core_waiters) &&
1512 	    unlikely(current->mm == current->parent->mm))
1513 		return 0;
1514 
1515 	return 1;
1516 }
1517 
1518 /*
1519  * This must be called with current->sighand->siglock held.
1520  *
1521  * This should be the path for all ptrace stops.
1522  * We always set current->last_siginfo while stopped here.
1523  * That makes it a way to test a stopped process for
1524  * being ptrace-stopped vs being job-control-stopped.
1525  *
1526  * If we actually decide not to stop at all because the tracer is gone,
1527  * we leave nostop_code in current->exit_code.
1528  */
1529 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1530 {
1531 	/*
1532 	 * If there is a group stop in progress,
1533 	 * we must participate in the bookkeeping.
1534 	 */
1535 	if (current->signal->group_stop_count > 0)
1536 		--current->signal->group_stop_count;
1537 
1538 	current->last_siginfo = info;
1539 	current->exit_code = exit_code;
1540 
1541 	/* Let the debugger run.  */
1542 	set_current_state(TASK_TRACED);
1543 	spin_unlock_irq(&current->sighand->siglock);
1544 	try_to_freeze();
1545 	read_lock(&tasklist_lock);
1546 	if (may_ptrace_stop()) {
1547 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1548 		read_unlock(&tasklist_lock);
1549 		schedule();
1550 	} else {
1551 		/*
1552 		 * By the time we got the lock, our tracer went away.
1553 		 * Don't stop here.
1554 		 */
1555 		read_unlock(&tasklist_lock);
1556 		set_current_state(TASK_RUNNING);
1557 		current->exit_code = nostop_code;
1558 	}
1559 
1560 	/*
1561 	 * We are back.  Now reacquire the siglock before touching
1562 	 * last_siginfo, so that we are sure to have synchronized with
1563 	 * any signal-sending on another CPU that wants to examine it.
1564 	 */
1565 	spin_lock_irq(&current->sighand->siglock);
1566 	current->last_siginfo = NULL;
1567 
1568 	/*
1569 	 * Queued signals ignored us while we were stopped for tracing.
1570 	 * So check for any that we should take before resuming user mode.
1571 	 */
1572 	recalc_sigpending();
1573 }
1574 
1575 void ptrace_notify(int exit_code)
1576 {
1577 	siginfo_t info;
1578 
1579 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1580 
1581 	memset(&info, 0, sizeof info);
1582 	info.si_signo = SIGTRAP;
1583 	info.si_code = exit_code;
1584 	info.si_pid = current->pid;
1585 	info.si_uid = current->uid;
1586 
1587 	/* Let the debugger run.  */
1588 	spin_lock_irq(&current->sighand->siglock);
1589 	ptrace_stop(exit_code, 0, &info);
1590 	spin_unlock_irq(&current->sighand->siglock);
1591 }
1592 
1593 static void
1594 finish_stop(int stop_count)
1595 {
1596 	/*
1597 	 * If there are no other threads in the group, or if there is
1598 	 * a group stop in progress and we are the last to stop,
1599 	 * report to the parent.  When ptraced, every thread reports itself.
1600 	 */
1601 	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
1602 		read_lock(&tasklist_lock);
1603 		do_notify_parent_cldstop(current, CLD_STOPPED);
1604 		read_unlock(&tasklist_lock);
1605 	}
1606 
1607 	do {
1608 		schedule();
1609 	} while (try_to_freeze());
1610 	/*
1611 	 * Now we don't run again until continued.
1612 	 */
1613 	current->exit_code = 0;
1614 }
1615 
1616 /*
1617  * This performs the stopping for SIGSTOP and other stop signals.
1618  * We have to stop all threads in the thread group.
1619  * Returns nonzero if we've actually stopped and released the siglock.
1620  * Returns zero if we didn't stop and still hold the siglock.
1621  */
1622 static int do_signal_stop(int signr)
1623 {
1624 	struct signal_struct *sig = current->signal;
1625 	int stop_count;
1626 
1627 	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1628 		return 0;
1629 
1630 	if (sig->group_stop_count > 0) {
1631 		/*
1632 		 * There is a group stop in progress.  We don't need to
1633 		 * start another one.
1634 		 */
1635 		stop_count = --sig->group_stop_count;
1636 	} else {
1637 		/*
1638 		 * There is no group stop already in progress.
1639 		 * We must initiate one now.
1640 		 */
1641 		struct task_struct *t;
1642 
1643 		sig->group_exit_code = signr;
1644 
1645 		stop_count = 0;
1646 		for (t = next_thread(current); t != current; t = next_thread(t))
1647 			/*
1648 			 * Setting state to TASK_STOPPED for a group
1649 			 * stop is always done with the siglock held,
1650 			 * so this check has no races.
1651 			 */
1652 			if (!t->exit_state &&
1653 			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1654 				stop_count++;
1655 				signal_wake_up(t, 0);
1656 			}
1657 		sig->group_stop_count = stop_count;
1658 	}
1659 
1660 	if (stop_count == 0)
1661 		sig->flags = SIGNAL_STOP_STOPPED;
1662 	current->exit_code = sig->group_exit_code;
1663 	__set_current_state(TASK_STOPPED);
1664 
1665 	spin_unlock_irq(&current->sighand->siglock);
1666 	finish_stop(stop_count);
1667 	return 1;
1668 }
1669 
1670 /*
1671  * Do appropriate magic when group_stop_count > 0.
1672  * We return nonzero if we stopped, after releasing the siglock.
1673  * We return zero if we still hold the siglock and should look
1674  * for another signal without checking group_stop_count again.
1675  */
1676 static int handle_group_stop(void)
1677 {
1678 	int stop_count;
1679 
1680 	if (current->signal->group_exit_task == current) {
1681 		/*
1682 		 * Group stop is so we can do a core dump.
1683 		 * We are the initiating thread, so get on with it.
1684 		 */
1685 		current->signal->group_exit_task = NULL;
1686 		return 0;
1687 	}
1688 
1689 	if (current->signal->flags & SIGNAL_GROUP_EXIT)
1690 		/*
1691 		 * Group stop is so another thread can do a core dump,
1692 		 * or else we are racing against a death signal.
1693 		 * Just punt the stop so we can get the next signal.
1694 		 */
1695 		return 0;
1696 
1697 	/*
1698 	 * There is a group stop in progress.  We stop
1699 	 * without any associated signal being in our queue.
1700 	 */
1701 	stop_count = --current->signal->group_stop_count;
1702 	if (stop_count == 0)
1703 		current->signal->flags = SIGNAL_STOP_STOPPED;
1704 	current->exit_code = current->signal->group_exit_code;
1705 	set_current_state(TASK_STOPPED);
1706 	spin_unlock_irq(&current->sighand->siglock);
1707 	finish_stop(stop_count);
1708 	return 1;
1709 }
1710 
1711 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1712 			  struct pt_regs *regs, void *cookie)
1713 {
1714 	sigset_t *mask = &current->blocked;
1715 	int signr = 0;
1716 
1717 	try_to_freeze();
1718 
1719 relock:
1720 	spin_lock_irq(&current->sighand->siglock);
1721 	for (;;) {
1722 		struct k_sigaction *ka;
1723 
1724 		if (unlikely(current->signal->group_stop_count > 0) &&
1725 		    handle_group_stop())
1726 			goto relock;
1727 
1728 		signr = dequeue_signal(current, mask, info);
1729 
1730 		if (!signr)
1731 			break; /* will return 0 */
1732 
1733 		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1734 			ptrace_signal_deliver(regs, cookie);
1735 
1736 			/* Let the debugger run.  */
1737 			ptrace_stop(signr, signr, info);
1738 
1739 			/* We're back.  Did the debugger cancel the sig?  */
1740 			signr = current->exit_code;
1741 			if (signr == 0)
1742 				continue;
1743 
1744 			current->exit_code = 0;
1745 
1746 			/* Update the siginfo structure if the signal has
1747 			   changed.  If the debugger wanted something
1748 			   specific in the siginfo structure then it should
1749 			   have updated *info via PTRACE_SETSIGINFO.  */
1750 			if (signr != info->si_signo) {
1751 				info->si_signo = signr;
1752 				info->si_errno = 0;
1753 				info->si_code = SI_USER;
1754 				info->si_pid = current->parent->pid;
1755 				info->si_uid = current->parent->uid;
1756 			}
1757 
1758 			/* If the (new) signal is now blocked, requeue it.  */
1759 			if (sigismember(&current->blocked, signr)) {
1760 				specific_send_sig_info(signr, info, current);
1761 				continue;
1762 			}
1763 		}
1764 
1765 		ka = &current->sighand->action[signr-1];
1766 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1767 			continue;
1768 		if (ka->sa.sa_handler != SIG_DFL) {
1769 			/* Run the handler.  */
1770 			*return_ka = *ka;
1771 
1772 			if (ka->sa.sa_flags & SA_ONESHOT)
1773 				ka->sa.sa_handler = SIG_DFL;
1774 
1775 			break; /* will return non-zero "signr" value */
1776 		}
1777 
1778 		/*
1779 		 * Now we are doing the default action for this signal.
1780 		 */
1781 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1782 			continue;
1783 
1784 		/*
1785 		 * Init of a pid space gets no signals it doesn't want from
1786 		 * within that pid space. It can of course get signals from
1787 		 * its parent pid space.
1788 		 */
1789 		if (current == child_reaper(current))
1790 			continue;
1791 
1792 		if (sig_kernel_stop(signr)) {
1793 			/*
1794 			 * The default action is to stop all threads in
1795 			 * the thread group.  The job control signals
1796 			 * do nothing in an orphaned pgrp, but SIGSTOP
1797 			 * always works.  Note that siglock needs to be
1798 			 * dropped during the call to is_orphaned_pgrp()
1799 			 * because of lock ordering with tasklist_lock.
1800 			 * This allows an intervening SIGCONT to be posted.
1801 			 * We need to check for that and bail out if necessary.
1802 			 */
1803 			if (signr != SIGSTOP) {
1804 				spin_unlock_irq(&current->sighand->siglock);
1805 
1806 				/* signals can be posted during this window */
1807 
1808 				if (is_current_pgrp_orphaned())
1809 					goto relock;
1810 
1811 				spin_lock_irq(&current->sighand->siglock);
1812 			}
1813 
1814 			if (likely(do_signal_stop(signr))) {
1815 				/* It released the siglock.  */
1816 				goto relock;
1817 			}
1818 
1819 			/*
1820 			 * We didn't actually stop, due to a race
1821 			 * with SIGCONT or something like that.
1822 			 */
1823 			continue;
1824 		}
1825 
1826 		spin_unlock_irq(&current->sighand->siglock);
1827 
1828 		/*
1829 		 * Anything else is fatal, maybe with a core dump.
1830 		 */
1831 		current->flags |= PF_SIGNALED;
1832 		if (sig_kernel_coredump(signr)) {
1833 			/*
1834 			 * If it was able to dump core, this kills all
1835 			 * other threads in the group and synchronizes with
1836 			 * their demise.  If we lost the race with another
1837 			 * thread getting here, it set group_exit_code
1838 			 * first and our do_group_exit call below will use
1839 			 * that value and ignore the one we pass it.
1840 			 */
1841 			do_coredump((long)signr, signr, regs);
1842 		}
1843 
1844 		/*
1845 		 * Death signals, no core dump.
1846 		 */
1847 		do_group_exit(signr);
1848 		/* NOTREACHED */
1849 	}
1850 	spin_unlock_irq(&current->sighand->siglock);
1851 	return signr;
1852 }
1853 
1854 EXPORT_SYMBOL(recalc_sigpending);
1855 EXPORT_SYMBOL_GPL(dequeue_signal);
1856 EXPORT_SYMBOL(flush_signals);
1857 EXPORT_SYMBOL(force_sig);
1858 EXPORT_SYMBOL(kill_proc);
1859 EXPORT_SYMBOL(ptrace_notify);
1860 EXPORT_SYMBOL(send_sig);
1861 EXPORT_SYMBOL(send_sig_info);
1862 EXPORT_SYMBOL(sigprocmask);
1863 EXPORT_SYMBOL(block_all_signals);
1864 EXPORT_SYMBOL(unblock_all_signals);
1865 
1866 
1867 /*
1868  * System call entry points.
1869  */
1870 
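/*
 * The restart block was filled in by a syscall that returned
 * -ERESTART_RESTARTBLOCK; calling restart->fn() resumes that syscall
 * using the state saved there.  The default fn, do_no_restart_syscall()
 * below, just returns -EINTR.
 */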
1871 asmlinkage long sys_restart_syscall(void)
1872 {
1873 	struct restart_block *restart = &current_thread_info()->restart_block;
1874 	return restart->fn(restart);
1875 }
1876 
1877 long do_no_restart_syscall(struct restart_block *param)
1878 {
1879 	return -EINTR;
1880 }
1881 
1882 /*
1883  * We don't need to get the kernel lock - this is all local to this
1884  * particular thread. (And that's good, because this is _heavily_
1885  * used by various programs.)
1886  */
1887 
1888 /*
1889  * This is also useful for kernel threads that want to temporarily
1890  * (or permanently) block certain signals.
1891  *
1892  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1893  * interface happily blocks "unblockable" signals like SIGKILL
1894  * and friends.
1895  */
1896 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1897 {
1898 	int error;
1899 
1900 	spin_lock_irq(&current->sighand->siglock);
1901 	if (oldset)
1902 		*oldset = current->blocked;
1903 
1904 	error = 0;
1905 	switch (how) {
1906 	case SIG_BLOCK:
1907 		sigorsets(&current->blocked, &current->blocked, set);
1908 		break;
1909 	case SIG_UNBLOCK:
1910 		signandsets(&current->blocked, &current->blocked, set);
1911 		break;
1912 	case SIG_SETMASK:
1913 		current->blocked = *set;
1914 		break;
1915 	default:
1916 		error = -EINVAL;
1917 	}
1918 	recalc_sigpending();
1919 	spin_unlock_irq(&current->sighand->siglock);
1920 
1921 	return error;
1922 }
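/*
 * A minimal usage sketch (not taken from this file): a kernel thread
 * that wants to temporarily block every signal could do roughly
 *
 *	sigset_t all, old;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, &old);
 *	...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 *
 * As noted above, unlike the userspace call this really does block
 * SIGKILL and SIGSTOP.
 */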
1923 
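/*
 * sys_rt_sigprocmask - fetch and/or change the blocked signal mask.
 * The new mask (if any) is copied in from userspace, SIGKILL and
 * SIGSTOP are silently dropped from it, and the previous mask is
 * optionally copied back to @oset.
 */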
1924 asmlinkage long
1925 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1926 {
1927 	int error = -EINVAL;
1928 	sigset_t old_set, new_set;
1929 
1930 	/* XXX: Don't preclude handling different sized sigset_t's.  */
1931 	if (sigsetsize != sizeof(sigset_t))
1932 		goto out;
1933 
1934 	if (set) {
1935 		error = -EFAULT;
1936 		if (copy_from_user(&new_set, set, sizeof(*set)))
1937 			goto out;
1938 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
1939 
1940 		error = sigprocmask(how, &new_set, &old_set);
1941 		if (error)
1942 			goto out;
1943 		if (oset)
1944 			goto set_old;
1945 	} else if (oset) {
1946 		spin_lock_irq(&current->sighand->siglock);
1947 		old_set = current->blocked;
1948 		spin_unlock_irq(&current->sighand->siglock);
1949 
1950 	set_old:
1951 		error = -EFAULT;
1952 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
1953 			goto out;
1954 	}
1955 	error = 0;
1956 out:
1957 	return error;
1958 }
1959 
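/*
 * do_sigpending - report which blocked signals are currently pending.
 * The union of the per-thread and shared pending sets is collected
 * under siglock, masked with the caller's blocked set, and copied out
 * to userspace.
 */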
1960 long do_sigpending(void __user *set, unsigned long sigsetsize)
1961 {
1962 	long error = -EINVAL;
1963 	sigset_t pending;
1964 
1965 	if (sigsetsize > sizeof(sigset_t))
1966 		goto out;
1967 
1968 	spin_lock_irq(&current->sighand->siglock);
1969 	sigorsets(&pending, &current->pending.signal,
1970 		  &current->signal->shared_pending.signal);
1971 	spin_unlock_irq(&current->sighand->siglock);
1972 
1973 	/* Outside the lock because only this thread touches it.  */
1974 	sigandsets(&pending, &current->blocked, &pending);
1975 
1976 	error = -EFAULT;
1977 	if (!copy_to_user(set, &pending, sigsetsize))
1978 		error = 0;
1979 
1980 out:
1981 	return error;
1982 }
1983 
1984 asmlinkage long
1985 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
1986 {
1987 	return do_sigpending(set, sigsetsize);
1988 }
1989 
1990 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
1991 
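/*
 * copy_siginfo_to_user - copy a siginfo_t to userspace field by field.
 * A negative si_code means the info originated in userspace (e.g. via
 * sigqueueinfo) and is copied verbatim; otherwise only the three
 * generic ints plus the union member selected by si_code are written,
 * so structure padding is never leaked to userspace.
 */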
1992 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
1993 {
1994 	int err;
1995 
1996 	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
1997 		return -EFAULT;
1998 	if (from->si_code < 0)
1999 		return __copy_to_user(to, from, sizeof(siginfo_t))
2000 			? -EFAULT : 0;
2001 	/*
2002 	 * If you change siginfo_t structure, please be sure
2003 	 * this code is fixed accordingly.
2004 	 * Please remember to update the signalfd_copyinfo() function
2005 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2006 	 * It should never copy any pad contained in the structure
2007 	 * to avoid security leaks, but must copy the generic
2008 	 * 3 ints plus the relevant union member.
2009 	 */
2010 	err = __put_user(from->si_signo, &to->si_signo);
2011 	err |= __put_user(from->si_errno, &to->si_errno);
2012 	err |= __put_user((short)from->si_code, &to->si_code);
2013 	switch (from->si_code & __SI_MASK) {
2014 	case __SI_KILL:
2015 		err |= __put_user(from->si_pid, &to->si_pid);
2016 		err |= __put_user(from->si_uid, &to->si_uid);
2017 		break;
2018 	case __SI_TIMER:
2019 		err |= __put_user(from->si_tid, &to->si_tid);
2020 		err |= __put_user(from->si_overrun, &to->si_overrun);
2021 		err |= __put_user(from->si_ptr, &to->si_ptr);
2022 		break;
2023 	case __SI_POLL:
2024 		err |= __put_user(from->si_band, &to->si_band);
2025 		err |= __put_user(from->si_fd, &to->si_fd);
2026 		break;
2027 	case __SI_FAULT:
2028 		err |= __put_user(from->si_addr, &to->si_addr);
2029 #ifdef __ARCH_SI_TRAPNO
2030 		err |= __put_user(from->si_trapno, &to->si_trapno);
2031 #endif
2032 		break;
2033 	case __SI_CHLD:
2034 		err |= __put_user(from->si_pid, &to->si_pid);
2035 		err |= __put_user(from->si_uid, &to->si_uid);
2036 		err |= __put_user(from->si_status, &to->si_status);
2037 		err |= __put_user(from->si_utime, &to->si_utime);
2038 		err |= __put_user(from->si_stime, &to->si_stime);
2039 		break;
2040 	case __SI_RT: /* This is not generated by the kernel as of now. */
2041 	case __SI_MESGQ: /* But this is */
2042 		err |= __put_user(from->si_pid, &to->si_pid);
2043 		err |= __put_user(from->si_uid, &to->si_uid);
2044 		err |= __put_user(from->si_ptr, &to->si_ptr);
2045 		break;
2046 	default: /* this is just in case for now ... */
2047 		err |= __put_user(from->si_pid, &to->si_pid);
2048 		err |= __put_user(from->si_uid, &to->si_uid);
2049 		break;
2050 	}
2051 	return err;
2052 }
2053 
2054 #endif
2055 
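/*
 * sys_rt_sigtimedwait - synchronously wait for a signal in @uthese.
 * Returns the signal number on success, -EAGAIN once the (possibly
 * zero) timeout expires, or -EINTR if the sleep is interrupted by a
 * signal outside the requested set.
 */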
2056 asmlinkage long
2057 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2058 		    siginfo_t __user *uinfo,
2059 		    const struct timespec __user *uts,
2060 		    size_t sigsetsize)
2061 {
2062 	int ret, sig;
2063 	sigset_t these;
2064 	struct timespec ts;
2065 	siginfo_t info;
2066 	long timeout = 0;
2067 
2068 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2069 	if (sigsetsize != sizeof(sigset_t))
2070 		return -EINVAL;
2071 
2072 	if (copy_from_user(&these, uthese, sizeof(these)))
2073 		return -EFAULT;
2074 
2075 	/*
2076 	 * Invert the set of allowed signals to get those we
2077 	 * want to block.
2078 	 */
2079 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2080 	signotset(&these);
2081 
2082 	if (uts) {
2083 		if (copy_from_user(&ts, uts, sizeof(ts)))
2084 			return -EFAULT;
2085 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2086 		    || ts.tv_sec < 0)
2087 			return -EINVAL;
2088 	}
2089 
2090 	spin_lock_irq(&current->sighand->siglock);
2091 	sig = dequeue_signal(current, &these, &info);
2092 	if (!sig) {
2093 		timeout = MAX_SCHEDULE_TIMEOUT;
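		/*
		 * For a nonzero user timeout, (tv_sec || tv_nsec) adds one
		 * jiffy so that, with tick granularity, we never sleep for
		 * less than the requested time; a zero timeout just polls.
		 */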
2094 		if (uts)
2095 			timeout = (timespec_to_jiffies(&ts)
2096 				   + (ts.tv_sec || ts.tv_nsec));
2097 
2098 		if (timeout) {
2099 			/* None ready -- temporarily unblock those we're
2100 			 * interested in while we sleep, so that we'll
2101 			 * be awakened when they arrive.  */
2102 			current->real_blocked = current->blocked;
2103 			sigandsets(&current->blocked, &current->blocked, &these);
2104 			recalc_sigpending();
2105 			spin_unlock_irq(&current->sighand->siglock);
2106 
2107 			timeout = schedule_timeout_interruptible(timeout);
2108 
2109 			spin_lock_irq(&current->sighand->siglock);
2110 			sig = dequeue_signal(current, &these, &info);
2111 			current->blocked = current->real_blocked;
2112 			siginitset(&current->real_blocked, 0);
2113 			recalc_sigpending();
2114 		}
2115 	}
2116 	spin_unlock_irq(&current->sighand->siglock);
2117 
2118 	if (sig) {
2119 		ret = sig;
2120 		if (uinfo) {
2121 			if (copy_siginfo_to_user(uinfo, &info))
2122 				ret = -EFAULT;
2123 		}
2124 	} else {
2125 		ret = -EAGAIN;
2126 		if (timeout)
2127 			ret = -EINTR;
2128 	}
2129 
2130 	return ret;
2131 }
2132 
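/**
 *  sys_kill - send a signal to a process or process group
 *  @pid: >0 sends to that process, 0 to the caller's process group,
 *        -1 broadcasts to every process the caller may signal (except
 *        init), and <-1 sends to process group -@pid
 *  @sig: signal to be sent
 */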
2133 asmlinkage long
2134 sys_kill(int pid, int sig)
2135 {
2136 	struct siginfo info;
2137 
2138 	info.si_signo = sig;
2139 	info.si_errno = 0;
2140 	info.si_code = SI_USER;
2141 	info.si_pid = current->tgid;
2142 	info.si_uid = current->uid;
2143 
2144 	return kill_something_info(sig, &info, pid);
2145 }
2146 
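/*
 * do_tkill - common code for tkill() and tgkill().  A @tgid of zero or
 * less means "don't check which thread group the thread belongs to".
 */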
2147 static int do_tkill(int tgid, int pid, int sig)
2148 {
2149 	int error;
2150 	struct siginfo info;
2151 	struct task_struct *p;
2152 
2153 	error = -ESRCH;
2154 	info.si_signo = sig;
2155 	info.si_errno = 0;
2156 	info.si_code = SI_TKILL;
2157 	info.si_pid = current->tgid;
2158 	info.si_uid = current->uid;
2159 
2160 	read_lock(&tasklist_lock);
2161 	p = find_task_by_pid(pid);
2162 	if (p && (tgid <= 0 || p->tgid == tgid)) {
2163 		error = check_kill_permission(sig, &info, p);
2164 		/*
2165 		 * The null signal is a permissions and process existence
2166 		 * probe.  No signal is actually delivered.
2167 		 */
2168 		if (!error && sig && p->sighand) {
2169 			spin_lock_irq(&p->sighand->siglock);
2170 			handle_stop_signal(sig, p);
2171 			error = specific_send_sig_info(sig, &info, p);
2172 			spin_unlock_irq(&p->sighand->siglock);
2173 		}
2174 	}
2175 	read_unlock(&tasklist_lock);
2176 
2177 	return error;
2178 }
2179 
2180 /**
2181  *  sys_tgkill - send signal to one specific thread
2182  *  @tgid: the thread group ID of the thread
2183  *  @pid: the PID of the thread
2184  *  @sig: signal to be sent
2185  *
2186  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2187  *  exists but no longer belongs to the target process. This
2188  *  method solves the problem of threads exiting and PIDs getting reused.
2189  */
2190 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2191 {
2192 	/* This is only valid for single tasks */
2193 	if (pid <= 0 || tgid <= 0)
2194 		return -EINVAL;
2195 
2196 	return do_tkill(tgid, pid, sig);
2197 }
2198 
2199 /*
2200  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2201  */
2202 asmlinkage long
2203 sys_tkill(int pid, int sig)
2204 {
2205 	/* This is only valid for single tasks */
2206 	if (pid <= 0)
2207 		return -EINVAL;
2208 
2209 	return do_tkill(0, pid, sig);
2210 }
2211 
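/*
 * sys_rt_sigqueueinfo - send a signal with caller-supplied siginfo.
 * Only negative si_code values are accepted, so userspace cannot forge
 * kernel-generated or kill()-style information.
 */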
2212 asmlinkage long
2213 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2214 {
2215 	siginfo_t info;
2216 
2217 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2218 		return -EFAULT;
2219 
2220 	/* Not even root can pretend to send signals from the kernel.
2221 	   Nor can they impersonate a kill(), which adds source info.  */
2222 	if (info.si_code >= 0)
2223 		return -EPERM;
2224 	info.si_signo = sig;
2225 
2226 	/* POSIX.1b doesn't mention process groups.  */
2227 	return kill_proc_info(sig, &info, pid);
2228 }
2229 
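/*
 * do_sigaction - install a new signal disposition and/or fetch the old
 * one.  Runs under siglock; when the new disposition means the signal
 * will be ignored, pending instances are flushed from the shared queue
 * and from every thread in the group, as POSIX requires (see below).
 */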
2230 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2231 {
2232 	struct k_sigaction *k;
2233 	sigset_t mask;
2234 
2235 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2236 		return -EINVAL;
2237 
2238 	k = &current->sighand->action[sig-1];
2239 
2240 	spin_lock_irq(&current->sighand->siglock);
2241 	if (signal_pending(current)) {
2242 		/*
2243 		 * If there might be a fatal signal pending on multiple
2244 		 * threads, make sure we take it before changing the action.
2245 		 */
2246 		spin_unlock_irq(&current->sighand->siglock);
2247 		return -ERESTARTNOINTR;
2248 	}
2249 
2250 	if (oact)
2251 		*oact = *k;
2252 
2253 	if (act) {
2254 		sigdelsetmask(&act->sa.sa_mask,
2255 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2256 		*k = *act;
2257 		/*
2258 		 * POSIX 3.3.1.3:
2259 		 *  "Setting a signal action to SIG_IGN for a signal that is
2260 		 *   pending shall cause the pending signal to be discarded,
2261 		 *   whether or not it is blocked."
2262 		 *
2263 		 *  "Setting a signal action to SIG_DFL for a signal that is
2264 		 *   pending and whose default action is to ignore the signal
2265 		 *   (for example, SIGCHLD), shall cause the pending signal to
2266 		 *   be discarded, whether or not it is blocked"
2267 		 */
2268 		if (act->sa.sa_handler == SIG_IGN ||
2269 		   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
2270 			struct task_struct *t = current;
2271 			sigemptyset(&mask);
2272 			sigaddset(&mask, sig);
2273 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2274 			do {
2275 				rm_from_queue_full(&mask, &t->pending);
2276 				recalc_sigpending_tsk(t);
2277 				t = next_thread(t);
2278 			} while (t != current);
2279 		}
2280 	}
2281 
2282 	spin_unlock_irq(&current->sighand->siglock);
2283 	return 0;
2284 }
2285 
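/*
 * do_sigaltstack - set and/or fetch the alternate signal stack.
 * Changing the stack is refused (-EPERM) while we are executing on it;
 * SS_DISABLE clears the stack, and anything else must provide at least
 * MINSIGSTKSZ bytes or we return -ENOMEM.
 */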
2286 int
2287 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2288 {
2289 	stack_t oss;
2290 	int error;
2291 
2292 	if (uoss) {
2293 		oss.ss_sp = (void __user *) current->sas_ss_sp;
2294 		oss.ss_size = current->sas_ss_size;
2295 		oss.ss_flags = sas_ss_flags(sp);
2296 	}
2297 
2298 	if (uss) {
2299 		void __user *ss_sp;
2300 		size_t ss_size;
2301 		int ss_flags;
2302 
2303 		error = -EFAULT;
2304 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2305 		    || __get_user(ss_sp, &uss->ss_sp)
2306 		    || __get_user(ss_flags, &uss->ss_flags)
2307 		    || __get_user(ss_size, &uss->ss_size))
2308 			goto out;
2309 
2310 		error = -EPERM;
2311 		if (on_sig_stack(sp))
2312 			goto out;
2313 
2314 		error = -EINVAL;
2315 		/*
2316 		 *
2317 		 * Note - this code used to test ss_flags incorrectly;
2318 		 *	  old code may have been written using ss_flags==0
2319 		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
2320 		 *	  way that worked), so this fix preserves that older
2321 		 *	  mechanism.
2322 		 */
2323 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2324 			goto out;
2325 
2326 		if (ss_flags == SS_DISABLE) {
2327 			ss_size = 0;
2328 			ss_sp = NULL;
2329 		} else {
2330 			error = -ENOMEM;
2331 			if (ss_size < MINSIGSTKSZ)
2332 				goto out;
2333 		}
2334 
2335 		current->sas_ss_sp = (unsigned long) ss_sp;
2336 		current->sas_ss_size = ss_size;
2337 	}
2338 
2339 	if (uoss) {
2340 		error = -EFAULT;
2341 		if (copy_to_user(uoss, &oss, sizeof(oss)))
2342 			goto out;
2343 	}
2344 
2345 	error = 0;
2346 out:
2347 	return error;
2348 }
2349 
2350 #ifdef __ARCH_WANT_SYS_SIGPENDING
2351 
2352 asmlinkage long
2353 sys_sigpending(old_sigset_t __user *set)
2354 {
2355 	return do_sigpending(set, sizeof(*set));
2356 }
2357 
2358 #endif
2359 
2360 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2361 /* Some platforms have their own version with special arguments; others
2362    support only sys_rt_sigprocmask.  */
2363 
2364 asmlinkage long
2365 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2366 {
2367 	int error;
2368 	old_sigset_t old_set, new_set;
2369 
2370 	if (set) {
2371 		error = -EFAULT;
2372 		if (copy_from_user(&new_set, set, sizeof(*set)))
2373 			goto out;
2374 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2375 
2376 		spin_lock_irq(&current->sighand->siglock);
2377 		old_set = current->blocked.sig[0];
2378 
2379 		error = 0;
2380 		switch (how) {
2381 		default:
2382 			error = -EINVAL;
2383 			break;
2384 		case SIG_BLOCK:
2385 			sigaddsetmask(&current->blocked, new_set);
2386 			break;
2387 		case SIG_UNBLOCK:
2388 			sigdelsetmask(&current->blocked, new_set);
2389 			break;
2390 		case SIG_SETMASK:
2391 			current->blocked.sig[0] = new_set;
2392 			break;
2393 		}
2394 
2395 		recalc_sigpending();
2396 		spin_unlock_irq(&current->sighand->siglock);
2397 		if (error)
2398 			goto out;
2399 		if (oset)
2400 			goto set_old;
2401 	} else if (oset) {
2402 		old_set = current->blocked.sig[0];
2403 	set_old:
2404 		error = -EFAULT;
2405 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2406 			goto out;
2407 	}
2408 	error = 0;
2409 out:
2410 	return error;
2411 }
2412 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2413 
2414 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2415 asmlinkage long
2416 sys_rt_sigaction(int sig,
2417 		 const struct sigaction __user *act,
2418 		 struct sigaction __user *oact,
2419 		 size_t sigsetsize)
2420 {
2421 	struct k_sigaction new_sa, old_sa;
2422 	int ret = -EINVAL;
2423 
2424 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2425 	if (sigsetsize != sizeof(sigset_t))
2426 		goto out;
2427 
2428 	if (act) {
2429 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2430 			return -EFAULT;
2431 	}
2432 
2433 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2434 
2435 	if (!ret && oact) {
2436 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2437 			return -EFAULT;
2438 	}
2439 out:
2440 	return ret;
2441 }
2442 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2443 
2444 #ifdef __ARCH_WANT_SYS_SGETMASK
2445 
2446 /*
2447  * For backwards compatibility.  Functionality superseded by sigprocmask.
2448  */
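/*
 * Note that these operate only on the first word of the blocked set,
 * i.e. the single-word signal mask of the historical ABI.
 */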
2449 asmlinkage long
2450 sys_sgetmask(void)
2451 {
2452 	/* SMP safe */
2453 	return current->blocked.sig[0];
2454 }
2455 
2456 asmlinkage long
2457 sys_ssetmask(int newmask)
2458 {
2459 	int old;
2460 
2461 	spin_lock_irq(&current->sighand->siglock);
2462 	old = current->blocked.sig[0];
2463 
2464 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2465 						  sigmask(SIGSTOP)));
2466 	recalc_sigpending();
2467 	spin_unlock_irq(&current->sighand->siglock);
2468 
2469 	return old;
2470 }
2471 #endif /* __ARCH_WANT_SYS_SGETMASK */
2472 
2473 #ifdef __ARCH_WANT_SYS_SIGNAL
2474 /*
2475  * For backwards compatibility.  Functionality superseded by sigaction.
2476  */
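/*
 * SA_ONESHOT | SA_NOMASK gives the historical "unreliable" semantics:
 * the handler is reset to SIG_DFL when the signal is delivered and the
 * signal is not blocked while the handler runs.
 */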
2477 asmlinkage unsigned long
2478 sys_signal(int sig, __sighandler_t handler)
2479 {
2480 	struct k_sigaction new_sa, old_sa;
2481 	int ret;
2482 
2483 	new_sa.sa.sa_handler = handler;
2484 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2485 	sigemptyset(&new_sa.sa.sa_mask);
2486 
2487 	ret = do_sigaction(sig, &new_sa, &old_sa);
2488 
2489 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2490 }
2491 #endif /* __ARCH_WANT_SYS_SIGNAL */
2492 
2493 #ifdef __ARCH_WANT_SYS_PAUSE
2494 
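/*
 * sys_pause - sleep until any signal arrives.  It always returns
 * -ERESTARTNOHAND, which becomes -EINTR for userspace once a signal
 * handler has actually been set up, and a silent restart otherwise.
 */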
2495 asmlinkage long
2496 sys_pause(void)
2497 {
2498 	current->state = TASK_INTERRUPTIBLE;
2499 	schedule();
2500 	return -ERESTARTNOHAND;
2501 }
2502 
2503 #endif
2504 
2505 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
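/*
 * sys_rt_sigsuspend - atomically replace the blocked mask and sleep.
 * The old mask is stashed in ->saved_sigmask and restored by the arch
 * signal-delivery code via TIF_RESTORE_SIGMASK after a handler runs.
 */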
2506 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2507 {
2508 	sigset_t newset;
2509 
2510 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2511 	if (sigsetsize != sizeof(sigset_t))
2512 		return -EINVAL;
2513 
2514 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2515 		return -EFAULT;
2516 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2517 
2518 	spin_lock_irq(&current->sighand->siglock);
2519 	current->saved_sigmask = current->blocked;
2520 	current->blocked = newset;
2521 	recalc_sigpending();
2522 	spin_unlock_irq(&current->sighand->siglock);
2523 
2524 	current->state = TASK_INTERRUPTIBLE;
2525 	schedule();
2526 	set_thread_flag(TIF_RESTORE_SIGMASK);
2527 	return -ERESTARTNOHAND;
2528 }
2529 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2530 
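/*
 * Weak default for architectures that do not give special mappings
 * (such as the vDSO) a name in /proc/<pid>/maps; NULL means "no name".
 */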
2531 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2532 {
2533 	return NULL;
2534 }
2535 
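/*
 * Called once at boot to create the SLAB cache backing struct sigqueue;
 * SLAB_PANIC makes a failure here fatal.
 */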
2536 void __init signals_init(void)
2537 {
2538 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2539 }
2540