/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *              Changes to use preallocated sigqueue structures
 *              to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"      /* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

static int __sig_ignored(struct task_struct *t, int sig)
{
        void __user *handler;

        /* Is it explicitly or implicitly ignored? */

        handler = t->sighand->action[sig - 1].sa.sa_handler;
        return handler == SIG_IGN ||
                (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_ignored(struct task_struct *t, int sig)
{
        /*
         * Tracers always want to know about signals..
         */
        if (t->ptrace & PT_PTRACED)
                return 0;

        /*
         * Blocked signals are never ignored, since the
         * signal handler may change by the time it is
         * unblocked.
         */
        if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return 0;

        return __sig_ignored(t, sig);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
        unsigned long ready;
        long i;

        switch (_NSIG_WORDS) {
        default:
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= signal->sig[i] &~ blocked->sig[i];
                break;

        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
                ready |= signal->sig[2] &~ blocked->sig[2];
                ready |= signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
        }
        return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
        if (t->signal->group_stop_count > 0 ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
                return 1;
        }
        /*
         * We must never clear the flag in another thread, or in current
         * when it's possible the current syscall is returning -ERESTART*.
         * So we don't clear it here; only callers who know they should
         * clear it do so.
         */
        return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
        if (recalc_sigpending_tsk(t))
                signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
        if (!recalc_sigpending_tsk(current) && !freezing(current))
                clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

        s = pending->signal.sig;
        m = mask->sig;
        switch (_NSIG_WORDS) {
        default:
                for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
                        if ((x = *s &~ *m) != 0) {
                                sig = ffz(~x) + i*_NSIG_BPW + 1;
                                break;
                        }
                break;

        case 2: if ((x = s[0] &~ m[0]) != 0)
                        sig = 1;
                else if ((x = s[1] &~ m[1]) != 0)
                        sig = _NSIG_BPW + 1;
                else
                        break;
                sig += ffz(~x);
                break;

        case 1: if ((x = *s &~ *m) != 0)
                        sig = ffz(~x) + 1;
                break;
        }

        return sig;
}

static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
                                         int override_rlimit)
{
        struct sigqueue *q = NULL;
        struct user_struct *user;

        /*
         * In order to avoid problems with "switch_user()", we want to make
         * sure that the compiler doesn't re-load "t->user"
         */
        user = t->user;
        barrier();
        atomic_inc(&user->sigpending);
        if (override_rlimit ||
            atomic_read(&user->sigpending) <=
                        t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
                q = kmem_cache_alloc(sigqueue_cachep, flags);
        if (unlikely(q == NULL)) {
                atomic_dec(&user->sigpending);
        } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = 0;
                q->user = get_uid(user);
        }
        return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
        atomic_dec(&q->user->sigpending);
        free_uid(q->user);
        kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
        struct sigqueue *q;

        sigemptyset(&queue->signal);
        while (!list_empty(&queue->list)) {
                q = list_entry(queue->list.next, struct sigqueue, list);
                list_del_init(&q->list);
                __sigqueue_free(q);
        }
}
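/*
 * Illustrative sketch (assumed summary, not built): the functions above
 * split a pending signal into two pieces of state -- the pending *bit*
 * in sigpending->signal and the queued *siginfo* on sigpending->list.
 * A delivery roughly does:
 */
#if 0
        q = __sigqueue_alloc(t, GFP_ATOMIC, 0);  /* charged to RLIMIT_SIGPENDING */
        list_add_tail(&q->list, &pending->list); /* queue the siginfo */
        sigaddset(&pending->signal, sig);        /* set the pending bit */
        /*
         * Later, collect_signal() below unlinks the entry and
         * __sigqueue_free() uncharges the user and returns it to
         * sigqueue_cachep.
         */
#endif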
/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
        unsigned long flags;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        clear_tsk_thread_flag(t, TIF_SIGPENDING);
        flush_sigqueue(&t->pending);
        flush_sigqueue(&t->signal->shared_pending);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
        sigset_t signal, retain;
        struct sigqueue *q, *n;

        signal = pending->signal;
        sigemptyset(&retain);

        list_for_each_entry_safe(q, n, &pending->list, list) {
                int sig = q->info.si_signo;

                if (likely(q->info.si_code != SI_TIMER)) {
                        sigaddset(&retain, sig);
                } else {
                        sigdelset(&signal, sig);
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }

        sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
        struct task_struct *tsk = current;
        unsigned long flags;

        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        __flush_itimer_signals(&tsk->pending);
        __flush_itimer_signals(&tsk->signal->shared_pending);
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
        int i;

        for (i = 0; i < _NSIG; ++i)
                t->sighand->action[i].sa.sa_handler = SIG_IGN;

        flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
        int i;
        struct k_sigaction *ka = &t->sighand->action[0];
        for (i = _NSIG ; i != 0 ; i--) {
                if (force_default || ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
                ka->sa.sa_flags = 0;
                sigemptyset(&ka->sa.sa_mask);
                ka++;
        }
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
        if (is_global_init(tsk))
                return 1;
        if (tsk->ptrace & PT_PTRACED)
                return 0;
        return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
                (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
}


/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->notifier_mask = mask;
        current->notifier_data = priv;
        current->notifier = notifier;
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
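/*
 * Illustrative sketch of the notifier mechanism above (hypothetical
 * driver names; not built).  The notifier runs under ->siglock, so it
 * must not sleep; the mask must stay valid until unblock_all_signals()
 * is called.
 */
#if 0
static int example_notifier(void *priv)
{
        struct example_dev *dev = priv;         /* hypothetical device */

        /* Non-zero: deliver the signal after all; zero: keep blocking. */
        return !dev->in_critical_window;
}

static void example_enter_window(struct example_dev *dev)
{
        sigfillset(&dev->blocked_mask);
        block_all_signals(example_notifier, dev, &dev->blocked_mask);
}

static void example_leave_window(struct example_dev *dev)
{
        unblock_all_signals();
}
#endif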
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->notifier = NULL;
        current->notifier_data = NULL;
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
        struct sigqueue *q, *first = NULL;
        int still_pending = 0;

        if (unlikely(!sigismember(&list->signal, sig)))
                return 0;

        /*
         * Collect the siginfo appropriate to this signal.  Check if
         * there is another siginfo for the same signal.
         */
        list_for_each_entry(q, &list->list, list) {
                if (q->info.si_signo == sig) {
                        if (first) {
                                still_pending = 1;
                                break;
                        }
                        first = q;
                }
        }
        if (first) {
                list_del_init(&first->list);
                copy_siginfo(info, &first->info);
                __sigqueue_free(first);
                if (!still_pending)
                        sigdelset(&list->signal, sig);
        } else {

                /* Ok, it wasn't in the queue.  This must be
                   a fast-pathed signal or we must have been
                   out of queue space.  So zero out the info.
                 */
                sigdelset(&list->signal, sig);
                info->si_signo = sig;
                info->si_errno = 0;
                info->si_code = 0;
                info->si_pid = 0;
                info->si_uid = 0;
        }
        return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                        siginfo_t *info)
{
        int sig = next_signal(pending, mask);

        if (sig) {
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
                                if (!(current->notifier)(current->notifier_data)) {
                                        clear_thread_flag(TIF_SIGPENDING);
                                        return 0;
                                }
                        }
                }

                if (!collect_signal(sig, pending, info))
                        sig = 0;
        }

        return sig;
}
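/*
 * Sketch of the caller contract for dequeue_signal() below; this is the
 * same pattern get_signal_to_deliver() and sys_rt_sigtimedwait() use
 * later in this file:
 */
#if 0
        siginfo_t info;
        int signr;

        spin_lock_irq(&current->sighand->siglock);
        signr = dequeue_signal(current, &current->blocked, &info);
        spin_unlock_irq(&current->sighand->siglock);
#endif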
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
        int signr;

        /* We only dequeue private signals from ourselves, we don't let
         * signalfd steal them
         */
        signr = __dequeue_signal(&tsk->pending, mask, info);
        if (!signr) {
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info);
                /*
                 * itimer signal ?
                 *
                 * itimers are process shared and we restart periodic
                 * itimers in the signal delivery path to prevent DoS
                 * attacks in the high resolution timer case. This is
                 * compliant with the old way of self restarting
                 * itimers, as the SIGALRM is a legacy signal and only
                 * queued once. Changing the restart behaviour to
                 * restart the timer in the signal dequeue path also
                 * reduces the timer noise on heavily loaded !highres
                 * systems.
                 */
                if (unlikely(signr == SIGALRM)) {
                        struct hrtimer *tmr = &tsk->signal->real_timer;

                        if (!hrtimer_is_queued(tmr) &&
                            tsk->signal->it_real_incr.tv64 != 0) {
                                hrtimer_forward(tmr, tmr->base->get_time(),
                                                tsk->signal->it_real_incr);
                                hrtimer_restart(tmr);
                        }
                }
        }

        recalc_sigpending();
        if (!signr)
                return 0;

        if (unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal.  Our
                 * caller might release the siglock and then the pending
                 * stop signal it is about to process is no longer in the
                 * pending bitmasks, but must still be cleared by a SIGCONT
                 * (and overruled by a SIGKILL).  So those cases clear this
                 * shared flag after we've set it.  Note that this flag may
                 * remain set after the signal we return is ignored or
                 * handled.  That doesn't matter because its only purpose
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
                if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
                        tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
        }
        if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks.  Note, we leave
                 * irqs disabled here, since the posix-timers code is
                 * about to disable them again anyway.
                 */
                spin_unlock(&tsk->sighand->siglock);
                do_schedule_next_timer(info);
                spin_lock(&tsk->sighand->siglock);
        }
        return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
        unsigned int mask;

        set_tsk_thread_flag(t, TIF_SIGPENDING);

        /*
         * For SIGKILL, we want to wake it up in the stopped/traced/killable
         * case.  We don't check t->state here because there is a race with
         * it executing on another processor and just now entering stopped
         * state.  By using wake_up_state, we ensure the process will wake
         * up and handle its death signal.
         */
        mask = TASK_INTERRUPTIBLE;
        if (resume)
                mask |= TASK_WAKEKILL;
        if (!wake_up_state(t, mask))
                kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
        struct sigqueue *q, *n;
        sigset_t m;

        sigandsets(&m, mask, &s->signal);
        if (sigisemptyset(&m))
                return 0;

        signandsets(&s->signal, &s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (sigismember(mask, q->info.si_signo)) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
        struct sigqueue *q, *n;

        if (!sigtestsetmask(&s->signal, mask))
                return 0;

        sigdelsetmask(&s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (q->info.si_signo < SIGRTMIN &&
                    (mask & sigmask(q->info.si_signo))) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
{
        struct pid *sid;
        int error;

        if (!valid_signal(sig))
                return -EINVAL;

        if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
                return 0;

        error = audit_signal_info(sig, t); /* Let audit system see the signal */
        if (error)
                return error;

        /*
         * Each xor below is zero exactly when the two ids match, so the
         * whole condition holds only if no {e,}uid of the sender matches
         * the uid or suid of the target.
         */
        if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
            (current->uid ^ t->suid) && (current->uid ^ t->uid) &&
            !capable(CAP_KILL)) {
                switch (sig) {
                case SIGCONT:
                        sid = task_session(t);
                        /*
                         * We don't return the error if sid == NULL. The
                         * task was unhashed, the caller must notice this.
                         */
                        if (!sid || sid == task_session(current))
                                break;
                default:
                        return -EPERM;
                }
        }

        return security_task_kill(t, info, sig, 0);
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;

        if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
                /*
                 * The process is in the middle of dying, nothing to do.
                 */
        } else if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
                 */
                rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
                t = p;
                do {
                        rm_from_queue(sigmask(SIGCONT), &t->pending);
                } while_each_thread(p, t);
        } else if (sig == SIGCONT) {
                unsigned int why;
                /*
                 * Remove all stop signals from all queues,
                 * and wake all threads.
                 */
                rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
                t = p;
                do {
                        unsigned int state;
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
                        /*
                         * If there is a handler for SIGCONT, we must make
                         * sure that no thread returns to user mode before
                         * we post the signal, in case it was the only
                         * thread eligible to run the signal handler--then
                         * it must not do anything between resuming and
                         * running the handler.  With the TIF_SIGPENDING
                         * flag set, the thread will pause and acquire the
                         * siglock that we hold now and until we've queued
                         * the pending signal.
                         *
                         * Wake up the stopped thread _after_ setting
                         * TIF_SIGPENDING
                         */
                        state = __TASK_STOPPED;
                        if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
                                set_tsk_thread_flag(t, TIF_SIGPENDING);
                                state |= TASK_INTERRUPTIBLE;
                        }
                        wake_up_state(t, state);
                } while_each_thread(p, t);

                /*
                 * Notify the parent with CLD_CONTINUED if we were stopped.
                 *
                 * If we were in the middle of a group stop, we pretend it
                 * was already finished, and then continued. Since SIGCHLD
                 * doesn't queue we report only CLD_STOPPED, as if the next
                 * CLD_CONTINUED was dropped.
                 */
                why = 0;
                if (signal->flags & SIGNAL_STOP_STOPPED)
                        why |= SIGNAL_CLD_CONTINUED;
                else if (signal->group_stop_count)
                        why |= SIGNAL_CLD_STOPPED;

                if (why) {
                        /*
                         * The first thread which returns from finish_stop()
                         * will take ->siglock, notice SIGNAL_CLD_MASK, and
                         * notify its parent. See get_signal_to_deliver().
                         */
                        signal->flags = why | SIGNAL_STOP_CONTINUED;
                        signal->group_stop_count = 0;
                        signal->group_exit_code = 0;
                } else {
                        /*
                         * We are not stopped, but there could be a stop
                         * signal in the middle of being processed after
                         * being removed from the queue.  Clear that too.
                         */
                        signal->flags &= ~SIGNAL_STOP_DEQUEUED;
                }
        }

        return !sig_ignored(p, sig);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
        if (sigismember(&p->blocked, sig))
                return 0;
        if (p->flags & PF_EXITING)
                return 0;
        if (sig == SIGKILL)
                return 1;
        if (task_is_stopped_or_traced(p))
                return 0;
        return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;

        /*
         * Now find a thread we can wake up to take the signal off the queue.
         *
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
        if (wants_signal(sig, p))
                t = p;
        else if (!group || thread_group_empty(p))
                /*
                 * There is just one thread and it does not need to be woken.
                 * It will dequeue unblocked signals before it runs again.
                 */
                return;
        else {
                /*
                 * Otherwise try to find a suitable thread.
                 */
                t = signal->curr_target;
                while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == signal->curr_target)
                                /*
                                 * No thread needs to be woken.
                                 * Any eligible threads will see
                                 * the signal in the queue soon.
                                 */
                                return;
                }
                signal->curr_target = t;
        }

        /*
         * Found a killable thread.  If the signal will be fatal,
         * then start taking the whole group down immediately.
         */
        if (sig_fatal(p, sig) &&
            !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
            !sigismember(&t->real_blocked, sig) &&
            (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
                /*
                 * This signal will be fatal to the whole group.
                 */
                if (!sig_kernel_coredump(sig)) {
                        /*
                         * Start a group exit and wake everybody up.
                         * This way we don't have other threads
                         * running and doing things after a slower
                         * thread has the fatal signal pending.
                         */
                        signal->flags = SIGNAL_GROUP_EXIT;
                        signal->group_exit_code = sig;
                        signal->group_stop_count = 0;
                        t = p;
                        do {
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                        } while_each_thread(p, t);
                        return;
                }
        }

        /*
         * The signal is already in the shared-pending queue.
         * Tell the chosen thread to wake up and dequeue it.
         */
        signal_wake_up(t, sig == SIGKILL);
        return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
        return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
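/*
 * Userspace view of legacy_queue() (illustrative sketch, not kernel
 * code): two non-rt signals sent before the target runs collapse into
 * one delivery, while rt signals are queued individually.
 */
#if 0
        union sigval val = { .sival_int = 1 };

        kill(pid, SIGUSR1);             /* sets the pending bit       */
        kill(pid, SIGUSR1);             /* coalesced: already pending */
        sigqueue(pid, SIGRTMIN, val);   /* queued                     */
        sigqueue(pid, SIGRTMIN, val);   /* queued again               */
#endif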
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        int group)
{
        struct sigpending *pending;
        struct sigqueue *q;

        assert_spin_locked(&t->sighand->siglock);
        if (!prepare_signal(sig, t))
                return 0;

        pending = group ? &t->signal->shared_pending : &t->pending;
        /*
         * Short-circuit ignored signals and support queuing
         * exactly one non-rt signal, so that we can get more
         * detailed information about the cause of the signal.
         */
        if (legacy_queue(pending, sig))
                return 0;
        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
         */
        if (info == SEND_SIG_FORCED)
                goto out_set;

        /* Real-time signals must be queued if sent by sigqueue, or
           some other real-time mechanism.  It is implementation
           defined whether kill() does so.  We attempt to do so, on
           the principle of least surprise, but since kill is not
           allowed to fail with EAGAIN when low on memory we just
           make sure at least one signal gets delivered and don't
           pass on the info struct.  */

        q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
                                             (is_si_special(info) ||
                                              info->si_code >= 0)));
        if (q) {
                list_add_tail(&q->list, &pending->list);
                switch ((unsigned long) info) {
                case (unsigned long) SEND_SIG_NOINFO:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
                        q->info.si_pid = task_pid_vnr(current);
                        q->info.si_uid = current->uid;
                        break;
                case (unsigned long) SEND_SIG_PRIV:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_KERNEL;
                        q->info.si_pid = 0;
                        q->info.si_uid = 0;
                        break;
                default:
                        copy_siginfo(&q->info, info);
                        break;
                }
        } else if (!is_si_special(info)) {
                if (sig >= SIGRTMIN && info->si_code != SI_USER)
                        /*
                         * Queue overflow, abort.  We may abort if the
                         * signal was rt and sent by user using something
                         * other than kill().
                         */
                        return -EAGAIN;
        }

out_set:
        signalfd_notify(t, sig);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
        return 0;
}

int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
        printk("%s/%d: potentially unexpected fatal signal %d.\n",
                current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
        printk("code at %08lx: ", regs->ip);
        {
                int i;
                for (i = 0; i < 16; i++) {
                        unsigned char insn;

                        __get_user(insn, (unsigned char *)(regs->ip + i));
                        printk("%02x ", insn);
                }
        }
#endif
        printk("\n");
        show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
        get_option (&str, &print_fatal_signals);

        return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        return send_signal(sig, info, t, 0);
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        unsigned long int flags;
        int ret, blocked, ignored;
        struct k_sigaction *action;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        action = &t->sighand->action[sig-1];
        ignored = action->sa.sa_handler == SIG_IGN;
        blocked = sigismember(&t->blocked, sig);
        if (blocked || ignored) {
                action->sa.sa_handler = SIG_DFL;
                if (blocked) {
                        sigdelset(&t->blocked, sig);
                        recalc_sigpending_and_wake(t);
                }
        }
        if (action->sa.sa_handler == SIG_DFL)
                t->signal->flags &= ~SIGNAL_UNKILLABLE;
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);

        return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
        force_sig_info(sig, SEND_SIG_FORCED, t);
}
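/*
 * Illustrative sketch (assumed pattern, cf. the arch do_page_fault()
 * implementations): force_sig_info() is what architecture fault
 * handlers typically use to deliver a synchronous fault, so that a
 * blocked or ignored SIGSEGV cannot make the fault disappear.
 */
#if 0
        info.si_signo = SIGSEGV;
        info.si_errno = 0;
        info.si_code  = SEGV_MAPERR;
        info.si_addr  = (void __user *)fault_address;   /* hypothetical */
        force_sig_info(SIGSEGV, &info, current);
#endif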
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
        struct task_struct *t;

        p->signal->group_stop_count = 0;

        for (t = next_thread(p); t != p; t = next_thread(t)) {
                /*
                 * Don't bother with already dead threads
                 */
                if (t->exit_state)
                        continue;

                /* SIGKILL will be handled before any pending SIGSTOP */
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
        }
}

int __fatal_signal_pending(struct task_struct *tsk)
{
        return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);

struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
        struct sighand_struct *sighand;

        rcu_read_lock();
        for (;;) {
                sighand = rcu_dereference(tsk->sighand);
                if (unlikely(sighand == NULL))
                        break;

                spin_lock_irqsave(&sighand->siglock, *flags);
                if (likely(sighand == tsk->sighand))
                        break;
                spin_unlock_irqrestore(&sighand->siglock, *flags);
        }
        rcu_read_unlock();

        return sighand;
}

int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        unsigned long flags;
        int ret;

        ret = check_kill_permission(sig, info, p);

        if (!ret && sig) {
                ret = -ESRCH;
                if (lock_task_sighand(p, &flags)) {
                        ret = __group_send_sig_info(sig, info, p);
                        unlock_task_sighand(p, &flags);
                }
        }

        return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
        struct task_struct *p = NULL;
        int retval, success;

        success = 0;
        retval = -ESRCH;
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                int err = group_send_sig_info(sig, info, p);
                success |= !err;
                retval = err;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return success ? 0 : retval;
}
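/*
 * Illustrative sketch (assumed, cf. the n_tty line discipline): a ^C on
 * a terminal ends up here roughly as follows, via the kill_pgrp()
 * wrapper defined later in this file:
 */
#if 0
        /* isig() handling in the tty layer, in spirit: */
        kill_pgrp(tty->pgrp, SIGINT, 1);        /* 1 => kernel-internal */
#endif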
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
        int error = -ESRCH;
        struct task_struct *p;

        rcu_read_lock();
retry:
        p = pid_task(pid, PIDTYPE_PID);
        if (p) {
                error = group_send_sig_info(sig, info, p);
                if (unlikely(error == -ESRCH))
                        /*
                         * The task was unhashed in between, try again.
                         * If it is dead, pid_task() will return NULL,
                         * if we race with de_thread() it will find the
                         * new leader.
                         */
                        goto retry;
        }
        rcu_read_unlock();

        return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
        int error;
        rcu_read_lock();
        error = kill_pid_info(sig, info, find_vpid(pid));
        rcu_read_unlock();
        return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
                      uid_t uid, uid_t euid, u32 secid)
{
        int ret = -EINVAL;
        struct task_struct *p;

        if (!valid_signal(sig))
                return ret;

        read_lock(&tasklist_lock);
        p = pid_task(pid, PIDTYPE_PID);
        if (!p) {
                ret = -ESRCH;
                goto out_unlock;
        }
        if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
            && (euid != p->suid) && (euid != p->uid)
            && (uid != p->suid) && (uid != p->uid)) {
                ret = -EPERM;
                goto out_unlock;
        }
        ret = security_task_kill(p, info, sig, secid);
        if (ret)
                goto out_unlock;
        if (sig && p->sighand) {
                unsigned long flags;
                spin_lock_irqsave(&p->sighand->siglock, flags);
                ret = __group_send_sig_info(sig, info, p);
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
        }
out_unlock:
        read_unlock(&tasklist_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
        int ret;

        if (pid > 0) {
                rcu_read_lock();
                ret = kill_pid_info(sig, info, find_vpid(pid));
                rcu_read_unlock();
                return ret;
        }

        read_lock(&tasklist_lock);
        if (pid != -1) {
                ret = __kill_pgrp_info(sig, info,
                                pid ? find_vpid(-pid) : task_pgrp(current));
        } else {
                int retval = 0, count = 0;
                struct task_struct * p;

                for_each_process(p) {
                        if (p->pid > 1 && !same_thread_group(p, current)) {
                                int err = group_send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
                        }
                }
                ret = count ? retval : -ESRCH;
        }
        read_unlock(&tasklist_lock);

        return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * The caller must ensure the task can't exit.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        int ret;
        unsigned long flags;

        /*
         * Make sure legacy kernel users don't send in bad values
         * (normal paths check this in check_kill_permission).
         */
        if (!valid_signal(sig))
                return -EINVAL;

        spin_lock_irqsave(&p->sighand->siglock, flags);
        ret = specific_send_sig_info(sig, info, p);
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
        return ret;
}

#define __si_special(priv) \
        ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
int
send_sig(int sig, struct task_struct *p, int priv)
{
        return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
        force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
        if (sig == SIGSEGV) {
                unsigned long flags;
                spin_lock_irqsave(&p->sighand->siglock, flags);
                p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
        }
        force_sig(SIGSEGV, p);
        return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
        int ret;

        read_lock(&tasklist_lock);
        ret = __kill_pgrp_info(sig, __si_special(priv), pid);
        read_unlock(&tasklist_lock);

        return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
        return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_proc(pid_t pid, int sig, int priv)
{
        int ret;

        rcu_read_lock();
        ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
        rcu_read_unlock();
        return ret;
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
        struct sigqueue *q;

        if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
                q->flags |= SIGQUEUE_PREALLOC;
        return q;
}

void sigqueue_free(struct sigqueue *q)
{
        unsigned long flags;
        spinlock_t *lock = &current->sighand->siglock;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
        /*
         * We must hold ->siglock while testing q->list
         * to serialize with collect_signal() or with
         * __exit_signal()->flush_sigqueue().
         */
        spin_lock_irqsave(lock, flags);
        q->flags &= ~SIGQUEUE_PREALLOC;
        /*
         * If it is queued it will be freed when dequeued,
         * like the "regular" sigqueue.
         */
        if (!list_empty(&q->list))
                q = NULL;
        spin_unlock_irqrestore(lock, flags);

        if (q)
                __sigqueue_free(q);
}
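/*
 * Illustrative lifecycle sketch (assumed, cf. the posix-timers code):
 * because the sigqueue is preallocated at creation time, the expiry
 * path below can never fail with -EAGAIN.
 */
#if 0
        /* at timer_create():  */ q = sigqueue_alloc();
        /* at timer expiry:    */ send_sigqueue(q, target, group);
        /* at timer_delete():  */ sigqueue_free(q);
#endif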
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
        int sig = q->info.si_signo;
        struct sigpending *pending;
        unsigned long flags;
        int ret;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

        ret = -1;
        if (!likely(lock_task_sighand(t, &flags)))
                goto ret;

        ret = 1; /* the signal is ignored */
        if (!prepare_signal(sig, t))
                goto out;

        ret = 0;
        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued, just increment
                 * the overrun count.
                 */
                BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                goto out;
        }

        signalfd_notify(t, sig);
        pending = group ? &t->signal->shared_pending : &t->pending;
        list_add_tail(&q->list, &pending->list);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
out:
        unlock_task_sighand(t, &flags);
ret:
        return ret;
}

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
                                    struct task_struct *parent)
{
        wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
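/*
 * Userspace view of the SIG_IGN/SA_NOCLDWAIT semantics implemented in
 * do_notify_parent() below (illustrative sketch): a parent that opts
 * out this way gets no zombies, and wait() fails with ECHILD.
 */
#if 0
        struct sigaction sa = { .sa_handler = SIG_IGN };

        sigaction(SIGCHLD, &sa, NULL);  /* children are auto-reaped */
#endif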
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;

        BUG_ON(sig == -1);

        /* do_notify_parent_cldstop should have been called instead. */
        BUG_ON(task_is_stopped_or_traced(tsk));

        BUG_ON(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));

        info.si_signo = sig;
        info.si_errno = 0;
        /*
         * We are under tasklist_lock here so our parent is tied to
         * us and cannot exit and release its namespace.
         *
         * The only thing it can do is switch its nsproxy with sys_unshare,
         * but unsharing pid namespaces is not allowed, so we will always
         * see the relevant namespace.
         *
         * write_lock() currently calls preempt_disable() which is the
         * same as rcu_read_lock(), but according to Oleg it is not
         * correct to rely on this.
         */
        rcu_read_lock();
        info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
        rcu_read_unlock();

        info.si_uid = tsk->uid;

        /* FIXME: find out whether or not this is supposed to be c*time. */
        info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
                                                       tsk->signal->utime));
        info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
                                                       tsk->signal->stime));

        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
                info.si_code = CLD_DUMPED;
        else if (tsk->exit_code & 0x7f)
                info.si_code = CLD_KILLED;
        else {
                info.si_code = CLD_EXITED;
                info.si_status = tsk->exit_code >> 8;
        }

        psig = tsk->parent->sighand;
        spin_lock_irqsave(&psig->siglock, flags);
        if (!tsk->ptrace && sig == SIGCHLD &&
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
                /*
                 * We are exiting and our parent doesn't care.  POSIX.1
                 * defines special semantics for setting SIGCHLD to SIG_IGN
                 * or setting the SA_NOCLDWAIT flag: we should be reaped
                 * automatically and not left for our parent's wait4 call.
                 * Rather than having the parent do it as a magic kind of
                 * signal handler, we just set this to tell do_exit that we
                 * can be cleaned up without becoming a zombie.  Note that
                 * we still call __wake_up_parent in this case, because a
                 * blocked sys_wait4 might now return -ECHILD.
                 *
                 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
                 * is implementation-defined: we do (if you don't want
                 * it, just use SIG_IGN instead).
                 */
                tsk->exit_signal = -1;
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
                        sig = 0;
        }
        if (valid_signal(sig) && sig > 0)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
        struct siginfo info;
        unsigned long flags;
        struct task_struct *parent;
        struct sighand_struct *sighand;

        if (tsk->ptrace & PT_PTRACED)
                parent = tsk->parent;
        else {
                tsk = tsk->group_leader;
                parent = tsk->real_parent;
        }

        info.si_signo = SIGCHLD;
        info.si_errno = 0;
        /*
         * See the comment in do_notify_parent() about the following
         * three lines.
         */
        rcu_read_lock();
        info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
        rcu_read_unlock();

        info.si_uid = tsk->uid;

        /* FIXME: find out whether or not this is supposed to be c*time. */
        info.si_utime = cputime_to_jiffies(tsk->utime);
        info.si_stime = cputime_to_jiffies(tsk->stime);

        info.si_code = why;
        switch (why) {
        case CLD_CONTINUED:
                info.si_status = SIGCONT;
                break;
        case CLD_STOPPED:
                info.si_status = tsk->signal->group_exit_code & 0x7f;
                break;
        case CLD_TRAPPED:
                info.si_status = tsk->exit_code & 0x7f;
                break;
        default:
                BUG();
        }

        sighand = parent->sighand;
        spin_lock_irqsave(&sighand->siglock, flags);
        if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
            !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
                __group_send_sig_info(SIGCHLD, &info, parent);
        /*
         * Even if SIGCHLD is not generated, we must wake up wait4 calls.
         */
        __wake_up_parent(tsk, parent);
        spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
        if (!likely(current->ptrace & PT_PTRACED))
                return 0;
        /*
         * Are we in the middle of do_coredump?
         * If so and our tracer is also part of the coredump, stopping
         * is a deadlock situation, and pointless because our tracer
         * is dead, so don't allow us to stop.
         * If SIGKILL was already sent before the caller unlocked
         * ->siglock we must see ->core_waiters != 0. Otherwise it
         * is safe to enter schedule().
         */
        if (unlikely(current->mm->core_waiters) &&
            unlikely(current->mm == current->parent->mm))
                return 0;

        return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
        return ((sigismember(&tsk->pending.signal, SIGKILL) ||
                 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
                !unlikely(sigismember(&tsk->blocked, SIGKILL)));
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
        int killed = 0;

        if (arch_ptrace_stop_needed(exit_code, info)) {
                /*
                 * The arch code has something special to do before a
                 * ptrace stop.  This is allowed to block, e.g. for faults
                 * on user stack pages.  We can't keep the siglock while
                 * calling arch_ptrace_stop, so we must release it now.
                 * To preserve proper semantics, we must do this before
                 * any signal bookkeeping like checking group_stop_count.
                 * Meanwhile, a SIGKILL could come in before we retake the
                 * siglock.  That must prevent us from sleeping in TASK_TRACED.
                 * So after regaining the lock, we must check for SIGKILL.
                 */
                spin_unlock_irq(&current->sighand->siglock);
                arch_ptrace_stop(exit_code, info);
                spin_lock_irq(&current->sighand->siglock);
                killed = sigkill_pending(current);
        }

        /*
         * If there is a group stop in progress,
         * we must participate in the bookkeeping.
         */
        if (current->signal->group_stop_count > 0)
                --current->signal->group_stop_count;

        current->last_siginfo = info;
        current->exit_code = exit_code;

        /* Let the debugger run.  */
        __set_current_state(TASK_TRACED);
        spin_unlock_irq(&current->sighand->siglock);
        read_lock(&tasklist_lock);
        if (!unlikely(killed) && may_ptrace_stop()) {
                do_notify_parent_cldstop(current, CLD_TRAPPED);
                read_unlock(&tasklist_lock);
                schedule();
        } else {
                /*
                 * By the time we got the lock, our tracer went away.
                 * Don't drop the lock yet, another tracer may come.
                 */
                __set_current_state(TASK_RUNNING);
                if (clear_code)
                        current->exit_code = 0;
                read_unlock(&tasklist_lock);
        }

        /*
         * While in TASK_TRACED, we were considered "frozen enough".
         * Now that we woke up, it's crucial if we're supposed to be
         * frozen that we freeze now before running anything substantial.
         */
        try_to_freeze();

        /*
         * We are back.  Now reacquire the siglock before touching
         * last_siginfo, so that we are sure to have synchronized with
         * any signal-sending on another CPU that wants to examine it.
         */
        spin_lock_irq(&current->sighand->siglock);
        current->last_siginfo = NULL;

        /*
         * Queued signals ignored us while we were stopped for tracing.
         * So check for any that we should take before resuming user mode.
         * This sets TIF_SIGPENDING, but never clears it.
         */
        recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
        siginfo_t info;

        BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

        memset(&info, 0, sizeof info);
        info.si_signo = SIGTRAP;
        info.si_code = exit_code;
        info.si_pid = task_pid_vnr(current);
        info.si_uid = current->uid;

        /* Let the debugger run.  */
        spin_lock_irq(&current->sighand->siglock);
        ptrace_stop(exit_code, 1, &info);
        spin_unlock_irq(&current->sighand->siglock);
}
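/*
 * Illustrative sketch (assumed, cf. the ptrace event reporting callers
 * elsewhere in the kernel): callers encode a PTRACE_EVENT_* code in the
 * byte above SIGTRAP, which is exactly what the BUG_ON() in
 * ptrace_notify() above admits.
 */
#if 0
        if (likely(current->ptrace & PT_TRACE_FORK))
                ptrace_notify((PTRACE_EVENT_FORK << 8) | SIGTRAP);
#endif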
static void
finish_stop(int stop_count)
{
        /*
         * If there are no other threads in the group, or if there is
         * a group stop in progress and we are the last to stop,
         * report to the parent.  When ptraced, every thread reports itself.
         */
        if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
                read_lock(&tasklist_lock);
                do_notify_parent_cldstop(current, CLD_STOPPED);
                read_unlock(&tasklist_lock);
        }

        do {
                schedule();
        } while (try_to_freeze());
        /*
         * Now we don't run again until continued.
         */
        current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
        struct signal_struct *sig = current->signal;
        int stop_count;

        if (sig->group_stop_count > 0) {
                /*
                 * There is a group stop in progress.  We don't need to
                 * start another one.
                 */
                stop_count = --sig->group_stop_count;
        } else {
                struct task_struct *t;

                if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE))
                                         != SIGNAL_STOP_DEQUEUED) ||
                    unlikely(signal_group_exit(sig)))
                        return 0;
                /*
                 * There is no group stop already in progress.
                 * We must initiate one now.
                 */
                sig->group_exit_code = signr;

                stop_count = 0;
                for (t = next_thread(current); t != current; t = next_thread(t))
                        /*
                         * Setting state to TASK_STOPPED for a group
                         * stop is always done with the siglock held,
                         * so this check has no races.
                         */
                        if (!(t->flags & PF_EXITING) &&
                            !task_is_stopped_or_traced(t)) {
                                stop_count++;
                                signal_wake_up(t, 0);
                        }
                sig->group_stop_count = stop_count;
        }

        if (stop_count == 0)
                sig->flags = SIGNAL_STOP_STOPPED;
        current->exit_code = sig->group_exit_code;
        __set_current_state(TASK_STOPPED);

        spin_unlock_irq(&current->sighand->siglock);
        finish_stop(stop_count);
        return 1;
}

static int ptrace_signal(int signr, siginfo_t *info,
                         struct pt_regs *regs, void *cookie)
{
        if (!(current->ptrace & PT_PTRACED))
                return signr;

        ptrace_signal_deliver(regs, cookie);

        /* Let the debugger run.  */
        ptrace_stop(signr, 0, info);

        /* We're back.  Did the debugger cancel the sig?  */
        signr = current->exit_code;
        if (signr == 0)
                return signr;

        current->exit_code = 0;

        /* Update the siginfo structure if the signal has
           changed.  If the debugger wanted something
           specific in the siginfo structure then it should
           have updated *info via PTRACE_SETSIGINFO.  */
        if (signr != info->si_signo) {
                info->si_signo = signr;
                info->si_errno = 0;
                info->si_code = SI_USER;
                info->si_pid = task_pid_vnr(current->parent);
                info->si_uid = current->parent->uid;
        }

        /* If the (new) signal is now blocked, requeue it.  */
        if (sigismember(&current->blocked, signr)) {
                specific_send_sig_info(signr, info, current);
                signr = 0;
        }

        return signr;
}
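/*
 * Sketch of the arch-side consumer of get_signal_to_deliver() below
 * (assumed pattern, cf. the arch do_signal() implementations;
 * handle_signal() here stands in for the arch-private frame-setup
 * helper):
 */
#if 0
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Set up the user-mode signal frame and run the handler. */
                handle_signal(signr, &info, &ka, oldset, regs);
        }
#endif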
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
                          struct pt_regs *regs, void *cookie)
{
        struct sighand_struct *sighand = current->sighand;
        struct signal_struct *signal = current->signal;
        int signr;

relock:
        /*
         * We'll jump back here after any time we were stopped in TASK_STOPPED.
         * While in TASK_STOPPED, we were considered "frozen enough".
         * Now that we woke up, it's crucial if we're supposed to be
         * frozen that we freeze now before running anything substantial.
         */
        try_to_freeze();

        spin_lock_irq(&sighand->siglock);
        /*
         * Every stopped thread goes here after wakeup. Check to see if
         * we should notify the parent, prepare_signal(SIGCONT) encodes
         * the CLD_ si_code into SIGNAL_CLD_MASK bits.
         */
        if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
                int why = (signal->flags & SIGNAL_STOP_CONTINUED)
                                ? CLD_CONTINUED : CLD_STOPPED;
                signal->flags &= ~SIGNAL_CLD_MASK;
                spin_unlock_irq(&sighand->siglock);

                read_lock(&tasklist_lock);
                do_notify_parent_cldstop(current->group_leader, why);
                read_unlock(&tasklist_lock);
                goto relock;
        }

        for (;;) {
                struct k_sigaction *ka;

                if (unlikely(signal->group_stop_count > 0) &&
                    do_signal_stop(0))
                        goto relock;

                signr = dequeue_signal(current, &current->blocked, info);
                if (!signr)
                        break; /* will return 0 */

                if (signr != SIGKILL) {
                        signr = ptrace_signal(signr, info, regs, cookie);
                        if (!signr)
                                continue;
                }

                ka = &sighand->action[signr-1];
                if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
                        continue;
                if (ka->sa.sa_handler != SIG_DFL) {
                        /* Run the handler.  */
                        *return_ka = *ka;

                        if (ka->sa.sa_flags & SA_ONESHOT)
                                ka->sa.sa_handler = SIG_DFL;

                        break; /* will return non-zero "signr" value */
                }

                /*
                 * Now we are doing the default action for this signal.
                 */
                if (sig_kernel_ignore(signr)) /* Default is nothing. */
                        continue;

                /*
                 * Global init gets no signals it doesn't want.
                 */
                if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
                    !signal_group_exit(signal))
                        continue;

                if (sig_kernel_stop(signr)) {
                        /*
                         * The default action is to stop all threads in
                         * the thread group.  The job control signals
                         * do nothing in an orphaned pgrp, but SIGSTOP
                         * always works.  Note that siglock needs to be
                         * dropped during the call to is_orphaned_pgrp()
                         * because of lock ordering with tasklist_lock.
                         * This allows an intervening SIGCONT to be posted.
                         * We need to check for that and bail out if necessary.
                         */
                        if (signr != SIGSTOP) {
                                spin_unlock_irq(&sighand->siglock);

                                /* signals can be posted during this window */

                                if (is_current_pgrp_orphaned())
                                        goto relock;

                                spin_lock_irq(&sighand->siglock);
                        }

                        if (likely(do_signal_stop(signr))) {
                                /* It released the siglock.  */
                                goto relock;
                        }

                        /*
                         * We didn't actually stop, due to a race
                         * with SIGCONT or something like that.
                         */
                        continue;
                }

                spin_unlock_irq(&sighand->siglock);

                /*
                 * Anything else is fatal, maybe with a core dump.
                 */
                current->flags |= PF_SIGNALED;

                if (sig_kernel_coredump(signr)) {
                        if (print_fatal_signals)
                                print_fatal_signal(regs, signr);
                        /*
                         * If it was able to dump core, this kills all
                         * other threads in the group and synchronizes with
                         * their demise.  If we lost the race with another
                         * thread getting here, it set group_exit_code
                         * first and our do_group_exit call below will use
                         * that value and ignore the one we pass it.
                         */
                        do_coredump((long)signr, signr, regs);
                }

                /*
                 * Death signals, no core dump.
                 */
                do_group_exit(signr);
                /* NOTREACHED */
        }
        spin_unlock_irq(&sighand->siglock);
        return signr;
}

void exit_signals(struct task_struct *tsk)
{
        int group_stop = 0;
        struct task_struct *t;

        if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
                tsk->flags |= PF_EXITING;
                return;
        }

        spin_lock_irq(&tsk->sighand->siglock);
        /*
         * From now this task is not visible for group-wide signals,
         * see wants_signal(), do_signal_stop().
         */
        tsk->flags |= PF_EXITING;
        if (!signal_pending(tsk))
                goto out;

        /*
         * It could be that __group_complete_signal() chose us to notify
         * about a group-wide signal. Another thread should be woken now
         * to take the signal, since we will not.
         */
        for (t = tsk; (t = next_thread(t)) != tsk; )
                if (!signal_pending(t) && !(t->flags & PF_EXITING))
                        recalc_sigpending_and_wake(t);

        if (unlikely(tsk->signal->group_stop_count) &&
            !--tsk->signal->group_stop_count) {
                tsk->signal->flags = SIGNAL_STOP_STOPPED;
                group_stop = 1;
        }
out:
        spin_unlock_irq(&tsk->sighand->siglock);

        if (unlikely(group_stop)) {
                read_lock(&tasklist_lock);
                do_notify_parent_cldstop(tsk, CLD_STOPPED);
                read_unlock(&tasklist_lock);
        }
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);


/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
        struct restart_block *restart = &current_thread_info()->restart_block;
        return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
        return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
        int error;

        spin_lock_irq(&current->sighand->siglock);
        if (oldset)
                *oldset = current->blocked;

        error = 0;
        switch (how) {
        case SIG_BLOCK:
                sigorsets(&current->blocked, &current->blocked, set);
                break;
        case SIG_UNBLOCK:
                signandsets(&current->blocked, &current->blocked, set);
                break;
        case SIG_SETMASK:
                current->blocked = *set;
                break;
        default:
                error = -EINVAL;
        }
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        return error;
}
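/*
 * Illustrative sketch of the kernel-internal interface above (assumed
 * usage, not built): unlike the syscall, this really can block SIGKILL,
 * which is occasionally what a kernel thread wants.
 */
#if 0
        sigset_t all, old;

        sigfillset(&all);
        sigprocmask(SIG_BLOCK, &all, &old);     /* blocks even SIGKILL */
        /* ... do work without being disturbed ... */
        sigprocmask(SIG_SETMASK, &old, NULL);   /* restore */
#endif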
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
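/*
 * Illustrative sketch only: the user-space view of the blocked-and-pending
 * semantics implemented by do_sigpending() above:
 *
 *	sigset_t mask, pend;
 *
 *	sigemptyset(&mask);
 *	sigaddset(&mask, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &mask, NULL);
 *	raise(SIGUSR1);		// now both pending and blocked
 *	sigpending(&pend);	// sigismember(&pend, SIGUSR1) == 1
 */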
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change the siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* Not generated by the kernel as of now.  */
	case __SI_MESGQ: /* But this one is.  */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/*
			 * None ready -- temporarily unblock the signals
			 * we're interested in while we sleep, so that
			 * we'll be awakened when they arrive.
			 */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}

asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
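/*
 * Illustrative sketch only: kill_something_info() gives sys_kill() the
 * usual kill(2) pid conventions, as seen from user space:
 *
 *	kill(1234, SIGTERM);	// pid > 0:  the process with PID 1234
 *	kill(0, SIGTERM);	// pid == 0: the caller's process group
 *	kill(-5678, SIGTERM);	// pid < -1: process group 5678
 *	kill(-1, SIGTERM);	// pid == -1: every process we may signal
 */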
static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;
	unsigned long flags;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 *
		 * If lock_task_sighand() fails we pretend the task died
		 * after receiving the signal. The window is tiny, and the
		 * signal is private anyway.
		 */
		if (!error && sig && lock_task_sighand(p, &flags)) {
			error = specific_send_sig_info(sig, &info, p);
			unlock_task_sighand(p, &flags);
		}
	}
	rcu_read_unlock();

	return error;
}

/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target thread group. This
 * solves the problem of threads exiting and their PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/*
	 * Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill(), which adds source info.
	 */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
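/*
 * Illustrative sketch only: user space usually reaches sys_tgkill()
 * above through syscall(2), since C libraries of this vintage ship no
 * wrapper for it (the thread ID is assumed to come from SYS_gettid):
 *
 *	#include <sys/syscall.h>
 *
 *	pid_t tid = syscall(SYS_gettid);
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */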
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked."
		 */
		if (__sig_ignored(t, sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

int
do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly;
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked), so this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
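/*
 * Illustrative sketch only: the user-space pattern served by
 * do_sigaltstack() above -- set up an alternate stack, then install a
 * handler with SA_ONSTACK so that, for example, a SIGSEGV caused by
 * stack overflow can still run its handler:
 *
 *	stack_t ss;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 */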
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/*
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
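/*
 * Illustrative sketch only: a user-space sigaction(2) call lands in
 * sys_rt_sigaction() above ("on_sigint" is a hypothetical handler):
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = on_sigint;
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGINT, &sa, NULL);
 */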
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */

__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
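/*
 * Illustrative sketch only: the atomic unblock-and-wait contract that
 * sys_rt_sigsuspend() above implements for sigsuspend(2).  The caller's
 * mask is replaced for the duration of the sleep and restored (via
 * saved_sigmask) on the way back to user space:
 *
 *	sigset_t waitmask;
 *
 *	sigfillset(&waitmask);
 *	sigdelset(&waitmask, SIGUSR1);
 *	sigsuspend(&waitmask);	// returns -1 with errno == EINTR
 *				// once the SIGUSR1 handler has run
 */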