// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless the
	 * signal is SIGKILL, which can't be reported anyway but can be
	 * ignored by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * clear the flag themselves.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);

}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
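
/*
 * Illustrative example (not kernel code): if both SIGSEGV and SIGUSR1
 * are pending and unblocked, next_signal() narrows the first word to
 * SYNCHRONOUS_MASK and reports the synchronous fault first:
 *
 *	sigaddset(&pending->signal, SIGUSR1);
 *	sigaddset(&pending->signal, SIGSEGV);
 *	sig = next_signal(pending, mask);	// -> SIGSEGV, not SIGUSR1
 */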
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;

	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;

		if (task_set_jobctl_pending(task, signr | gstop))
			sig->group_stop_count++;
	}
}
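
/*
 * Illustrative note on the encoding used above: the low
 * JOBCTL_STOP_SIGMASK bits of ->jobctl cache the number of the signal
 * that initiated the group stop, so a thread created during, say, a
 * SIGTSTP-initiated stop joins it roughly like this:
 *
 *	signr = current->jobctl & JOBCTL_STOP_SIGMASK;	// -> SIGTSTP
 *	task_set_jobctl_pending(task, signr | JOBCTL_STOP_PENDING |
 *				      JOBCTL_STOP_CONSUME);
 */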
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
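
/*
 * Illustrative sketch (not from this file): a kernel thread that treats
 * SIGKILL as a stop request typically drains signals with the helpers
 * above, e.g.:
 *
 *	allow_signal(SIGKILL);
 *	while (!kthread_should_stop()) {
 *		if (signal_pending(current)) {
 *			flush_signals(current);	// discard, clear TIF_SIGPENDING
 *			break;
 *		}
 *		// ... do work ...
 *	}
 */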
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
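
/*
 * Illustrative caller-side sketch (roughly what the signal delivery
 * loop in get_signal() does; simplified, not the real code):
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	if (signr)
 *		// run the handler or default action for info.si_signo
 */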
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
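
/*
 * Note: callers in this file go through the signal_wake_up(t, resume)
 * wrapper, which adds TASK_WAKEKILL to the wake state when @resume is
 * set so that even a stopped/traced task is woken for a fatal signal,
 * e.g. in complete_signal():
 *
 *	signal_wake_up(t, sig == SIGKILL);
 */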
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
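
/*
 * Illustrative consequence of the SIGCONT case above: an unprivileged
 * shell may SIGCONT a stopped setuid program it job-controls, because
 * sender and target share a session, even though the uid checks in
 * kill_ok_by_cred() would otherwise yield -EPERM.
 */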
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap will
 * eventually be taken without returning to userland after the existing
 * traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}
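
/*
 * Illustrative consequence of the checks above: for a process-wide
 * SIGTERM, if the main thread blocks SIGTERM but a worker thread does
 * not, the signal stays in shared_pending and complete_signal() below
 * picks the worker (caching it in ->curr_target) to wake and dequeue it.
 */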
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
{
	return;
}
#endif
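
/*
 * Illustrative example of the fixup above: a task in the init
 * namespace with uid 1000 signals a task in a user namespace that only
 * maps uids 0-999.  kuid 1000 has no mapping there, so
 * from_kuid_munged() substitutes the overflow uid (typically 65534),
 * and that is what the target reads in si_uid.
 */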
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV)))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;

		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;

			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, type, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
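
/*
 * Usage note: fatal-signal reporting is off by default; booting with
 *
 *	print-fatal-signals=1
 *
 * enables it, after which a crashing task logs a line like
 * "potentially unexpected fatal signal 11." plus a register dump.
 */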

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);

		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid,
			  const struct cred *cred)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
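
/*
 * Illustrative summary of the pid interpretation implemented above,
 * matching kill(2):
 *
 *	kill(1234, sig)	-> just that process (kill_pid_info)
 *	kill(0, sig)	-> the caller's process group
 *	kill(-5, sig)	-> process group 5
 *	kill(-1, sig)	-> every process we may signal, except init
 *			   and our own thread group
 */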

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV.  And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;

		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
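
/*
 * Illustrative sketch (not from this file): an architecture's page
 * fault handler reporting a bad user access would typically end with
 * something like
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address, tsk);
 *
 * where the optional ___ARCH_SI_* arguments only exist on arches that
 * define them.
 */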

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(info.si_signo, &info, current);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(info.si_signo, &info, current);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(info.si_signo, &info, current);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
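
/*
 * Illustrative lifecycle of a preallocated sigqueue (roughly the POSIX
 * timer path; simplified):
 *
 *	q = sigqueue_alloc();		// timer_create(), may report EAGAIN
 *	...
 *	send_sigqueue(q, pid, type);	// on each timer expiry
 *	...
 *	sigqueue_free(q);		// timer_delete(), see below
 */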

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
1848 * 1849 * write_lock() currently calls preempt_disable() which is the 1850 * same as rcu_read_lock(), but according to Oleg, this is not 1851 * correct to rely on this 1852 */ 1853 rcu_read_lock(); 1854 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent)); 1855 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns), 1856 task_uid(tsk)); 1857 rcu_read_unlock(); 1858 1859 task_cputime(tsk, &utime, &stime); 1860 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime); 1861 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime); 1862 1863 info.si_status = tsk->exit_code & 0x7f; 1864 if (tsk->exit_code & 0x80) 1865 info.si_code = CLD_DUMPED; 1866 else if (tsk->exit_code & 0x7f) 1867 info.si_code = CLD_KILLED; 1868 else { 1869 info.si_code = CLD_EXITED; 1870 info.si_status = tsk->exit_code >> 8; 1871 } 1872 1873 psig = tsk->parent->sighand; 1874 spin_lock_irqsave(&psig->siglock, flags); 1875 if (!tsk->ptrace && sig == SIGCHLD && 1876 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || 1877 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { 1878 /* 1879 * We are exiting and our parent doesn't care. POSIX.1 1880 * defines special semantics for setting SIGCHLD to SIG_IGN 1881 * or setting the SA_NOCLDWAIT flag: we should be reaped 1882 * automatically and not left for our parent's wait4 call. 1883 * Rather than having the parent do it as a magic kind of 1884 * signal handler, we just set this to tell do_exit that we 1885 * can be cleaned up without becoming a zombie. Note that 1886 * we still call __wake_up_parent in this case, because a 1887 * blocked sys_wait4 might now return -ECHILD. 1888 * 1889 * Whether we send SIGCHLD or not for SA_NOCLDWAIT 1890 * is implementation-defined: we do (if you don't want 1891 * it, just use SIG_IGN instead). 1892 */ 1893 autoreap = true; 1894 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) 1895 sig = 0; 1896 } 1897 if (valid_signal(sig) && sig) 1898 __group_send_sig_info(sig, &info, tsk->parent); 1899 __wake_up_parent(tsk, tsk->parent); 1900 spin_unlock_irqrestore(&psig->siglock, flags); 1901 1902 return autoreap; 1903 } 1904 1905 /** 1906 * do_notify_parent_cldstop - notify parent of stopped/continued state change 1907 * @tsk: task reporting the state change 1908 * @for_ptracer: the notification is for ptracer 1909 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report 1910 * 1911 * Notify @tsk's parent that the stopped/continued state has changed. If 1912 * @for_ptracer is %false, @tsk's group leader notifies to its real parent. 1913 * If %true, @tsk reports to @tsk->parent which should be the ptracer. 1914 * 1915 * CONTEXT: 1916 * Must be called with tasklist_lock at least read locked. 

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
2020 */ 2021 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info) 2022 __releases(&current->sighand->siglock) 2023 __acquires(&current->sighand->siglock) 2024 { 2025 bool gstop_done = false; 2026 2027 if (arch_ptrace_stop_needed(exit_code, info)) { 2028 /* 2029 * The arch code has something special to do before a 2030 * ptrace stop. This is allowed to block, e.g. for faults 2031 * on user stack pages. We can't keep the siglock while 2032 * calling arch_ptrace_stop, so we must release it now. 2033 * To preserve proper semantics, we must do this before 2034 * any signal bookkeeping like checking group_stop_count. 2035 * Meanwhile, a SIGKILL could come in before we retake the 2036 * siglock. That must prevent us from sleeping in TASK_TRACED. 2037 * So after regaining the lock, we must check for SIGKILL. 2038 */ 2039 spin_unlock_irq(&current->sighand->siglock); 2040 arch_ptrace_stop(exit_code, info); 2041 spin_lock_irq(&current->sighand->siglock); 2042 if (sigkill_pending(current)) 2043 return; 2044 } 2045 2046 set_special_state(TASK_TRACED); 2047 2048 /* 2049 * We're committing to trapping. TRACED should be visible before 2050 * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). 2051 * Also, transition to TRACED and updates to ->jobctl should be 2052 * atomic with respect to siglock and should be done after the arch 2053 * hook as siglock is released and regrabbed across it. 2054 * 2055 * TRACER TRACEE 2056 * 2057 * ptrace_attach() 2058 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED) 2059 * do_wait() 2060 * set_current_state() smp_wmb(); 2061 * ptrace_do_wait() 2062 * wait_task_stopped() 2063 * task_stopped_code() 2064 * [L] task_is_traced() [S] task_clear_jobctl_trapping(); 2065 */ 2066 smp_wmb(); 2067 2068 current->last_siginfo = info; 2069 current->exit_code = exit_code; 2070 2071 /* 2072 * If @why is CLD_STOPPED, we're trapping to participate in a group 2073 * stop. Do the bookkeeping. Note that if SIGCONT was delivered 2074 * across siglock relocks since INTERRUPT was scheduled, PENDING 2075 * could be clear now. We act as if SIGCONT is received after 2076 * TASK_TRACED is entered - ignore it. 2077 */ 2078 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) 2079 gstop_done = task_participate_group_stop(current); 2080 2081 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */ 2082 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); 2083 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP) 2084 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); 2085 2086 /* entering a trap, clear TRAPPING */ 2087 task_clear_jobctl_trapping(current); 2088 2089 spin_unlock_irq(&current->sighand->siglock); 2090 read_lock(&tasklist_lock); 2091 if (may_ptrace_stop()) { 2092 /* 2093 * Notify parents of the stop. 2094 * 2095 * While ptraced, there are two parents - the ptracer and 2096 * the real_parent of the group_leader. The ptracer should 2097 * know about every stop while the real parent is only 2098 * interested in the completion of group stop. The states 2099 * for the two don't interact with each other. Notify 2100 * separately unless they're gonna be duplicates. 2101 */ 2102 do_notify_parent_cldstop(current, true, why); 2103 if (gstop_done && ptrace_reparented(current)) 2104 do_notify_parent_cldstop(current, false, why); 2105 2106 /* 2107 * Don't want to allow preemption here, because 2108 * sys_ptrace() needs this task to be inactive. 2109 * 2110 * XXX: implement read_unlock_no_resched().
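 *
 * The preempt_disable()/read_unlock()/preempt_enable_no_resched()
 * sequence below approximates that: it drops tasklist_lock without
 * opening a preemption window before the freezable_schedule() that
 * follows.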
2111 */ 2112 preempt_disable(); 2113 read_unlock(&tasklist_lock); 2114 preempt_enable_no_resched(); 2115 cgroup_enter_frozen(); 2116 freezable_schedule(); 2117 cgroup_leave_frozen(true); 2118 } else { 2119 /* 2120 * By the time we got the lock, our tracer went away. 2121 * Don't drop the lock yet, another tracer may come. 2122 * 2123 * If @gstop_done, the ptracer went away between group stop 2124 * completion and here. During detach, it would have set 2125 * JOBCTL_STOP_PENDING on us and we'll re-enter 2126 * TASK_STOPPED in do_signal_stop() on return, so notifying 2127 * the real parent of the group stop completion is enough. 2128 */ 2129 if (gstop_done) 2130 do_notify_parent_cldstop(current, false, why); 2131 2132 /* tasklist protects us from ptrace_freeze_traced() */ 2133 __set_current_state(TASK_RUNNING); 2134 if (clear_code) 2135 current->exit_code = 0; 2136 read_unlock(&tasklist_lock); 2137 } 2138 2139 /* 2140 * We are back. Now reacquire the siglock before touching 2141 * last_siginfo, so that we are sure to have synchronized with 2142 * any signal-sending on another CPU that wants to examine it. 2143 */ 2144 spin_lock_irq(&current->sighand->siglock); 2145 current->last_siginfo = NULL; 2146 2147 /* LISTENING can be set only during STOP traps, clear it */ 2148 current->jobctl &= ~JOBCTL_LISTENING; 2149 2150 /* 2151 * Queued signals ignored us while we were stopped for tracing. 2152 * So check for any that we should take before resuming user mode. 2153 * This sets TIF_SIGPENDING, but never clears it. 2154 */ 2155 recalc_sigpending_tsk(current); 2156 } 2157 2158 static void ptrace_do_notify(int signr, int exit_code, int why) 2159 { 2160 kernel_siginfo_t info; 2161 2162 clear_siginfo(&info); 2163 info.si_signo = signr; 2164 info.si_code = exit_code; 2165 info.si_pid = task_pid_vnr(current); 2166 info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); 2167 2168 /* Let the debugger run. */ 2169 ptrace_stop(exit_code, why, 1, &info); 2170 } 2171 2172 void ptrace_notify(int exit_code) 2173 { 2174 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); 2175 if (unlikely(current->task_works)) 2176 task_work_run(); 2177 2178 spin_lock_irq(&current->sighand->siglock); 2179 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); 2180 spin_unlock_irq(&current->sighand->siglock); 2181 } 2182 2183 /** 2184 * do_signal_stop - handle group stop for SIGSTOP and other stop signals 2185 * @signr: signr causing group stop if initiating 2186 * 2187 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr 2188 * and participate in it. If already set, participate in the existing 2189 * group stop. If participated in a group stop (and thus slept), %true is 2190 * returned with siglock released. 2191 * 2192 * If ptraced, this function doesn't handle stop itself. Instead, 2193 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock 2194 * untouched. The caller must ensure that INTERRUPT trap handling takes 2195 * place afterwards. 2196 * 2197 * CONTEXT: 2198 * Must be called with @current->sighand->siglock held, which is released 2199 * on %true return. 2200 * 2201 * RETURNS: 2202 * %false if group stop is already cancelled or ptrace trap is scheduled. 2203 * %true if participated in group stop.
2204 */ 2205 static bool do_signal_stop(int signr) 2206 __releases(&current->sighand->siglock) 2207 { 2208 struct signal_struct *sig = current->signal; 2209 2210 if (!(current->jobctl & JOBCTL_STOP_PENDING)) { 2211 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; 2212 struct task_struct *t; 2213 2214 /* signr will be recorded in task->jobctl for retries */ 2215 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); 2216 2217 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || 2218 unlikely(signal_group_exit(sig))) 2219 return false; 2220 /* 2221 * There is no group stop already in progress. We must 2222 * initiate one now. 2223 * 2224 * While ptraced, a task may be resumed while group stop is 2225 * still in effect and then receive a stop signal and 2226 * initiate another group stop. This deviates from the 2227 * usual behavior as two consecutive stop signals can't 2228 * cause two group stops when !ptraced. That is why we 2229 * also check !task_is_stopped(t) below. 2230 * 2231 * The condition can be distinguished by testing whether 2232 * SIGNAL_STOP_STOPPED is already set. Don't generate 2233 * group_exit_code in that case. 2234 * 2235 * This is not necessary for SIGNAL_STOP_CONTINUED because 2236 * an intervening stop signal is required to cause two 2237 * continued events regardless of ptrace. 2238 */ 2239 if (!(sig->flags & SIGNAL_STOP_STOPPED)) 2240 sig->group_exit_code = signr; 2241 2242 sig->group_stop_count = 0; 2243 2244 if (task_set_jobctl_pending(current, signr | gstop)) 2245 sig->group_stop_count++; 2246 2247 t = current; 2248 while_each_thread(current, t) { 2249 /* 2250 * Setting state to TASK_STOPPED for a group 2251 * stop is always done with the siglock held, 2252 * so this check has no races. 2253 */ 2254 if (!task_is_stopped(t) && 2255 task_set_jobctl_pending(t, signr | gstop)) { 2256 sig->group_stop_count++; 2257 if (likely(!(t->ptrace & PT_SEIZED))) 2258 signal_wake_up(t, 0); 2259 else 2260 ptrace_trap_notify(t); 2261 } 2262 } 2263 } 2264 2265 if (likely(!current->ptrace)) { 2266 int notify = 0; 2267 2268 /* 2269 * If there are no other threads in the group, or if there 2270 * is a group stop in progress and we are the last to stop, 2271 * report to the parent. 2272 */ 2273 if (task_participate_group_stop(current)) 2274 notify = CLD_STOPPED; 2275 2276 set_special_state(TASK_STOPPED); 2277 spin_unlock_irq(&current->sighand->siglock); 2278 2279 /* 2280 * Notify the parent of the group stop completion. Because 2281 * we're not holding either the siglock or tasklist_lock 2282 * here, ptracer may attach in between; however, this is for 2283 * group stop and should always be delivered to the real 2284 * parent of the group leader. The new ptracer will get 2285 * its notification when this task transitions into 2286 * TASK_TRACED. 2287 */ 2288 if (notify) { 2289 read_lock(&tasklist_lock); 2290 do_notify_parent_cldstop(current, false, notify); 2291 read_unlock(&tasklist_lock); 2292 } 2293 2294 /* Now we don't run again until woken by SIGCONT or SIGKILL */ 2295 cgroup_enter_frozen(); 2296 freezable_schedule(); 2297 return true; 2298 } else { 2299 /* 2300 * While ptraced, group stop is handled by STOP trap. 2301 * Schedule it and let the caller deal with it. 2302 */ 2303 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); 2304 return false; 2305 } 2306 } 2307 2308 /** 2309 * do_jobctl_trap - take care of ptrace jobctl traps 2310 * 2311 * When PT_SEIZED, it's used for both group stop and explicit 2312 * SEIZE/INTERRUPT traps.
Both generate PTRACE_EVENT_STOP trap with 2313 * accompanying siginfo. If stopped, lower eight bits of exit_code contain 2314 * the stop signal; otherwise, %SIGTRAP. 2315 * 2316 * When !PT_SEIZED, it's used only for group stop trap with stop signal 2317 * number as exit_code and no siginfo. 2318 * 2319 * CONTEXT: 2320 * Must be called with @current->sighand->siglock held, which may be 2321 * released and re-acquired before returning with intervening sleep. 2322 */ 2323 static void do_jobctl_trap(void) 2324 { 2325 struct signal_struct *signal = current->signal; 2326 int signr = current->jobctl & JOBCTL_STOP_SIGMASK; 2327 2328 if (current->ptrace & PT_SEIZED) { 2329 if (!signal->group_stop_count && 2330 !(signal->flags & SIGNAL_STOP_STOPPED)) 2331 signr = SIGTRAP; 2332 WARN_ON_ONCE(!signr); 2333 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), 2334 CLD_STOPPED); 2335 } else { 2336 WARN_ON_ONCE(!signr); 2337 ptrace_stop(signr, CLD_STOPPED, 0, NULL); 2338 current->exit_code = 0; 2339 } 2340 } 2341 2342 /** 2343 * do_freezer_trap - handle the freezer jobctl trap 2344 * 2345 * Puts the task into the frozen state, unless the task is about to quit. 2346 * In that case it drops JOBCTL_TRAP_FREEZE. 2347 * 2348 * CONTEXT: 2349 * Must be called with @current->sighand->siglock held, 2350 * which is always released before returning. 2351 */ 2352 static void do_freezer_trap(void) 2353 __releases(&current->sighand->siglock) 2354 { 2355 /* 2356 * If there are other trap bits pending besides JOBCTL_TRAP_FREEZE, 2357 * let's make another loop to give it a chance to be handled. 2358 * In any case, we'll come back. 2359 */ 2360 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) != 2361 JOBCTL_TRAP_FREEZE) { 2362 spin_unlock_irq(&current->sighand->siglock); 2363 return; 2364 } 2365 2366 /* 2367 * Now we're sure that there is no pending fatal signal and no 2368 * pending traps. Clear TIF_SIGPENDING to not get out of schedule() 2369 * immediately (if there is a non-fatal signal pending), and 2370 * put the task to sleep. 2371 */ 2372 __set_current_state(TASK_INTERRUPTIBLE); 2373 clear_thread_flag(TIF_SIGPENDING); 2374 spin_unlock_irq(&current->sighand->siglock); 2375 cgroup_enter_frozen(); 2376 freezable_schedule(); 2377 } 2378 2379 static int ptrace_signal(int signr, kernel_siginfo_t *info) 2380 { 2381 /* 2382 * We do not check sig_kernel_stop(signr) but set this marker 2383 * unconditionally because we do not know whether the debugger will 2384 * change signr. This flag has no meaning unless we are going 2385 * to stop after return from ptrace_stop(). In this case it will 2386 * be checked in do_signal_stop(); we should only stop if it was 2387 * not cleared by SIGCONT while we were sleeping. See also the 2388 * comment in dequeue_signal(). 2389 */ 2390 current->jobctl |= JOBCTL_STOP_DEQUEUED; 2391 ptrace_stop(signr, CLD_TRAPPED, 0, info); 2392 2393 /* We're back. Did the debugger cancel the sig? */ 2394 signr = current->exit_code; 2395 if (signr == 0) 2396 return signr; 2397 2398 current->exit_code = 0; 2399 2400 /* 2401 * Update the siginfo structure if the signal has 2402 * changed. If the debugger wanted something 2403 * specific in the siginfo structure then it should 2404 * have updated *info via PTRACE_SETSIGINFO.
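 *
 * E.g. if a debugger replaces a trapped SIGTRAP with SIGUSR1 without
 * issuing PTRACE_SETSIGINFO, the code below synthesizes a plain
 * SI_USER siginfo attributed to the tracer (current->parent).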
2405 */ 2406 if (signr != info->si_signo) { 2407 clear_siginfo(info); 2408 info->si_signo = signr; 2409 info->si_errno = 0; 2410 info->si_code = SI_USER; 2411 rcu_read_lock(); 2412 info->si_pid = task_pid_vnr(current->parent); 2413 info->si_uid = from_kuid_munged(current_user_ns(), 2414 task_uid(current->parent)); 2415 rcu_read_unlock(); 2416 } 2417 2418 /* If the (new) signal is now blocked, requeue it. */ 2419 if (sigismember(&current->blocked, signr)) { 2420 send_signal(signr, info, current, PIDTYPE_PID); 2421 signr = 0; 2422 } 2423 2424 return signr; 2425 } 2426 2427 bool get_signal(struct ksignal *ksig) 2428 { 2429 struct sighand_struct *sighand = current->sighand; 2430 struct signal_struct *signal = current->signal; 2431 int signr; 2432 2433 if (unlikely(current->task_works)) 2434 task_work_run(); 2435 2436 if (unlikely(uprobe_deny_signal())) 2437 return false; 2438 2439 /* 2440 * Do this once; we can't return to user-mode if freezing() == T. 2441 * do_signal_stop() and ptrace_stop() do freezable_schedule() and 2442 * thus do not need another check after return. 2443 */ 2444 try_to_freeze(); 2445 2446 relock: 2447 spin_lock_irq(&sighand->siglock); 2448 /* 2449 * Every stopped thread goes here after wakeup. Check to see if 2450 * we should notify the parent; prepare_signal(SIGCONT) encodes 2451 * the CLD_ si_code into SIGNAL_CLD_MASK bits. 2452 */ 2453 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { 2454 int why; 2455 2456 if (signal->flags & SIGNAL_CLD_CONTINUED) 2457 why = CLD_CONTINUED; 2458 else 2459 why = CLD_STOPPED; 2460 2461 signal->flags &= ~SIGNAL_CLD_MASK; 2462 2463 spin_unlock_irq(&sighand->siglock); 2464 2465 /* 2466 * Notify the parent that we're continuing. This event is 2467 * always per-process and doesn't make a whole lot of sense 2468 * for ptracers, who shouldn't consume the state via 2469 * wait(2) either, but, for backward compatibility, notify 2470 * the ptracer of the group leader too unless it's gonna be 2471 * a duplicate. 2472 */ 2473 read_lock(&tasklist_lock); 2474 do_notify_parent_cldstop(current, false, why); 2475 2476 if (ptrace_reparented(current->group_leader)) 2477 do_notify_parent_cldstop(current->group_leader, 2478 true, why); 2479 read_unlock(&tasklist_lock); 2480 2481 goto relock; 2482 } 2483 2484 /* Has this task already been marked for death? */ 2485 if (signal_group_exit(signal)) { 2486 ksig->info.si_signo = signr = SIGKILL; 2487 sigdelset(&current->pending.signal, SIGKILL); 2488 recalc_sigpending(); 2489 goto fatal; 2490 } 2491 2492 for (;;) { 2493 struct k_sigaction *ka; 2494 2495 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && 2496 do_signal_stop(0)) 2497 goto relock; 2498 2499 if (unlikely(current->jobctl & 2500 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) { 2501 if (current->jobctl & JOBCTL_TRAP_MASK) { 2502 do_jobctl_trap(); 2503 spin_unlock_irq(&sighand->siglock); 2504 } else if (current->jobctl & JOBCTL_TRAP_FREEZE) 2505 do_freezer_trap(); 2506 2507 goto relock; 2508 } 2509 2510 /* 2511 * If the task is leaving the frozen state, let's update 2512 * cgroup counters and reset the frozen bit. 2513 */ 2514 if (unlikely(cgroup_task_frozen(current))) { 2515 spin_unlock_irq(&sighand->siglock); 2516 cgroup_leave_frozen(false); 2517 goto relock; 2518 } 2519 2520 /* 2521 * Signals generated by the execution of an instruction 2522 * need to be delivered before any other pending signals 2523 * so that the instruction pointer in the signal stack 2524 * frame points to the faulting instruction.
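 *
 * E.g. a SIGSEGV raised by the current instruction is dequeued by
 * dequeue_synchronous_signal() below ahead of an unrelated SIGUSR1
 * that was queued earlier.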
2525 */ 2526 signr = dequeue_synchronous_signal(&ksig->info); 2527 if (!signr) 2528 signr = dequeue_signal(current, &current->blocked, &ksig->info); 2529 2530 if (!signr) 2531 break; /* will return 0 */ 2532 2533 if (unlikely(current->ptrace) && signr != SIGKILL) { 2534 signr = ptrace_signal(signr, &ksig->info); 2535 if (!signr) 2536 continue; 2537 } 2538 2539 ka = &sighand->action[signr-1]; 2540 2541 /* Trace actually delivered signals. */ 2542 trace_signal_deliver(signr, &ksig->info, ka); 2543 2544 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ 2545 continue; 2546 if (ka->sa.sa_handler != SIG_DFL) { 2547 /* Run the handler. */ 2548 ksig->ka = *ka; 2549 2550 if (ka->sa.sa_flags & SA_ONESHOT) 2551 ka->sa.sa_handler = SIG_DFL; 2552 2553 break; /* will return non-zero "signr" value */ 2554 } 2555 2556 /* 2557 * Now we are doing the default action for this signal. 2558 */ 2559 if (sig_kernel_ignore(signr)) /* Default is nothing. */ 2560 continue; 2561 2562 /* 2563 * Global init gets no signals it doesn't want. 2564 * Container-init gets no signals it doesn't want from the same 2565 * container. 2566 * 2567 * Note that if global/container-init sees a sig_kernel_only() 2568 * signal here, the signal must have been generated internally 2569 * or must have come from an ancestor namespace. In either 2570 * case, the signal cannot be dropped. 2571 */ 2572 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && 2573 !sig_kernel_only(signr)) 2574 continue; 2575 2576 if (sig_kernel_stop(signr)) { 2577 /* 2578 * The default action is to stop all threads in 2579 * the thread group. The job control signals 2580 * do nothing in an orphaned pgrp, but SIGSTOP 2581 * always works. Note that siglock needs to be 2582 * dropped during the call to is_orphaned_pgrp() 2583 * because of lock ordering with tasklist_lock. 2584 * This allows an intervening SIGCONT to be posted. 2585 * We need to check for that and bail out if necessary. 2586 */ 2587 if (signr != SIGSTOP) { 2588 spin_unlock_irq(&sighand->siglock); 2589 2590 /* signals can be posted during this window */ 2591 2592 if (is_current_pgrp_orphaned()) 2593 goto relock; 2594 2595 spin_lock_irq(&sighand->siglock); 2596 } 2597 2598 if (likely(do_signal_stop(ksig->info.si_signo))) { 2599 /* It released the siglock. */ 2600 goto relock; 2601 } 2602 2603 /* 2604 * We didn't actually stop, due to a race 2605 * with SIGCONT or something like that. 2606 */ 2607 continue; 2608 } 2609 2610 fatal: 2611 spin_unlock_irq(&sighand->siglock); 2612 if (unlikely(cgroup_task_frozen(current))) 2613 cgroup_leave_frozen(true); 2614 2615 /* 2616 * Anything else is fatal, maybe with a core dump. 2617 */ 2618 current->flags |= PF_SIGNALED; 2619 2620 if (sig_kernel_coredump(signr)) { 2621 if (print_fatal_signals) 2622 print_fatal_signal(ksig->info.si_signo); 2623 proc_coredump_connector(current); 2624 /* 2625 * If it was able to dump core, this kills all 2626 * other threads in the group and synchronizes with 2627 * their demise. If we lost the race with another 2628 * thread getting here, it set group_exit_code 2629 * first and our do_group_exit call below will use 2630 * that value and ignore the one we pass it. 2631 */ 2632 do_coredump(&ksig->info); 2633 } 2634 2635 /* 2636 * Death signals, no core dump.
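 *
 * The signal number passed to do_group_exit() below becomes the
 * group exit code, which the parent observes as WTERMSIG(status)
 * from wait().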
2637 */ 2638 do_group_exit(ksig->info.si_signo); 2639 /* NOTREACHED */ 2640 } 2641 spin_unlock_irq(&sighand->siglock); 2642 2643 ksig->sig = signr; 2644 return ksig->sig > 0; 2645 } 2646 2647 /** 2648 * signal_delivered - do bookkeeping after a signal has been delivered 2649 * @ksig: kernel signal struct 2650 * @stepping: nonzero if debugger single-step or block-step in use 2651 * 2652 * This function should be called when a signal has successfully been 2653 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask 2654 * is always blocked, and the signal itself is blocked unless %SA_NODEFER 2655 * is set in @ksig->ka.sa.sa_flags). Tracing is notified. 2656 */ 2657 static void signal_delivered(struct ksignal *ksig, int stepping) 2658 { 2659 sigset_t blocked; 2660 2661 /* 2662 * A signal was successfully delivered, and the saved sigmask was 2663 * stored on the signal frame and will be restored by sigreturn. 2664 * So we can simply clear the restore sigmask flag. */ 2665 clear_restore_sigmask(); 2666 2667 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask); 2668 if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) 2669 sigaddset(&blocked, ksig->sig); 2670 set_current_blocked(&blocked); 2671 tracehook_signal_handler(stepping); 2672 } 2673 2674 void signal_setup_done(int failed, struct ksignal *ksig, int stepping) 2675 { 2676 if (failed) 2677 force_sigsegv(ksig->sig, current); 2678 else 2679 signal_delivered(ksig, stepping); 2680 } 2681 2682 /* 2683 * It could be that complete_signal() picked us to notify about the 2684 * group-wide signal. Other threads should be notified now to take 2685 * the shared signals in @which since we will not. 2686 */ 2687 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) 2688 { 2689 sigset_t retarget; 2690 struct task_struct *t; 2691 2692 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); 2693 if (sigisemptyset(&retarget)) 2694 return; 2695 2696 t = tsk; 2697 while_each_thread(tsk, t) { 2698 if (t->flags & PF_EXITING) 2699 continue; 2700 2701 if (!has_pending_signals(&retarget, &t->blocked)) 2702 continue; 2703 /* Remove the signals this thread can handle. */ 2704 sigandsets(&retarget, &retarget, &t->blocked); 2705 2706 if (!signal_pending(t)) 2707 signal_wake_up(t, 0); 2708 2709 if (sigisemptyset(&retarget)) 2710 break; 2711 } 2712 } 2713 2714 void exit_signals(struct task_struct *tsk) 2715 { 2716 int group_stop = 0; 2717 sigset_t unblocked; 2718 2719 /* 2720 * @tsk is about to have PF_EXITING set - lock out users which 2721 * expect stable threadgroup. 2722 */ 2723 cgroup_threadgroup_change_begin(tsk); 2724 2725 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { 2726 tsk->flags |= PF_EXITING; 2727 cgroup_threadgroup_change_end(tsk); 2728 return; 2729 } 2730 2731 spin_lock_irq(&tsk->sighand->siglock); 2732 /* 2733 * From now on this task is not visible for group-wide signals, 2734 * see wants_signal(), do_signal_stop(). 2735 */ 2736 tsk->flags |= PF_EXITING; 2737 2738 cgroup_threadgroup_change_end(tsk); 2739 2740 if (!signal_pending(tsk)) 2741 goto out; 2742 2743 unblocked = tsk->blocked; 2744 signotset(&unblocked); 2745 retarget_shared_pending(tsk, &unblocked); 2746 2747 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && 2748 task_participate_group_stop(tsk)) 2749 group_stop = CLD_STOPPED; 2750 out: 2751 spin_unlock_irq(&tsk->sighand->siglock); 2752 2753 /* 2754 * If group stop has completed, deliver the notification. This 2755 * should always go to the real parent of the group leader.
2756 */ 2757 if (unlikely(group_stop)) { 2758 read_lock(&tasklist_lock); 2759 do_notify_parent_cldstop(tsk, false, group_stop); 2760 read_unlock(&tasklist_lock); 2761 } 2762 } 2763 2764 /* 2765 * System call entry points. 2766 */ 2767 2768 /** 2769 * sys_restart_syscall - restart a system call 2770 */ 2771 SYSCALL_DEFINE0(restart_syscall) 2772 { 2773 struct restart_block *restart = &current->restart_block; 2774 return restart->fn(restart); 2775 } 2776 2777 long do_no_restart_syscall(struct restart_block *param) 2778 { 2779 return -EINTR; 2780 } 2781 2782 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) 2783 { 2784 if (signal_pending(tsk) && !thread_group_empty(tsk)) { 2785 sigset_t newblocked; 2786 /* A set of now blocked but previously unblocked signals. */ 2787 sigandnsets(&newblocked, newset, &current->blocked); 2788 retarget_shared_pending(tsk, &newblocked); 2789 } 2790 tsk->blocked = *newset; 2791 recalc_sigpending(); 2792 } 2793 2794 /** 2795 * set_current_blocked - change current->blocked mask 2796 * @newset: new mask 2797 * 2798 * It is wrong to change ->blocked directly; this helper should be used 2799 * to ensure the process can't miss a shared signal we are going to block. 2800 */ 2801 void set_current_blocked(sigset_t *newset) 2802 { 2803 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP)); 2804 __set_current_blocked(newset); 2805 } 2806 2807 void __set_current_blocked(const sigset_t *newset) 2808 { 2809 struct task_struct *tsk = current; 2810 2811 /* 2812 * In case the signal mask hasn't changed, there is nothing we need 2813 * to do. The current->blocked shouldn't be modified by another task. 2814 */ 2815 if (sigequalsets(&tsk->blocked, newset)) 2816 return; 2817 2818 spin_lock_irq(&tsk->sighand->siglock); 2819 __set_task_blocked(tsk, newset); 2820 spin_unlock_irq(&tsk->sighand->siglock); 2821 } 2822 2823 /* 2824 * This is also useful for kernel threads that want to temporarily 2825 * (or permanently) block certain signals. 2826 * 2827 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel 2828 * interface happily blocks "unblockable" signals like SIGKILL 2829 * and friends. 2830 */ 2831 int sigprocmask(int how, sigset_t *set, sigset_t *oldset) 2832 { 2833 struct task_struct *tsk = current; 2834 sigset_t newset; 2835 2836 /* Lockless, only current can change ->blocked, never from irq */ 2837 if (oldset) 2838 *oldset = tsk->blocked; 2839 2840 switch (how) { 2841 case SIG_BLOCK: 2842 sigorsets(&newset, &tsk->blocked, set); 2843 break; 2844 case SIG_UNBLOCK: 2845 sigandnsets(&newset, &tsk->blocked, set); 2846 break; 2847 case SIG_SETMASK: 2848 newset = *set; 2849 break; 2850 default: 2851 return -EINVAL; 2852 } 2853 2854 __set_current_blocked(&newset); 2855 return 0; 2856 } 2857 EXPORT_SYMBOL(sigprocmask); 2858 2859 /* 2860 * The API helps set app-provided sigmasks. 2861 * 2862 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and 2863 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
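 *
 * Typical caller pattern (illustrative sketch only; do_the_wait() is a
 * hypothetical stand-in for the syscall's own wait step):
 *
 *	sigset_t set, oldset;
 *	ret = set_user_sigmask(usigmask, &set, &oldset, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_the_wait(...);
 *	restore_user_sigmask(usigmask, &oldset);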
2864 */ 2865 int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set, 2866 sigset_t *oldset, size_t sigsetsize) 2867 { 2868 if (!usigmask) 2869 return 0; 2870 2871 if (sigsetsize != sizeof(sigset_t)) 2872 return -EINVAL; 2873 if (copy_from_user(set, usigmask, sizeof(sigset_t))) 2874 return -EFAULT; 2875 2876 *oldset = current->blocked; 2877 set_current_blocked(set); 2878 2879 return 0; 2880 } 2881 EXPORT_SYMBOL(set_user_sigmask); 2882 2883 #ifdef CONFIG_COMPAT 2884 int set_compat_user_sigmask(const compat_sigset_t __user *usigmask, 2885 sigset_t *set, sigset_t *oldset, 2886 size_t sigsetsize) 2887 { 2888 if (!usigmask) 2889 return 0; 2890 2891 if (sigsetsize != sizeof(compat_sigset_t)) 2892 return -EINVAL; 2893 if (get_compat_sigset(set, usigmask)) 2894 return -EFAULT; 2895 2896 *oldset = current->blocked; 2897 set_current_blocked(set); 2898 2899 return 0; 2900 } 2901 EXPORT_SYMBOL(set_compat_user_sigmask); 2902 #endif 2903 2904 /* 2905 * restore_user_sigmask: 2906 * usigmask: sigmask passed in from userland. 2907 * sigsaved: saved sigmask when the syscall started and changed the sigmask to 2908 * usigmask. 2909 * 2910 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and 2911 * epoll_pwait where a new sigmask is passed in from userland for the syscalls. 2912 */ 2913 void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved) 2914 { 2915 2916 if (!usigmask) 2917 return; 2918 /* 2919 * When signals are pending, do not restore them here. 2920 * Restoring sigmask here can lead to delivering signals that the above 2921 * syscalls are intended to block because of the sigmask passed in. 2922 */ 2923 if (signal_pending(current)) { 2924 current->saved_sigmask = *sigsaved; 2925 set_restore_sigmask(); 2926 return; 2927 } 2928 2929 /* 2930 * This is needed because the fast syscall return path does not restore 2931 * saved_sigmask when signals are not pending. 2932 */ 2933 set_current_blocked(sigsaved); 2934 } 2935 EXPORT_SYMBOL(restore_user_sigmask); 2936 2937 /** 2938 * sys_rt_sigprocmask - change the list of currently blocked signals 2939 * @how: whether to add, remove, or set signals 2940 * @nset: new signal mask, or %NULL to leave the mask unchanged 2941 * @oset: previous value of signal mask if non-null 2942 * @sigsetsize: size of sigset_t type 2943 */ 2944 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, 2945 sigset_t __user *, oset, size_t, sigsetsize) 2946 { 2947 sigset_t old_set, new_set; 2948 int error; 2949 2950 /* XXX: Don't preclude handling different sized sigset_t's. */ 2951 if (sigsetsize != sizeof(sigset_t)) 2952 return -EINVAL; 2953 2954 old_set = current->blocked; 2955 2956 if (nset) { 2957 if (copy_from_user(&new_set, nset, sizeof(sigset_t))) 2958 return -EFAULT; 2959 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2960 2961 error = sigprocmask(how, &new_set, NULL); 2962 if (error) 2963 return error; 2964 } 2965 2966 if (oset) { 2967 if (copy_to_user(oset, &old_set, sizeof(sigset_t))) 2968 return -EFAULT; 2969 } 2970 2971 return 0; 2972 } 2973 2974 #ifdef CONFIG_COMPAT 2975 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset, 2976 compat_sigset_t __user *, oset, compat_size_t, sigsetsize) 2977 { 2978 sigset_t old_set = current->blocked; 2979 2980 /* XXX: Don't preclude handling different sized sigset_t's.
*/ 2981 if (sigsetsize != sizeof(sigset_t)) 2982 return -EINVAL; 2983 2984 if (nset) { 2985 sigset_t new_set; 2986 int error; 2987 if (get_compat_sigset(&new_set, nset)) 2988 return -EFAULT; 2989 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2990 2991 error = sigprocmask(how, &new_set, NULL); 2992 if (error) 2993 return error; 2994 } 2995 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0; 2996 } 2997 #endif 2998 2999 static void do_sigpending(sigset_t *set) 3000 { 3001 spin_lock_irq(&current->sighand->siglock); 3002 sigorsets(set, &current->pending.signal, 3003 &current->signal->shared_pending.signal); 3004 spin_unlock_irq(&current->sighand->siglock); 3005 3006 /* Outside the lock because only this thread touches it. */ 3007 sigandsets(set, &current->blocked, set); 3008 } 3009 3010 /** 3011 * sys_rt_sigpending - examine pending signals that have been raised 3012 * while blocked 3013 * @uset: stores pending signals 3014 * @sigsetsize: size of sigset_t type or smaller 3015 */ 3016 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) 3017 { 3018 sigset_t set; 3019 3020 if (sigsetsize > sizeof(*uset)) 3021 return -EINVAL; 3022 3023 do_sigpending(&set); 3024 3025 if (copy_to_user(uset, &set, sigsetsize)) 3026 return -EFAULT; 3027 3028 return 0; 3029 } 3030 3031 #ifdef CONFIG_COMPAT 3032 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset, 3033 compat_size_t, sigsetsize) 3034 { 3035 sigset_t set; 3036 3037 if (sigsetsize > sizeof(*uset)) 3038 return -EINVAL; 3039 3040 do_sigpending(&set); 3041 3042 return put_compat_sigset(uset, &set, sigsetsize); 3043 } 3044 #endif 3045 3046 static const struct { 3047 unsigned char limit, layout; 3048 } sig_sicodes[] = { 3049 [SIGILL] = { NSIGILL, SIL_FAULT }, 3050 [SIGFPE] = { NSIGFPE, SIL_FAULT }, 3051 [SIGSEGV] = { NSIGSEGV, SIL_FAULT }, 3052 [SIGBUS] = { NSIGBUS, SIL_FAULT }, 3053 [SIGTRAP] = { NSIGTRAP, SIL_FAULT }, 3054 #if defined(SIGEMT) 3055 [SIGEMT] = { NSIGEMT, SIL_FAULT }, 3056 #endif 3057 [SIGCHLD] = { NSIGCHLD, SIL_CHLD }, 3058 [SIGPOLL] = { NSIGPOLL, SIL_POLL }, 3059 [SIGSYS] = { NSIGSYS, SIL_SYS }, 3060 }; 3061 3062 static bool known_siginfo_layout(unsigned sig, int si_code) 3063 { 3064 if (si_code == SI_KERNEL) 3065 return true; 3066 else if (si_code > SI_USER) { 3067 if (sig_specific_sicodes(sig)) { 3068 if (si_code <= sig_sicodes[sig].limit) 3069 return true; 3070 } 3071 else if (si_code <= NSIGPOLL) 3072 return true; 3073 } 3074 else if (si_code >= SI_DETHREAD) 3075 return true; 3076 else if (si_code == SI_ASYNCNL) 3077 return true; 3078 return false; 3079 } 3080 3081 enum siginfo_layout siginfo_layout(unsigned sig, int si_code) 3082 { 3083 enum siginfo_layout layout = SIL_KILL; 3084 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) { 3085 if ((sig < ARRAY_SIZE(sig_sicodes)) && 3086 (si_code <= sig_sicodes[sig].limit)) { 3087 layout = sig_sicodes[sig].layout; 3088 /* Handle the exceptions */ 3089 if ((sig == SIGBUS) && 3090 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO)) 3091 layout = SIL_FAULT_MCEERR; 3092 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR)) 3093 layout = SIL_FAULT_BNDERR; 3094 #ifdef SEGV_PKUERR 3095 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR)) 3096 layout = SIL_FAULT_PKUERR; 3097 #endif 3098 } 3099 else if (si_code <= NSIGPOLL) 3100 layout = SIL_POLL; 3101 } else { 3102 if (si_code == SI_TIMER) 3103 layout = SIL_TIMER; 3104 else if (si_code == SI_SIGIO) 3105 layout = SIL_POLL; 3106 else if (si_code < 0) 3107 layout = SIL_RT; 3108 } 3109 return layout; 3110 }
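/*
 * Illustrative mapping (editorial sketch, not exhaustive):
 * siginfo_layout(SIGSEGV, SEGV_MAPERR) yields SIL_FAULT,
 * siginfo_layout(SIGCHLD, CLD_EXITED) yields SIL_CHLD, and a negative
 * si_code such as SI_QUEUE yields SIL_RT, with SI_TIMER and SI_SIGIO
 * special-cased to SIL_TIMER and SIL_POLL above.
 */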
3111 3112 static inline char __user *si_expansion(const siginfo_t __user *info) 3113 { 3114 return ((char __user *)info) + sizeof(struct kernel_siginfo); 3115 } 3116 3117 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from) 3118 { 3119 char __user *expansion = si_expansion(to); 3120 if (copy_to_user(to, from , sizeof(struct kernel_siginfo))) 3121 return -EFAULT; 3122 if (clear_user(expansion, SI_EXPANSION_SIZE)) 3123 return -EFAULT; 3124 return 0; 3125 } 3126 3127 static int post_copy_siginfo_from_user(kernel_siginfo_t *info, 3128 const siginfo_t __user *from) 3129 { 3130 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) { 3131 char __user *expansion = si_expansion(from); 3132 char buf[SI_EXPANSION_SIZE]; 3133 int i; 3134 /* 3135 * An unknown si_code might need more than 3136 * sizeof(struct kernel_siginfo) bytes. Verify all of the 3137 * extra bytes are 0. This guarantees copy_siginfo_to_user 3138 * will return this data to userspace exactly. 3139 */ 3140 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE)) 3141 return -EFAULT; 3142 for (i = 0; i < SI_EXPANSION_SIZE; i++) { 3143 if (buf[i] != 0) 3144 return -E2BIG; 3145 } 3146 } 3147 return 0; 3148 } 3149 3150 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to, 3151 const siginfo_t __user *from) 3152 { 3153 if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) 3154 return -EFAULT; 3155 to->si_signo = signo; 3156 return post_copy_siginfo_from_user(to, from); 3157 } 3158 3159 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from) 3160 { 3161 if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) 3162 return -EFAULT; 3163 return post_copy_siginfo_from_user(to, from); 3164 } 3165 3166 #ifdef CONFIG_COMPAT 3167 int copy_siginfo_to_user32(struct compat_siginfo __user *to, 3168 const struct kernel_siginfo *from) 3169 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION) 3170 { 3171 return __copy_siginfo_to_user32(to, from, in_x32_syscall()); 3172 } 3173 int __copy_siginfo_to_user32(struct compat_siginfo __user *to, 3174 const struct kernel_siginfo *from, bool x32_ABI) 3175 #endif 3176 { 3177 struct compat_siginfo new; 3178 memset(&new, 0, sizeof(new)); 3179 3180 new.si_signo = from->si_signo; 3181 new.si_errno = from->si_errno; 3182 new.si_code = from->si_code; 3183 switch(siginfo_layout(from->si_signo, from->si_code)) { 3184 case SIL_KILL: 3185 new.si_pid = from->si_pid; 3186 new.si_uid = from->si_uid; 3187 break; 3188 case SIL_TIMER: 3189 new.si_tid = from->si_tid; 3190 new.si_overrun = from->si_overrun; 3191 new.si_int = from->si_int; 3192 break; 3193 case SIL_POLL: 3194 new.si_band = from->si_band; 3195 new.si_fd = from->si_fd; 3196 break; 3197 case SIL_FAULT: 3198 new.si_addr = ptr_to_compat(from->si_addr); 3199 #ifdef __ARCH_SI_TRAPNO 3200 new.si_trapno = from->si_trapno; 3201 #endif 3202 break; 3203 case SIL_FAULT_MCEERR: 3204 new.si_addr = ptr_to_compat(from->si_addr); 3205 #ifdef __ARCH_SI_TRAPNO 3206 new.si_trapno = from->si_trapno; 3207 #endif 3208 new.si_addr_lsb = from->si_addr_lsb; 3209 break; 3210 case SIL_FAULT_BNDERR: 3211 new.si_addr = ptr_to_compat(from->si_addr); 3212 #ifdef __ARCH_SI_TRAPNO 3213 new.si_trapno = from->si_trapno; 3214 #endif 3215 new.si_lower = ptr_to_compat(from->si_lower); 3216 new.si_upper = ptr_to_compat(from->si_upper); 3217 break; 3218 case SIL_FAULT_PKUERR: 3219 new.si_addr = ptr_to_compat(from->si_addr); 3220 #ifdef __ARCH_SI_TRAPNO 3221 new.si_trapno = from->si_trapno; 3222 #endif 3223 
new.si_pkey = from->si_pkey; 3224 break; 3225 case SIL_CHLD: 3226 new.si_pid = from->si_pid; 3227 new.si_uid = from->si_uid; 3228 new.si_status = from->si_status; 3229 #ifdef CONFIG_X86_X32_ABI 3230 if (x32_ABI) { 3231 new._sifields._sigchld_x32._utime = from->si_utime; 3232 new._sifields._sigchld_x32._stime = from->si_stime; 3233 } else 3234 #endif 3235 { 3236 new.si_utime = from->si_utime; 3237 new.si_stime = from->si_stime; 3238 } 3239 break; 3240 case SIL_RT: 3241 new.si_pid = from->si_pid; 3242 new.si_uid = from->si_uid; 3243 new.si_int = from->si_int; 3244 break; 3245 case SIL_SYS: 3246 new.si_call_addr = ptr_to_compat(from->si_call_addr); 3247 new.si_syscall = from->si_syscall; 3248 new.si_arch = from->si_arch; 3249 break; 3250 } 3251 3252 if (copy_to_user(to, &new, sizeof(struct compat_siginfo))) 3253 return -EFAULT; 3254 3255 return 0; 3256 } 3257 3258 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to, 3259 const struct compat_siginfo *from) 3260 { 3261 clear_siginfo(to); 3262 to->si_signo = from->si_signo; 3263 to->si_errno = from->si_errno; 3264 to->si_code = from->si_code; 3265 switch(siginfo_layout(from->si_signo, from->si_code)) { 3266 case SIL_KILL: 3267 to->si_pid = from->si_pid; 3268 to->si_uid = from->si_uid; 3269 break; 3270 case SIL_TIMER: 3271 to->si_tid = from->si_tid; 3272 to->si_overrun = from->si_overrun; 3273 to->si_int = from->si_int; 3274 break; 3275 case SIL_POLL: 3276 to->si_band = from->si_band; 3277 to->si_fd = from->si_fd; 3278 break; 3279 case SIL_FAULT: 3280 to->si_addr = compat_ptr(from->si_addr); 3281 #ifdef __ARCH_SI_TRAPNO 3282 to->si_trapno = from->si_trapno; 3283 #endif 3284 break; 3285 case SIL_FAULT_MCEERR: 3286 to->si_addr = compat_ptr(from->si_addr); 3287 #ifdef __ARCH_SI_TRAPNO 3288 to->si_trapno = from->si_trapno; 3289 #endif 3290 to->si_addr_lsb = from->si_addr_lsb; 3291 break; 3292 case SIL_FAULT_BNDERR: 3293 to->si_addr = compat_ptr(from->si_addr); 3294 #ifdef __ARCH_SI_TRAPNO 3295 to->si_trapno = from->si_trapno; 3296 #endif 3297 to->si_lower = compat_ptr(from->si_lower); 3298 to->si_upper = compat_ptr(from->si_upper); 3299 break; 3300 case SIL_FAULT_PKUERR: 3301 to->si_addr = compat_ptr(from->si_addr); 3302 #ifdef __ARCH_SI_TRAPNO 3303 to->si_trapno = from->si_trapno; 3304 #endif 3305 to->si_pkey = from->si_pkey; 3306 break; 3307 case SIL_CHLD: 3308 to->si_pid = from->si_pid; 3309 to->si_uid = from->si_uid; 3310 to->si_status = from->si_status; 3311 #ifdef CONFIG_X86_X32_ABI 3312 if (in_x32_syscall()) { 3313 to->si_utime = from->_sifields._sigchld_x32._utime; 3314 to->si_stime = from->_sifields._sigchld_x32._stime; 3315 } else 3316 #endif 3317 { 3318 to->si_utime = from->si_utime; 3319 to->si_stime = from->si_stime; 3320 } 3321 break; 3322 case SIL_RT: 3323 to->si_pid = from->si_pid; 3324 to->si_uid = from->si_uid; 3325 to->si_int = from->si_int; 3326 break; 3327 case SIL_SYS: 3328 to->si_call_addr = compat_ptr(from->si_call_addr); 3329 to->si_syscall = from->si_syscall; 3330 to->si_arch = from->si_arch; 3331 break; 3332 } 3333 return 0; 3334 } 3335 3336 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to, 3337 const struct compat_siginfo __user *ufrom) 3338 { 3339 struct compat_siginfo from; 3340 3341 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) 3342 return -EFAULT; 3343 3344 from.si_signo = signo; 3345 return post_copy_siginfo_from_user32(to, &from); 3346 } 3347 3348 int copy_siginfo_from_user32(struct kernel_siginfo *to, 3349 const struct compat_siginfo __user *ufrom) 3350 { 3351 
struct compat_siginfo from; 3352 3353 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) 3354 return -EFAULT; 3355 3356 return post_copy_siginfo_from_user32(to, &from); 3357 } 3358 #endif /* CONFIG_COMPAT */ 3359 3360 /** 3361 * do_sigtimedwait - wait for queued signals specified in @which 3362 * @which: queued signals to wait for 3363 * @info: if non-null, the signal's siginfo is returned here 3364 * @ts: upper bound on process time suspension 3365 */ 3366 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info, 3367 const struct timespec64 *ts) 3368 { 3369 ktime_t *to = NULL, timeout = KTIME_MAX; 3370 struct task_struct *tsk = current; 3371 sigset_t mask = *which; 3372 int sig, ret = 0; 3373 3374 if (ts) { 3375 if (!timespec64_valid(ts)) 3376 return -EINVAL; 3377 timeout = timespec64_to_ktime(*ts); 3378 to = &timeout; 3379 } 3380 3381 /* 3382 * Invert the set of allowed signals to get those we want to block. 3383 */ 3384 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); 3385 signotset(&mask); 3386 3387 spin_lock_irq(&tsk->sighand->siglock); 3388 sig = dequeue_signal(tsk, &mask, info); 3389 if (!sig && timeout) { 3390 /* 3391 * None ready, temporarily unblock those we're interested in 3392 * while we are sleeping, so that we'll be awakened when 3393 * they arrive. Unblocking is always fine; we can avoid 3394 * set_current_blocked(). 3395 */ 3396 tsk->real_blocked = tsk->blocked; 3397 sigandsets(&tsk->blocked, &tsk->blocked, &mask); 3398 recalc_sigpending(); 3399 spin_unlock_irq(&tsk->sighand->siglock); 3400 3401 __set_current_state(TASK_INTERRUPTIBLE); 3402 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns, 3403 HRTIMER_MODE_REL); 3404 spin_lock_irq(&tsk->sighand->siglock); 3405 __set_task_blocked(tsk, &tsk->real_blocked); 3406 sigemptyset(&tsk->real_blocked); 3407 sig = dequeue_signal(tsk, &mask, info); 3408 } 3409 spin_unlock_irq(&tsk->sighand->siglock); 3410 3411 if (sig) 3412 return sig; 3413 return ret ? -EINTR : -EAGAIN; 3414 } 3415 3416 /** 3417 * sys_rt_sigtimedwait - synchronously wait for queued signals specified 3418 * in @uthese 3419 * @uthese: queued signals to wait for 3420 * @uinfo: if non-null, the signal's siginfo is returned here 3421 * @uts: upper bound on process time suspension 3422 * @sigsetsize: size of sigset_t type 3423 */ 3424 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, 3425 siginfo_t __user *, uinfo, 3426 const struct __kernel_timespec __user *, uts, 3427 size_t, sigsetsize) 3428 { 3429 sigset_t these; 3430 struct timespec64 ts; 3431 kernel_siginfo_t info; 3432 int ret; 3433 3434 /* XXX: Don't preclude handling different sized sigset_t's. */ 3435 if (sigsetsize != sizeof(sigset_t)) 3436 return -EINVAL; 3437 3438 if (copy_from_user(&these, uthese, sizeof(these))) 3439 return -EFAULT; 3440 3441 if (uts) { 3442 if (get_timespec64(&ts, uts)) 3443 return -EFAULT; 3444 } 3445 3446 ret = do_sigtimedwait(&these, &info, uts ?
&ts : NULL); 3447 3448 if (ret > 0 && uinfo) { 3449 if (copy_siginfo_to_user(uinfo, &info)) 3450 ret = -EFAULT; 3451 } 3452 3453 return ret; 3454 } 3455 3456 #ifdef CONFIG_COMPAT_32BIT_TIME 3457 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese, 3458 siginfo_t __user *, uinfo, 3459 const struct old_timespec32 __user *, uts, 3460 size_t, sigsetsize) 3461 { 3462 sigset_t these; 3463 struct timespec64 ts; 3464 kernel_siginfo_t info; 3465 int ret; 3466 3467 if (sigsetsize != sizeof(sigset_t)) 3468 return -EINVAL; 3469 3470 if (copy_from_user(&these, uthese, sizeof(these))) 3471 return -EFAULT; 3472 3473 if (uts) { 3474 if (get_old_timespec32(&ts, uts)) 3475 return -EFAULT; 3476 } 3477 3478 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); 3479 3480 if (ret > 0 && uinfo) { 3481 if (copy_siginfo_to_user(uinfo, &info)) 3482 ret = -EFAULT; 3483 } 3484 3485 return ret; 3486 } 3487 #endif 3488 3489 #ifdef CONFIG_COMPAT 3490 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese, 3491 struct compat_siginfo __user *, uinfo, 3492 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize) 3493 { 3494 sigset_t s; 3495 struct timespec64 t; 3496 kernel_siginfo_t info; 3497 long ret; 3498 3499 if (sigsetsize != sizeof(sigset_t)) 3500 return -EINVAL; 3501 3502 if (get_compat_sigset(&s, uthese)) 3503 return -EFAULT; 3504 3505 if (uts) { 3506 if (get_timespec64(&t, uts)) 3507 return -EFAULT; 3508 } 3509 3510 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); 3511 3512 if (ret > 0 && uinfo) { 3513 if (copy_siginfo_to_user32(uinfo, &info)) 3514 ret = -EFAULT; 3515 } 3516 3517 return ret; 3518 } 3519 3520 #ifdef CONFIG_COMPAT_32BIT_TIME 3521 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese, 3522 struct compat_siginfo __user *, uinfo, 3523 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize) 3524 { 3525 sigset_t s; 3526 struct timespec64 t; 3527 kernel_siginfo_t info; 3528 long ret; 3529 3530 if (sigsetsize != sizeof(sigset_t)) 3531 return -EINVAL; 3532 3533 if (get_compat_sigset(&s, uthese)) 3534 return -EFAULT; 3535 3536 if (uts) { 3537 if (get_old_timespec32(&t, uts)) 3538 return -EFAULT; 3539 } 3540 3541 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); 3542 3543 if (ret > 0 && uinfo) { 3544 if (copy_siginfo_to_user32(uinfo, &info)) 3545 ret = -EFAULT; 3546 } 3547 3548 return ret; 3549 } 3550 #endif 3551 #endif 3552 3553 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info) 3554 { 3555 clear_siginfo(info); 3556 info->si_signo = sig; 3557 info->si_errno = 0; 3558 info->si_code = SI_USER; 3559 info->si_pid = task_tgid_vnr(current); 3560 info->si_uid = from_kuid_munged(current_user_ns(), current_uid()); 3561 } 3562 3563 /** 3564 * sys_kill - send a signal to a process 3565 * @pid: the PID of the process 3566 * @sig: signal to be sent 3567 */ 3568 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) 3569 { 3570 struct kernel_siginfo info; 3571 3572 prepare_kill_siginfo(sig, &info); 3573 3574 return kill_something_info(sig, &info, pid); 3575 } 3576 3577 /* 3578 * Verify that the signaler and signalee either are in the same pid namespace 3579 * or that the signaler's pid namespace is an ancestor of the signalee's pid 3580 * namespace. 
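 *
 * For example, with nested pid namespaces ns0 -> ns1 -> ns2, a signaler
 * in ns1 may signal tasks in ns1 or ns2 (ns1 is an ancestor of ns2),
 * but not tasks visible only in ns0.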
*/ 3582 static bool access_pidfd_pidns(struct pid *pid) 3583 { 3584 struct pid_namespace *active = task_active_pid_ns(current); 3585 struct pid_namespace *p = ns_of_pid(pid); 3586 3587 for (;;) { 3588 if (!p) 3589 return false; 3590 if (p == active) 3591 break; 3592 p = p->parent; 3593 } 3594 3595 return true; 3596 } 3597 3598 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info) 3599 { 3600 #ifdef CONFIG_COMPAT 3601 /* 3602 * Avoid hooking up compat syscalls and instead handle necessary 3603 * conversions here. Note, this is a stop-gap measure and should not be 3604 * considered a generic solution. 3605 */ 3606 if (in_compat_syscall()) 3607 return copy_siginfo_from_user32( 3608 kinfo, (struct compat_siginfo __user *)info); 3609 #endif 3610 return copy_siginfo_from_user(kinfo, info); 3611 } 3612 3613 static struct pid *pidfd_to_pid(const struct file *file) 3614 { 3615 if (file->f_op == &pidfd_fops) 3616 return file->private_data; 3617 3618 return tgid_pidfd_to_pid(file); 3619 } 3620 3621 /** 3622 * sys_pidfd_send_signal - send a signal to a process through a task file 3623 * descriptor 3624 * @pidfd: the file descriptor of the process 3625 * @sig: signal to be sent 3626 * @info: the signal info 3627 * @flags: future flags to be passed 3628 * 3629 * The syscall currently only signals via PIDTYPE_PID which covers 3630 * kill(<positive-pid>, <signal>). It does not signal threads or process 3631 * groups. 3632 * In order to extend the syscall to threads and process groups the @flags 3633 * argument should be used. In essence, the @flags argument will determine 3634 * what is signaled and not the file descriptor itself. In other words, 3635 * grouping is a property of the flags argument, not a property of the file 3636 * descriptor. 3637 * 3638 * Return: 0 on success, negative errno on failure 3639 */ 3640 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, 3641 siginfo_t __user *, info, unsigned int, flags) 3642 { 3643 int ret; 3644 struct fd f; 3645 struct pid *pid; 3646 kernel_siginfo_t kinfo; 3647 3648 /* Enforce that @flags is 0 until we add an extension. */ 3649 if (flags) 3650 return -EINVAL; 3651 3652 f = fdget(pidfd); 3653 if (!f.file) 3654 return -EBADF; 3655 3656 /* Is this a pidfd? */ 3657 pid = pidfd_to_pid(f.file); 3658 if (IS_ERR(pid)) { 3659 ret = PTR_ERR(pid); 3660 goto err; 3661 } 3662 3663 ret = -EINVAL; 3664 if (!access_pidfd_pidns(pid)) 3665 goto err; 3666 3667 if (info) { 3668 ret = copy_siginfo_from_user_any(&kinfo, info); 3669 if (unlikely(ret)) 3670 goto err; 3671 3672 ret = -EINVAL; 3673 if (unlikely(sig != kinfo.si_signo)) 3674 goto err; 3675 3676 /* Only allow sending arbitrary signals to yourself. */ 3677 ret = -EPERM; 3678 if ((task_pid(current) != pid) && 3679 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) 3680 goto err; 3681 } else { 3682 prepare_kill_siginfo(sig, &kinfo); 3683 } 3684 3685 ret = kill_pid_info(sig, &kinfo, pid); 3686 3687 err: 3688 fdput(f); 3689 return ret; 3690 } 3691 3692 static int 3693 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info) 3694 { 3695 struct task_struct *p; 3696 int error = -ESRCH; 3697 3698 rcu_read_lock(); 3699 p = find_task_by_vpid(pid); 3700 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { 3701 error = check_kill_permission(sig, info, p); 3702 /* 3703 * The null signal is a permissions and process existence 3704 * probe. No signal is actually delivered.
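 *
 * This is what makes kill(pid, 0) usable as a liveness/permission
 * probe from userspace: 0 means the target exists and may be
 * signaled, while -ESRCH means it is gone.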
*/ 3706 if (!error && sig) { 3707 error = do_send_sig_info(sig, info, p, PIDTYPE_PID); 3708 /* 3709 * If lock_task_sighand() failed we pretend the task 3710 * dies after receiving the signal. The window is tiny, 3711 * and the signal is private anyway. 3712 */ 3713 if (unlikely(error == -ESRCH)) 3714 error = 0; 3715 } 3716 } 3717 rcu_read_unlock(); 3718 3719 return error; 3720 } 3721 3722 static int do_tkill(pid_t tgid, pid_t pid, int sig) 3723 { 3724 struct kernel_siginfo info; 3725 3726 clear_siginfo(&info); 3727 info.si_signo = sig; 3728 info.si_errno = 0; 3729 info.si_code = SI_TKILL; 3730 info.si_pid = task_tgid_vnr(current); 3731 info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); 3732 3733 return do_send_specific(tgid, pid, sig, &info); 3734 } 3735 3736 /** 3737 * sys_tgkill - send signal to one specific thread 3738 * @tgid: the thread group ID of the thread 3739 * @pid: the PID of the thread 3740 * @sig: signal to be sent 3741 * 3742 * This syscall also checks the @tgid and returns -ESRCH even if the PID 3743 * exists but no longer belongs to the target process. This 3744 * method solves the problem of threads exiting and PIDs getting reused. 3745 */ 3746 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) 3747 { 3748 /* This is only valid for single tasks */ 3749 if (pid <= 0 || tgid <= 0) 3750 return -EINVAL; 3751 3752 return do_tkill(tgid, pid, sig); 3753 } 3754 3755 /** 3756 * sys_tkill - send signal to one specific task 3757 * @pid: the PID of the task 3758 * @sig: signal to be sent 3759 * 3760 * Send a signal to only one task, even if it's a CLONE_THREAD task. 3761 */ 3762 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) 3763 { 3764 /* This is only valid for single tasks */ 3765 if (pid <= 0) 3766 return -EINVAL; 3767 3768 return do_tkill(0, pid, sig); 3769 } 3770 3771 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info) 3772 { 3773 /* Not even root can pretend to send signals from the kernel. 3774 * Nor can they impersonate a kill()/tgkill(), which adds source info. 3775 */ 3776 if ((info->si_code >= 0 || info->si_code == SI_TKILL) && 3777 (task_pid_vnr(current) != pid)) 3778 return -EPERM; 3779 3780 /* POSIX.1b doesn't mention process groups. */ 3781 return kill_proc_info(sig, info, pid); 3782 } 3783 3784 /** 3785 * sys_rt_sigqueueinfo - queue a signal and its info to a process 3786 * @pid: the PID of the thread 3787 * @sig: signal to be sent 3788 * @uinfo: signal info to be sent 3789 */ 3790 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, 3791 siginfo_t __user *, uinfo) 3792 { 3793 kernel_siginfo_t info; 3794 int ret = __copy_siginfo_from_user(sig, &info, uinfo); 3795 if (unlikely(ret)) 3796 return ret; 3797 return do_rt_sigqueueinfo(pid, sig, &info); 3798 } 3799 3800 #ifdef CONFIG_COMPAT 3801 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, 3802 compat_pid_t, pid, 3803 int, sig, 3804 struct compat_siginfo __user *, uinfo) 3805 { 3806 kernel_siginfo_t info; 3807 int ret = __copy_siginfo_from_user32(sig, &info, uinfo); 3808 if (unlikely(ret)) 3809 return ret; 3810 return do_rt_sigqueueinfo(pid, sig, &info); 3811 } 3812 #endif 3813 3814 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info) 3815 { 3816 /* This is only valid for single tasks */ 3817 if (pid <= 0 || tgid <= 0) 3818 return -EINVAL; 3819 3820 /* Not even root can pretend to send signals from the kernel. 3821 * Nor can they impersonate a kill()/tgkill(), which adds source info.
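 *
 * Concretely, a siginfo forged from userspace must carry a negative
 * si_code other than SI_TKILL (e.g. SI_QUEUE) unless the sender is
 * signaling itself.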
*/ 3823 if ((info->si_code >= 0 || info->si_code == SI_TKILL) && 3824 (task_pid_vnr(current) != pid)) 3825 return -EPERM; 3826 3827 return do_send_specific(tgid, pid, sig, info); 3828 } 3829 3830 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, 3831 siginfo_t __user *, uinfo) 3832 { 3833 kernel_siginfo_t info; 3834 int ret = __copy_siginfo_from_user(sig, &info, uinfo); 3835 if (unlikely(ret)) 3836 return ret; 3837 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); 3838 } 3839 3840 #ifdef CONFIG_COMPAT 3841 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, 3842 compat_pid_t, tgid, 3843 compat_pid_t, pid, 3844 int, sig, 3845 struct compat_siginfo __user *, uinfo) 3846 { 3847 kernel_siginfo_t info; 3848 int ret = __copy_siginfo_from_user32(sig, &info, uinfo); 3849 if (unlikely(ret)) 3850 return ret; 3851 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); 3852 } 3853 #endif 3854 3855 /* 3856 * For kthreads only, must not be used if cloned with CLONE_SIGHAND 3857 */ 3858 void kernel_sigaction(int sig, __sighandler_t action) 3859 { 3860 spin_lock_irq(&current->sighand->siglock); 3861 current->sighand->action[sig - 1].sa.sa_handler = action; 3862 if (action == SIG_IGN) { 3863 sigset_t mask; 3864 3865 sigemptyset(&mask); 3866 sigaddset(&mask, sig); 3867 3868 flush_sigqueue_mask(&mask, &current->signal->shared_pending); 3869 flush_sigqueue_mask(&mask, &current->pending); 3870 recalc_sigpending(); 3871 } 3872 spin_unlock_irq(&current->sighand->siglock); 3873 } 3874 EXPORT_SYMBOL(kernel_sigaction); 3875 3876 void __weak sigaction_compat_abi(struct k_sigaction *act, 3877 struct k_sigaction *oact) 3878 { 3879 } 3880 3881 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) 3882 { 3883 struct task_struct *p = current, *t; 3884 struct k_sigaction *k; 3885 sigset_t mask; 3886 3887 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) 3888 return -EINVAL; 3889 3890 k = &p->sighand->action[sig-1]; 3891 3892 spin_lock_irq(&p->sighand->siglock); 3893 if (oact) 3894 *oact = *k; 3895 3896 sigaction_compat_abi(act, oact); 3897 3898 if (act) { 3899 sigdelsetmask(&act->sa.sa_mask, 3900 sigmask(SIGKILL) | sigmask(SIGSTOP)); 3901 *k = *act; 3902 /* 3903 * POSIX 3.3.1.3: 3904 * "Setting a signal action to SIG_IGN for a signal that is 3905 * pending shall cause the pending signal to be discarded, 3906 * whether or not it is blocked."
3907 * 3908 * "Setting a signal action to SIG_DFL for a signal that is 3909 * pending and whose default action is to ignore the signal 3910 * (for example, SIGCHLD), shall cause the pending signal to 3911 * be discarded, whether or not it is blocked" 3912 */ 3913 if (sig_handler_ignored(sig_handler(p, sig), sig)) { 3914 sigemptyset(&mask); 3915 sigaddset(&mask, sig); 3916 flush_sigqueue_mask(&mask, &p->signal->shared_pending); 3917 for_each_thread(p, t) 3918 flush_sigqueue_mask(&mask, &t->pending); 3919 } 3920 } 3921 3922 spin_unlock_irq(&p->sighand->siglock); 3923 return 0; 3924 } 3925 3926 static int 3927 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp, 3928 size_t min_ss_size) 3929 { 3930 struct task_struct *t = current; 3931 3932 if (oss) { 3933 memset(oss, 0, sizeof(stack_t)); 3934 oss->ss_sp = (void __user *) t->sas_ss_sp; 3935 oss->ss_size = t->sas_ss_size; 3936 oss->ss_flags = sas_ss_flags(sp) | 3937 (current->sas_ss_flags & SS_FLAG_BITS); 3938 } 3939 3940 if (ss) { 3941 void __user *ss_sp = ss->ss_sp; 3942 size_t ss_size = ss->ss_size; 3943 unsigned ss_flags = ss->ss_flags; 3944 int ss_mode; 3945 3946 if (unlikely(on_sig_stack(sp))) 3947 return -EPERM; 3948 3949 ss_mode = ss_flags & ~SS_FLAG_BITS; 3950 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK && 3951 ss_mode != 0)) 3952 return -EINVAL; 3953 3954 if (ss_mode == SS_DISABLE) { 3955 ss_size = 0; 3956 ss_sp = NULL; 3957 } else { 3958 if (unlikely(ss_size < min_ss_size)) 3959 return -ENOMEM; 3960 } 3961 3962 t->sas_ss_sp = (unsigned long) ss_sp; 3963 t->sas_ss_size = ss_size; 3964 t->sas_ss_flags = ss_flags; 3965 } 3966 return 0; 3967 } 3968 3969 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) 3970 { 3971 stack_t new, old; 3972 int err; 3973 if (uss && copy_from_user(&new, uss, sizeof(stack_t))) 3974 return -EFAULT; 3975 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL, 3976 current_user_stack_pointer(), 3977 MINSIGSTKSZ); 3978 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t))) 3979 err = -EFAULT; 3980 return err; 3981 } 3982 3983 int restore_altstack(const stack_t __user *uss) 3984 { 3985 stack_t new; 3986 if (copy_from_user(&new, uss, sizeof(stack_t))) 3987 return -EFAULT; 3988 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(), 3989 MINSIGSTKSZ); 3990 /* squash all but EFAULT for now */ 3991 return 0; 3992 } 3993 3994 int __save_altstack(stack_t __user *uss, unsigned long sp) 3995 { 3996 struct task_struct *t = current; 3997 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | 3998 __put_user(t->sas_ss_flags, &uss->ss_flags) | 3999 __put_user(t->sas_ss_size, &uss->ss_size); 4000 if (err) 4001 return err; 4002 if (t->sas_ss_flags & SS_AUTODISARM) 4003 sas_ss_reset(t); 4004 return 0; 4005 } 4006 4007 #ifdef CONFIG_COMPAT 4008 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr, 4009 compat_stack_t __user *uoss_ptr) 4010 { 4011 stack_t uss, uoss; 4012 int ret; 4013 4014 if (uss_ptr) { 4015 compat_stack_t uss32; 4016 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t))) 4017 return -EFAULT; 4018 uss.ss_sp = compat_ptr(uss32.ss_sp); 4019 uss.ss_flags = uss32.ss_flags; 4020 uss.ss_size = uss32.ss_size; 4021 } 4022 ret = do_sigaltstack(uss_ptr ? 
static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
		size_t min_ss_size)
{
	struct task_struct *t = current;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}

SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
{
	stack_t new, old;
	int err;

	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer(),
			     MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}

int restore_altstack(const stack_t __user *uss)
{
	stack_t new;

	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}

#ifdef CONFIG_COMPAT
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;

		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr)  {
		compat_stack_t old;

		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = do_compat_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;

	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#endif
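/*
 * Userspace illustration of the sigaltstack() interface implemented
 * above (hypothetical snippet; segv_handler is a user-defined handler):
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,	// < MINSIGSTKSZ fails with -ENOMEM
 *		.ss_flags = 0,		// or SS_AUTODISARM
 *	};
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { .sa_flags = SA_ONSTACK };
 *	sa.sa_handler = segv_handler;
 *	sigaction(SIGSEGV, &sa, NULL);
 *
 * Replacing the alternate stack while a handler is running on it fails
 * with -EPERM (the on_sig_stack() check above), and SS_DISABLE ignores
 * the supplied pointer and size entirely.
 */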
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @uset: where the mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
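/*
 * Userspace illustration of the mask manipulation handled above
 * (hypothetical snippet, documenting the intended semantics only):
 *
 *	sigset_t set, pending;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// SIG_BLOCK: union with mask
 *	raise(SIGINT);				// queued, not delivered
 *	sigpending(&pending);
 *	// sigismember(&pending, SIGINT) is now 1
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	// SIGINT delivered here
 *
 * Attempts to block SIGKILL or SIGSTOP are silently dropped by
 * set_current_blocked() rather than reported as an error.
 */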
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal whose action is to be changed
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;

		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif
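/*
 * Userspace illustration of the rt_sigaction() path above (hypothetical
 * snippet; the C library maps sigaction(2) onto rt_sigaction, and
 * on_sigusr1 is a user-defined handler):
 *
 *	struct sigaction sa;
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = on_sigusr1;
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGUSR2);	// blocked while handler runs
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGUSR1, &sa, NULL);
 *
 * Note that do_sigaction() strips SIGKILL and SIGSTOP from sa_mask, and
 * rejects attempts to change the action of those two signals outright.
 */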
#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
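/*
 * Userspace illustration of the classic race-free wait that sigsuspend()
 * enables (hypothetical snippet; got_sigchld is a volatile sig_atomic_t
 * flag set by the SIGCHLD handler):
 *
 *	sigset_t block, orig;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &orig);	// close the race window
 *	while (!got_sigchld)
 *		sigsuspend(&orig);		// atomically unblock + sleep
 *	sigprocmask(SIG_SETMASK, &orig, NULL);
 *
 * Unlike pause(), the mask swap and the sleep happen atomically: the
 * original mask is restored via saved_sigmask only after the handler has
 * run, so a signal arriving between the flag check and the sleep cannot
 * be lost.
 */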
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET
}

void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;

	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */