// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & (PF_KTHREAD | PF_IO_WORKER)) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless the
	 * signal is SIGKILL, which can't be reported anyway but can be
	 * ignored by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
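/*
 * Editor's summary of the ignore decision above (illustrative, not from
 * the original source): for an ordinary task, sig_ignored() says "drop"
 * when the handler is SIG_IGN, or when it is SIG_DFL and the default
 * action is to ignore (e.g. SIGCHLD, SIGWINCH).  A blocked or ptraced
 * task never drops the signal here, so a later sigaction()/unblock or
 * the tracer still gets to see it.
 */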
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know it is safe do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);

}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
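/*
 * Editor's note (illustrative, not from the original source): if both
 * SIGSEGV and SIGUSR1 are pending and unblocked, next_signal() below
 * reports SIGSEGV first even though SIGUSR1 has the lower number.
 * Synchronous (fault) signals describe the instruction the task is
 * stopped at, so they must be serviced before any unrelated
 * asynchronous signal.
 */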
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) ||
		     (task->flags & (PF_EXITING | PF_IO_WORKER))))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
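/*
 * Editor's sketch of the group-stop bookkeeping above (illustrative, not
 * from the original source): a SIGSTOP sent to a three-thread group sets
 * group_stop_count to 3 and JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME on
 * each thread.  Each thread that stops calls
 * task_participate_group_stop(); the one that drops the count to zero
 * sets SIGNAL_STOP_STOPPED and the completed stop is reported to the
 * parent.
 */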
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
	int sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	user = __task_cred(t)->user;
	sigpending = atomic_inc_return(&user->sigpending);
	if (sigpending == 1)
		get_uid(user);
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (atomic_dec_and_test(&user->sigpending))
			free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (atomic_dec_and_test(&q->user->sigpending))
		free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
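/*
 * Editor's sketch of the calling convention for dequeue_signal() above
 * (illustrative only): the caller owns the siglock across the call and
 * receives the dequeued siginfo in a caller-provided buffer, e.g.
 *
 *	kernel_siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */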
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;
	/* PF_IO_WORKER threads don't take any signals */
	if (t->flags & PF_IO_WORKER)
		return -ESRCH;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
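/*
 * Editor's summary of the permission check above (illustrative, not from
 * the original source): a user-space sender may signal a target when the
 * sender's euid or uid matches the target's suid or uid, when it holds
 * CAP_KILL in the target's user namespace, or, for SIGCONT only, when
 * the target is in the sender's session.
 */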
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
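/*
 * Editor's note on the stop/continue interplay above (illustrative, not
 * from the original source): generating a stop signal discards any queued
 * SIGCONT, and generating SIGCONT discards all queued stop signals and
 * wakes the stopped threads, before the signal's own action is even
 * considered.  This is why "kill -STOP; kill -CONT" leaves no stale stop
 * pending.
 */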
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
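/*
 * Editor's example of the legacy_queue() rule above (illustrative, not
 * from the original source): if SIGINT, a non-realtime signal, is already
 * pending, a second SIGINT is silently coalesced and __send_signal()
 * below returns without queueing a second entry.  Realtime signals
 * (>= SIGRTMIN) never coalesce and each carries its own siginfo.
 */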
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & (PF_KTHREAD | PF_IO_WORKER)))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}
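/*
 * Editor's note (illustrative, not from the original source): only the
 * SIL_KILL, SIL_CHLD and SIL_RT layouts carry a meaningful si_pid/si_uid
 * pair, so send_signal() below only rewrites those fields when the
 * sender and the target live in different pid or user namespaces.
 */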
static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the siginfo has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
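/*
 * Editor's sketch of the usual calling pattern for the helper above
 * (illustrative only; do_send_sig_info() earlier in this file is a real
 * user):
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand is pinned and ->siglock is held here ...
 *		unlock_task_sighand(p, &flags);
 *	}
 */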
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  The 32bit address will be encoded in the low
 * 32 bits of the pointer, and those low 32 bits will be stored at a
 * higher address than a 32 bit pointer expects, so userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
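/*
 * Editor's summary of the kill(2) pid conventions handled below
 * (illustrative, not from the original source):
 *	pid > 0		signal the process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process the caller may signal, except
 *			pid-1-like processes and the caller's own group
 *	pid < -1	signal every process in the group -pid
 */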
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(&info);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
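/*
 * Editor's sketch of the preallocated-sigqueue lifecycle (illustrative,
 * not from the original source): posix-timers call sigqueue_alloc() at
 * timer_create() time, send_sigqueue() on each expiry, and
 * sigqueue_free() at timer deletion, so an expiring timer never has to
 * allocate memory; any allocation failure is reported once, at create
 * time, as EAGAIN.
 */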
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
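/*
 * Editor's example of the exit_code decoding above (illustrative, not
 * from the original source): exit(3) yields an exit_code of 0x300,
 * reported as CLD_EXITED with si_status 3; death by SIGTERM yields
 * exit_code 15, reported as CLD_KILLED with si_status 15; SIGSEGV plus a
 * core dump additionally sets bit 0x80 and is reported as CLD_DUMPED.
 */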
2021 */
2022 static void do_notify_parent_cldstop(struct task_struct *tsk,
2023 bool for_ptracer, int why)
2024 {
2025 struct kernel_siginfo info;
2026 unsigned long flags;
2027 struct task_struct *parent;
2028 struct sighand_struct *sighand;
2029 u64 utime, stime;
2030
2031 if (for_ptracer) {
2032 parent = tsk->parent;
2033 } else {
2034 tsk = tsk->group_leader;
2035 parent = tsk->real_parent;
2036 }
2037
2038 clear_siginfo(&info);
2039 info.si_signo = SIGCHLD;
2040 info.si_errno = 0;
2041 /*
2042 * see comment in do_notify_parent() about the following 4 lines
2043 */
2044 rcu_read_lock();
2045 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2046 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2047 rcu_read_unlock();
2048
2049 task_cputime(tsk, &utime, &stime);
2050 info.si_utime = nsec_to_clock_t(utime);
2051 info.si_stime = nsec_to_clock_t(stime);
2052
2053 info.si_code = why;
2054 switch (why) {
2055 case CLD_CONTINUED:
2056 info.si_status = SIGCONT;
2057 break;
2058 case CLD_STOPPED:
2059 info.si_status = tsk->signal->group_exit_code & 0x7f;
2060 break;
2061 case CLD_TRAPPED:
2062 info.si_status = tsk->exit_code & 0x7f;
2063 break;
2064 default:
2065 BUG();
2066 }
2067
2068 sighand = parent->sighand;
2069 spin_lock_irqsave(&sighand->siglock, flags);
2070 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2071 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2072 __group_send_sig_info(SIGCHLD, &info, parent);
2073 /*
2074 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2075 */
2076 __wake_up_parent(tsk, parent);
2077 spin_unlock_irqrestore(&sighand->siglock, flags);
2078 }
2079
2080 static inline bool may_ptrace_stop(void)
2081 {
2082 if (!likely(current->ptrace))
2083 return false;
2084 /*
2085 * Are we in the middle of do_coredump?
2086 * If so, and our tracer is also part of the coredump, stopping
2087 * is a deadlock situation and pointless because our tracer
2088 * is dead, so don't allow us to stop.
2089 * If SIGKILL was already sent before the caller unlocked
2090 * ->siglock we must see ->core_state != NULL. Otherwise it
2091 * is safe to enter schedule().
2092 *
2093 * This is almost outdated, a task with the pending SIGKILL can't
2094 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2095 * after SIGKILL was already dequeued.
2096 */
2097 if (unlikely(current->mm->core_state) &&
2098 unlikely(current->mm == current->parent->mm))
2099 return false;
2100
2101 return true;
2102 }
2103
2104 /*
2105 * Return true if there is a SIGKILL that should be waking us up.
2106 * Called with the siglock held.
2107 */
2108 static bool sigkill_pending(struct task_struct *tsk)
2109 {
2110 return sigismember(&tsk->pending.signal, SIGKILL) ||
2111 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2112 }
2113
2114 /*
2115 * This must be called with current->sighand->siglock held.
2116 *
2117 * This should be the path for all ptrace stops.
2118 * We always set current->last_siginfo while stopped here.
2119 * That makes it a way to test a stopped process for
2120 * being ptrace-stopped vs being job-control-stopped.
2121 *
2122 * If we actually decide not to stop at all because the tracer
2123 * is gone, we keep current->exit_code unless clear_code.
2124 */
2125 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2126 __releases(&current->sighand->siglock)
2127 __acquires(&current->sighand->siglock)
2128 {
2129 bool gstop_done = false;
2130
2131 if (arch_ptrace_stop_needed(exit_code, info)) {
2132 /*
2133 * The arch code has something special to do before a
2134 * ptrace stop. This is allowed to block, e.g. for faults
2135 * on user stack pages. We can't keep the siglock while
2136 * calling arch_ptrace_stop, so we must release it now.
2137 * To preserve proper semantics, we must do this before
2138 * any signal bookkeeping like checking group_stop_count.
2139 * Meanwhile, a SIGKILL could come in before we retake the
2140 * siglock. That must prevent us from sleeping in TASK_TRACED.
2141 * So after regaining the lock, we must check for SIGKILL.
2142 */
2143 spin_unlock_irq(&current->sighand->siglock);
2144 arch_ptrace_stop(exit_code, info);
2145 spin_lock_irq(&current->sighand->siglock);
2146 if (sigkill_pending(current))
2147 return;
2148 }
2149
2150 set_special_state(TASK_TRACED);
2151
2152 /*
2153 * We're committing to trapping. TRACED should be visible before
2154 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2155 * Also, transition to TRACED and updates to ->jobctl should be
2156 * atomic with respect to siglock and should be done after the arch
2157 * hook as siglock is released and regrabbed across it.
2158 *
2159 * TRACER TRACEE
2160 *
2161 * ptrace_attach()
2162 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2163 * do_wait()
2164 * set_current_state() smp_wmb();
2165 * ptrace_do_wait()
2166 * wait_task_stopped()
2167 * task_stopped_code()
2168 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2169 */
2170 smp_wmb();
2171
2172 current->last_siginfo = info;
2173 current->exit_code = exit_code;
2174
2175 /*
2176 * If @why is CLD_STOPPED, we're trapping to participate in a group
2177 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2178 * across siglock relocks since INTERRUPT was scheduled, PENDING
2179 * could be clear now. We act as if SIGCONT is received after
2180 * TASK_TRACED is entered - ignore it.
2181 */
2182 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2183 gstop_done = task_participate_group_stop(current);
2184
2185 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2186 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2187 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2188 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2189
2190 /* entering a trap, clear TRAPPING */
2191 task_clear_jobctl_trapping(current);
2192
2193 spin_unlock_irq(&current->sighand->siglock);
2194 read_lock(&tasklist_lock);
2195 if (may_ptrace_stop()) {
2196 /*
2197 * Notify parents of the stop.
2198 *
2199 * While ptraced, there are two parents - the ptracer and
2200 * the real_parent of the group_leader. The ptracer should
2201 * know about every stop while the real parent is only
2202 * interested in the completion of group stop. The states
2203 * for the two don't interact with each other. Notify
2204 * separately unless they're gonna be duplicates.
2205 */
2206 do_notify_parent_cldstop(current, true, why);
2207 if (gstop_done && ptrace_reparented(current))
2208 do_notify_parent_cldstop(current, false, why);
2209
2210 /*
2211 * Don't want to allow preemption here, because
2212 * sys_ptrace() needs this task to be inactive.
2213 *
2214 * XXX: implement read_unlock_no_resched().
2215 */
2216 preempt_disable();
2217 read_unlock(&tasklist_lock);
2218 cgroup_enter_frozen();
2219 preempt_enable_no_resched();
2220 freezable_schedule();
2221 cgroup_leave_frozen(true);
2222 } else {
2223 /*
2224 * By the time we got the lock, our tracer went away.
2225 * Don't drop the lock yet, another tracer may come.
2226 *
2227 * If @gstop_done, the ptracer went away between group stop
2228 * completion and here. During detach, it would have set
2229 * JOBCTL_STOP_PENDING on us and we'll re-enter
2230 * TASK_STOPPED in do_signal_stop() on return, so notifying
2231 * the real parent of the group stop completion is enough.
2232 */
2233 if (gstop_done)
2234 do_notify_parent_cldstop(current, false, why);
2235
2236 /* tasklist protects us from ptrace_freeze_traced() */
2237 __set_current_state(TASK_RUNNING);
2238 if (clear_code)
2239 current->exit_code = 0;
2240 read_unlock(&tasklist_lock);
2241 }
2242
2243 /*
2244 * We are back. Now reacquire the siglock before touching
2245 * last_siginfo, so that we are sure to have synchronized with
2246 * any signal-sending on another CPU that wants to examine it.
2247 */
2248 spin_lock_irq(&current->sighand->siglock);
2249 current->last_siginfo = NULL;
2250
2251 /* LISTENING can be set only during STOP traps, clear it */
2252 current->jobctl &= ~JOBCTL_LISTENING;
2253
2254 /*
2255 * Queued signals ignored us while we were stopped for tracing.
2256 * So check for any that we should take before resuming user mode.
2257 * This sets TIF_SIGPENDING, but never clears it.
2258 */
2259 recalc_sigpending_tsk(current);
2260 }
2261
2262 static void ptrace_do_notify(int signr, int exit_code, int why)
2263 {
2264 kernel_siginfo_t info;
2265
2266 clear_siginfo(&info);
2267 info.si_signo = signr;
2268 info.si_code = exit_code;
2269 info.si_pid = task_pid_vnr(current);
2270 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2271
2272 /* Let the debugger run. */
2273 ptrace_stop(exit_code, why, 1, &info);
2274 }
2275
2276 void ptrace_notify(int exit_code)
2277 {
2278 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2279 if (unlikely(current->task_works))
2280 task_work_run();
2281
2282 spin_lock_irq(&current->sighand->siglock);
2283 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2284 spin_unlock_irq(&current->sighand->siglock);
2285 }
2286
2287 /**
2288 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2289 * @signr: signr causing group stop if initiating
2290 *
2291 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2292 * and participate in it. If already set, participate in the existing
2293 * group stop. If participated in a group stop (and thus slept), %true is
2294 * returned with siglock released.
2295 *
2296 * If ptraced, this function doesn't handle stop itself. Instead,
2297 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2298 * untouched. The caller must ensure that INTERRUPT trap handling takes
2299 * place afterwards.
2300 *
2301 * CONTEXT:
2302 * Must be called with @current->sighand->siglock held, which is released
2303 * on %true return.
2304 *
2305 * RETURNS:
2306 * %false if group stop is already cancelled or ptrace trap is scheduled.
2307 * %true if participated in group stop.
2308 */
2309 static bool do_signal_stop(int signr)
2310 __releases(&current->sighand->siglock)
2311 {
2312 struct signal_struct *sig = current->signal;
2313
2314 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2315 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2316 struct task_struct *t;
2317
2318 /* signr will be recorded in task->jobctl for retries */
2319 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2320
2321 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2322 unlikely(signal_group_exit(sig)))
2323 return false;
2324 /*
2325 * There is no group stop already in progress. We must
2326 * initiate one now.
2327 *
2328 * While ptraced, a task may be resumed while group stop is
2329 * still in effect and then receive a stop signal and
2330 * initiate another group stop. This deviates from the
2331 * usual behavior as two consecutive stop signals can't
2332 * cause two group stops when !ptraced. That is why we
2333 * also check !task_is_stopped(t) below.
2334 *
2335 * The condition can be distinguished by testing whether
2336 * SIGNAL_STOP_STOPPED is already set. Don't generate
2337 * group_exit_code in such case.
2338 *
2339 * This is not necessary for SIGNAL_STOP_CONTINUED because
2340 * an intervening stop signal is required to cause two
2341 * continued events regardless of ptrace.
2342 */
2343 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2344 sig->group_exit_code = signr;
2345
2346 sig->group_stop_count = 0;
2347
2348 if (task_set_jobctl_pending(current, signr | gstop))
2349 sig->group_stop_count++;
2350
2351 t = current;
2352 while_each_thread(current, t) {
2353 /*
2354 * Setting state to TASK_STOPPED for a group
2355 * stop is always done with the siglock held,
2356 * so this check has no races.
2357 */
2358 if (!task_is_stopped(t) &&
2359 task_set_jobctl_pending(t, signr | gstop)) {
2360 sig->group_stop_count++;
2361 if (likely(!(t->ptrace & PT_SEIZED)))
2362 signal_wake_up(t, 0);
2363 else
2364 ptrace_trap_notify(t);
2365 }
2366 }
2367 }
2368
2369 if (likely(!current->ptrace)) {
2370 int notify = 0;
2371
2372 /*
2373 * If there are no other threads in the group, or if there
2374 * is a group stop in progress and we are the last to stop,
2375 * report to the parent.
2376 */
2377 if (task_participate_group_stop(current))
2378 notify = CLD_STOPPED;
2379
2380 set_special_state(TASK_STOPPED);
2381 spin_unlock_irq(&current->sighand->siglock);
2382
2383 /*
2384 * Notify the parent of the group stop completion. Because
2385 * we're not holding either the siglock or tasklist_lock
2386 * here, ptracer may attach in between; however, this is for
2387 * group stop and should always be delivered to the real
2388 * parent of the group leader. The new ptracer will get
2389 * its notification when this task transitions into
2390 * TASK_TRACED.
2391 */
2392 if (notify) {
2393 read_lock(&tasklist_lock);
2394 do_notify_parent_cldstop(current, false, notify);
2395 read_unlock(&tasklist_lock);
2396 }
2397
2398 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2399 cgroup_enter_frozen();
2400 freezable_schedule();
2401 return true;
2402 } else {
2403 /*
2404 * While ptraced, group stop is handled by STOP trap.
2405 * Schedule it and let the caller deal with it.
2406 */
2407 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2408 return false;
2409 }
2410 }
2411
2412 /**
2413 * do_jobctl_trap - take care of ptrace jobctl traps
2414 *
2415 * When PT_SEIZED, it's used for both group stop and explicit
2416 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2417 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2418 * the stop signal; otherwise, %SIGTRAP.
2419 *
2420 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2421 * number as exit_code and no siginfo.
2422 *
2423 * CONTEXT:
2424 * Must be called with @current->sighand->siglock held, which may be
2425 * released and re-acquired before returning with intervening sleep.
2426 */
2427 static void do_jobctl_trap(void)
2428 {
2429 struct signal_struct *signal = current->signal;
2430 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2431
2432 if (current->ptrace & PT_SEIZED) {
2433 if (!signal->group_stop_count &&
2434 !(signal->flags & SIGNAL_STOP_STOPPED))
2435 signr = SIGTRAP;
2436 WARN_ON_ONCE(!signr);
2437 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2438 CLD_STOPPED);
2439 } else {
2440 WARN_ON_ONCE(!signr);
2441 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2442 current->exit_code = 0;
2443 }
2444 }
2445
2446 /**
2447 * do_freezer_trap - handle the freezer jobctl trap
2448 *
2449 * Puts the task into frozen state, unless the task is about to quit.
2450 * In that case it drops JOBCTL_TRAP_FREEZE.
2451 *
2452 * CONTEXT:
2453 * Must be called with @current->sighand->siglock held,
2454 * which is always released before returning.
2455 */
2456 static void do_freezer_trap(void)
2457 __releases(&current->sighand->siglock)
2458 {
2459 /*
2460 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2461 * let's make another loop to give it a chance to be handled.
2462 * In any case, we'll come back here.
2463 */
2464 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2465 JOBCTL_TRAP_FREEZE) {
2466 spin_unlock_irq(&current->sighand->siglock);
2467 return;
2468 }
2469
2470 /*
2471 * Now we're sure that there is no pending fatal signal and no
2472 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2473 * immediately (if there is a non-fatal signal pending), and
2474 * put the task into sleep.
2475 */
2476 __set_current_state(TASK_INTERRUPTIBLE);
2477 clear_thread_flag(TIF_SIGPENDING);
2478 spin_unlock_irq(&current->sighand->siglock);
2479 cgroup_enter_frozen();
2480 freezable_schedule();
2481 }
2482
2483 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2484 {
2485 /*
2486 * We do not check sig_kernel_stop(signr) but set this marker
2487 * unconditionally because we do not know whether debugger will
2488 * change signr. This flag has no meaning unless we are going
2489 * to stop after return from ptrace_stop(). In this case it will
2490 * be checked in do_signal_stop(), we should only stop if it was
2491 * not cleared by SIGCONT while we were sleeping. See also the
2492 * comment in dequeue_signal().
2493 */
2494 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2495 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2496
2497 /* We're back. Did the debugger cancel the sig? */
2498 signr = current->exit_code;
2499 if (signr == 0)
2500 return signr;
2501
2502 current->exit_code = 0;
2503
2504 /*
2505 * Update the siginfo structure if the signal has
2506 * changed. If the debugger wanted something
2507 * specific in the siginfo structure then it should
2508 * have updated *info via PTRACE_SETSIGINFO.
2509 */
2510 if (signr != info->si_signo) {
2511 clear_siginfo(info);
2512 info->si_signo = signr;
2513 info->si_errno = 0;
2514 info->si_code = SI_USER;
2515 rcu_read_lock();
2516 info->si_pid = task_pid_vnr(current->parent);
2517 info->si_uid = from_kuid_munged(current_user_ns(),
2518 task_uid(current->parent));
2519 rcu_read_unlock();
2520 }
2521
2522 /* If the (new) signal is now blocked, requeue it. */
2523 if (sigismember(&current->blocked, signr)) {
2524 send_signal(signr, info, current, PIDTYPE_PID);
2525 signr = 0;
2526 }
2527
2528 return signr;
2529 }
2530
2531 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2532 {
2533 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2534 case SIL_FAULT:
2535 case SIL_FAULT_MCEERR:
2536 case SIL_FAULT_BNDERR:
2537 case SIL_FAULT_PKUERR:
2538 ksig->info.si_addr = arch_untagged_si_addr(
2539 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2540 break;
2541 case SIL_KILL:
2542 case SIL_TIMER:
2543 case SIL_POLL:
2544 case SIL_CHLD:
2545 case SIL_RT:
2546 case SIL_SYS:
2547 break;
2548 }
2549 }
2550
2551 bool get_signal(struct ksignal *ksig)
2552 {
2553 struct sighand_struct *sighand = current->sighand;
2554 struct signal_struct *signal = current->signal;
2555 int signr;
2556
2557 if (unlikely(current->task_works))
2558 task_work_run();
2559
2560 /*
2561 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2562 * that the arch handlers don't all have to do it. If we get here
2563 * without TIF_SIGPENDING, just exit after running signal work.
2564 */
2565 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2566 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2567 tracehook_notify_signal();
2568 if (!task_sigpending(current))
2569 return false;
2570 }
2571
2572 if (unlikely(uprobe_deny_signal()))
2573 return false;
2574
2575 /*
2576 * Do this once, we can't return to user-mode if freezing() == T.
2577 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2578 * thus do not need another check after return.
2579 */
2580 try_to_freeze();
2581
2582 relock:
2583 spin_lock_irq(&sighand->siglock);
2584
2585 /*
2586 * Every stopped thread goes here after wakeup. Check to see if
2587 * we should notify the parent, prepare_signal(SIGCONT) encodes
2588 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2589 */
2590 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2591 int why;
2592
2593 if (signal->flags & SIGNAL_CLD_CONTINUED)
2594 why = CLD_CONTINUED;
2595 else
2596 why = CLD_STOPPED;
2597
2598 signal->flags &= ~SIGNAL_CLD_MASK;
2599
2600 spin_unlock_irq(&sighand->siglock);
2601
2602 /*
2603 * Notify the parent that we're continuing. This event is
2604 * always per-process and doesn't make a whole lot of sense
2605 * for ptracers, who shouldn't consume the state via
2606 * wait(2) either, but, for backward compatibility, notify
2607 * the ptracer of the group leader too unless it's gonna be
2608 * a duplicate.
2609 */
2610 read_lock(&tasklist_lock);
2611 do_notify_parent_cldstop(current, false, why);
2612
2613 if (ptrace_reparented(current->group_leader))
2614 do_notify_parent_cldstop(current->group_leader,
2615 true, why);
2616 read_unlock(&tasklist_lock);
2617
2618 goto relock;
2619 }
2620
2621 /* Has this task already been marked for death? */
2622 if (signal_group_exit(signal)) {
2623 ksig->info.si_signo = signr = SIGKILL;
2624 sigdelset(&current->pending.signal, SIGKILL);
2625 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2626 &sighand->action[SIGKILL - 1]);
2627 recalc_sigpending();
2628 goto fatal;
2629 }
2630
2631 for (;;) {
2632 struct k_sigaction *ka;
2633
2634 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2635 do_signal_stop(0))
2636 goto relock;
2637
2638 if (unlikely(current->jobctl &
2639 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2640 if (current->jobctl & JOBCTL_TRAP_MASK) {
2641 do_jobctl_trap();
2642 spin_unlock_irq(&sighand->siglock);
2643 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2644 do_freezer_trap();
2645
2646 goto relock;
2647 }
2648
2649 /*
2650 * If the task is leaving the frozen state, let's update
2651 * cgroup counters and reset the frozen bit.
2652 */
2653 if (unlikely(cgroup_task_frozen(current))) {
2654 spin_unlock_irq(&sighand->siglock);
2655 cgroup_leave_frozen(false);
2656 goto relock;
2657 }
2658
2659 /*
2660 * Signals generated by the execution of an instruction
2661 * need to be delivered before any other pending signals
2662 * so that the instruction pointer in the signal stack
2663 * frame points to the faulting instruction.
2664 */
2665 signr = dequeue_synchronous_signal(&ksig->info);
2666 if (!signr)
2667 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2668
2669 if (!signr)
2670 break; /* will return 0 */
2671
2672 if (unlikely(current->ptrace) && signr != SIGKILL) {
2673 signr = ptrace_signal(signr, &ksig->info);
2674 if (!signr)
2675 continue;
2676 }
2677
2678 ka = &sighand->action[signr-1];
2679
2680 /* Trace actually delivered signals. */
2681 trace_signal_deliver(signr, &ksig->info, ka);
2682
2683 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2684 continue;
2685 if (ka->sa.sa_handler != SIG_DFL) {
2686 /* Run the handler. */
2687 ksig->ka = *ka;
2688
2689 if (ka->sa.sa_flags & SA_ONESHOT)
2690 ka->sa.sa_handler = SIG_DFL;
2691
2692 break; /* will return non-zero "signr" value */
2693 }
2694
2695 /*
2696 * Now we are doing the default action for this signal.
2697 */
2698 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2699 continue;
2700
2701 /*
2702 * Global init gets no signals it doesn't want.
2703 * Container-init gets no signals it doesn't want from same
2704 * container.
2705 *
2706 * Note that if global/container-init sees a sig_kernel_only()
2707 * signal here, the signal must have been generated internally
2708 * or must have come from an ancestor namespace. In either
2709 * case, the signal cannot be dropped.
2710 */
2711 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2712 !sig_kernel_only(signr))
2713 continue;
2714
2715 if (sig_kernel_stop(signr)) {
2716 /*
2717 * The default action is to stop all threads in
2718 * the thread group. The job control signals
2719 * do nothing in an orphaned pgrp, but SIGSTOP
2720 * always works. Note that siglock needs to be
2721 * dropped during the call to is_orphaned_pgrp()
2722 * because of lock ordering with tasklist_lock.
2723 * This allows an intervening SIGCONT to be posted.
2724 * We need to check for that and bail out if necessary.
2725 */
2726 if (signr != SIGSTOP) {
2727 spin_unlock_irq(&sighand->siglock);
2728
2729 /* signals can be posted during this window */
2730
2731 if (is_current_pgrp_orphaned())
2732 goto relock;
2733
2734 spin_lock_irq(&sighand->siglock);
2735 }
2736
2737 if (likely(do_signal_stop(ksig->info.si_signo))) {
2738 /* It released the siglock. */
2739 goto relock;
2740 }
2741
2742 /*
2743 * We didn't actually stop, due to a race
2744 * with SIGCONT or something like that.
2745 */
2746 continue;
2747 }
2748
2749 fatal:
2750 spin_unlock_irq(&sighand->siglock);
2751 if (unlikely(cgroup_task_frozen(current)))
2752 cgroup_leave_frozen(true);
2753
2754 /*
2755 * Anything else is fatal, maybe with a core dump.
2756 */
2757 current->flags |= PF_SIGNALED;
2758
2759 if (sig_kernel_coredump(signr)) {
2760 if (print_fatal_signals)
2761 print_fatal_signal(ksig->info.si_signo);
2762 proc_coredump_connector(current);
2763 /*
2764 * If it was able to dump core, this kills all
2765 * other threads in the group and synchronizes with
2766 * their demise. If we lost the race with another
2767 * thread getting here, it set group_exit_code
2768 * first and our do_group_exit call below will use
2769 * that value and ignore the one we pass it.
2770 */
2771 do_coredump(&ksig->info);
2772 }
2773
2774 /*
2775 * Death signals, no core dump.
2776 */
2777 do_group_exit(ksig->info.si_signo);
2778 /* NOTREACHED */
2779 }
2780 spin_unlock_irq(&sighand->siglock);
2781
2782 ksig->sig = signr;
2783
2784 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2785 hide_si_addr_tag_bits(ksig);
2786
2787 return ksig->sig > 0;
2788 }
2789
2790 /**
2791 * signal_delivered - called after a signal was successfully delivered
2792 * @ksig: kernel signal struct
2793 * @stepping: nonzero if debugger single-step or block-step in use
2794 *
2795 * This function should be called when a signal has successfully been
2796 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2797 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2798 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2799 */
2800 static void signal_delivered(struct ksignal *ksig, int stepping)
2801 {
2802 sigset_t blocked;
2803
2804 /* A signal was successfully delivered, and the
2805 saved sigmask was stored on the signal frame,
2806 and will be restored by sigreturn. So we can
2807 simply clear the restore sigmask flag. */
2808 clear_restore_sigmask();
2809
2810 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2811 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2812 sigaddset(&blocked, ksig->sig);
2813 set_current_blocked(&blocked);
2814 tracehook_signal_handler(stepping);
2815 }
2816
2817 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2818 {
2819 if (failed)
2820 force_sigsegv(ksig->sig);
2821 else
2822 signal_delivered(ksig, stepping);
2823 }
2824
2825 /*
2826 * It could be that complete_signal() picked us to notify about the
2827 * group-wide signal. Other threads should be notified now to take
2828 * the shared signals in @which since we will not.
2829 */
2830 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2831 {
2832 sigset_t retarget;
2833 struct task_struct *t;
2834
2835 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2836 if (sigisemptyset(&retarget))
2837 return;
2838
2839 t = tsk;
2840 while_each_thread(tsk, t) {
2841 if (t->flags & PF_EXITING)
2842 continue;
2843
2844 if (!has_pending_signals(&retarget, &t->blocked))
2845 continue;
2846 /* Remove the signals this thread can handle. */
2847 sigandsets(&retarget, &retarget, &t->blocked);
2848
2849 if (!task_sigpending(t))
2850 signal_wake_up(t, 0);
2851
2852 if (sigisemptyset(&retarget))
2853 break;
2854 }
2855 }
2856
2857 void exit_signals(struct task_struct *tsk)
2858 {
2859 int group_stop = 0;
2860 sigset_t unblocked;
2861
2862 /*
2863 * @tsk is about to have PF_EXITING set - lock out users which
2864 * expect stable threadgroup.
2865 */
2866 cgroup_threadgroup_change_begin(tsk);
2867
2868 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2869 tsk->flags |= PF_EXITING;
2870 cgroup_threadgroup_change_end(tsk);
2871 return;
2872 }
2873
2874 spin_lock_irq(&tsk->sighand->siglock);
2875 /*
2876 * From now this task is not visible for group-wide signals,
2877 * see wants_signal(), do_signal_stop().
2878 */
2879 tsk->flags |= PF_EXITING;
2880
2881 cgroup_threadgroup_change_end(tsk);
2882
2883 if (!task_sigpending(tsk))
2884 goto out;
2885
2886 unblocked = tsk->blocked;
2887 signotset(&unblocked);
2888 retarget_shared_pending(tsk, &unblocked);
2889
2890 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2891 task_participate_group_stop(tsk))
2892 group_stop = CLD_STOPPED;
2893 out:
2894 spin_unlock_irq(&tsk->sighand->siglock);
2895
2896 /*
2897 * If group stop has completed, deliver the notification. This
2898 * should always go to the real parent of the group leader.
2899 */
2900 if (unlikely(group_stop)) {
2901 read_lock(&tasklist_lock);
2902 do_notify_parent_cldstop(tsk, false, group_stop);
2903 read_unlock(&tasklist_lock);
2904 }
2905 }
2906
2907 /*
2908 * System call entry points.
2909 */
2910
2911 /**
2912 * sys_restart_syscall - restart a system call
2913 */
2914 SYSCALL_DEFINE0(restart_syscall)
2915 {
2916 struct restart_block *restart = &current->restart_block;
2917 return restart->fn(restart);
2918 }
2919
2920 long do_no_restart_syscall(struct restart_block *param)
2921 {
2922 return -EINTR;
2923 }
2924
2925 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2926 {
2927 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2928 sigset_t newblocked;
2929 /* A set of now blocked but previously unblocked signals. */
2930 sigandnsets(&newblocked, newset, &current->blocked);
2931 retarget_shared_pending(tsk, &newblocked);
2932 }
2933 tsk->blocked = *newset;
2934 recalc_sigpending();
2935 }
2936
2937 /**
2938 * set_current_blocked - change current->blocked mask
2939 * @newset: new mask
2940 *
2941 * It is wrong to change ->blocked directly, this helper should be used
2942 * to ensure the process can't miss a shared signal we are going to block.
2943 */
2944 void set_current_blocked(sigset_t *newset)
2945 {
2946 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2947 __set_current_blocked(newset);
2948 }
2949
2950 void __set_current_blocked(const sigset_t *newset)
2951 {
2952 struct task_struct *tsk = current;
2953
2954 /*
2955 * In case the signal mask hasn't changed, there is nothing we need
2956 * to do. The current->blocked shouldn't be modified by another task.
2957 */
2958 if (sigequalsets(&tsk->blocked, newset))
2959 return;
2960
2961 spin_lock_irq(&tsk->sighand->siglock);
2962 __set_task_blocked(tsk, newset);
2963 spin_unlock_irq(&tsk->sighand->siglock);
2964 }
2965
2966 /*
2967 * This is also useful for kernel threads that want to temporarily
2968 * (or permanently) block certain signals.
2969 *
2970 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2971 * interface happily blocks "unblockable" signals like SIGKILL
2972 * and friends.
2973 */
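/*
 * Example (an illustrative sketch, not part of the original source): a
 * kernel thread could use the sigprocmask() helper below to temporarily
 * block every signal except SIGKILL, then restore its old mask afterwards:
 *
 *	sigset_t all, old;
 *
 *	sigfillset(&all);
 *	sigdelset(&all, SIGKILL);
 *	sigprocmask(SIG_SETMASK, &all, &old);
 *	(do signal-sensitive work)
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */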
2974 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2975 {
2976 struct task_struct *tsk = current;
2977 sigset_t newset;
2978
2979 /* Lockless, only current can change ->blocked, never from irq */
2980 if (oldset)
2981 *oldset = tsk->blocked;
2982
2983 switch (how) {
2984 case SIG_BLOCK:
2985 sigorsets(&newset, &tsk->blocked, set);
2986 break;
2987 case SIG_UNBLOCK:
2988 sigandnsets(&newset, &tsk->blocked, set);
2989 break;
2990 case SIG_SETMASK:
2991 newset = *set;
2992 break;
2993 default:
2994 return -EINVAL;
2995 }
2996
2997 __set_current_blocked(&newset);
2998 return 0;
2999 }
3000 EXPORT_SYMBOL(sigprocmask);
3001
3002 /*
3003 * This API helps set app-provided sigmasks.
3004 *
3005 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3006 * epoll_pwait where a new sigmask is passed in from userland for the
3007 * duration of the syscall.
3008 *
3009 * Note that it does set_restore_sigmask() in advance, so it must always
3010 * be paired with restore_saved_sigmask_unless() before return from syscall.
3011 */
3011 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3012 {
3013 sigset_t kmask;
3014
3015 if (!umask)
3016 return 0;
3017 if (sigsetsize != sizeof(sigset_t))
3018 return -EINVAL;
3019 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3020 return -EFAULT;
3021
3022 set_restore_sigmask();
3023 current->saved_sigmask = current->blocked;
3024 set_current_blocked(&kmask);
3025
3026 return 0;
3027 }
3028
3029 #ifdef CONFIG_COMPAT
3030 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3031 size_t sigsetsize)
3032 {
3033 sigset_t kmask;
3034
3035 if (!umask)
3036 return 0;
3037 if (sigsetsize != sizeof(compat_sigset_t))
3038 return -EINVAL;
3039 if (get_compat_sigset(&kmask, umask))
3040 return -EFAULT;
3041
3042 set_restore_sigmask();
3043 current->saved_sigmask = current->blocked;
3044 set_current_blocked(&kmask);
3045
3046 return 0;
3047 }
3048 #endif
3049
3050 /**
3051 * sys_rt_sigprocmask - change the list of currently blocked signals
3052 * @how: whether to add, remove, or set signals
3053 * @nset: the new set of blocked signals, if non-null
3054 * @oset: previous value of signal mask if non-null
3055 * @sigsetsize: size of sigset_t type
3056 */
3057 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3058 sigset_t __user *, oset, size_t, sigsetsize)
3059 {
3060 sigset_t old_set, new_set;
3061 int error;
3062
3063 /* XXX: Don't preclude handling different sized sigset_t's. */
3064 if (sigsetsize != sizeof(sigset_t))
3065 return -EINVAL;
3066
3067 old_set = current->blocked;
3068
3069 if (nset) {
3070 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3071 return -EFAULT;
3072 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3073
3074 error = sigprocmask(how, &new_set, NULL);
3075 if (error)
3076 return error;
3077 }
3078
3079 if (oset) {
3080 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3081 return -EFAULT;
3082 }
3083
3084 return 0;
3085 }
3086
3087 #ifdef CONFIG_COMPAT
3088 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3089 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3090 {
3091 sigset_t old_set = current->blocked;
3092
3093 /* XXX: Don't preclude handling different sized sigset_t's. */
3094 if (sigsetsize != sizeof(sigset_t))
3095 return -EINVAL;
3096
3097 if (nset) {
3098 sigset_t new_set;
3099 int error;
3100 if (get_compat_sigset(&new_set, nset))
3101 return -EFAULT;
3102 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3103
3104 error = sigprocmask(how, &new_set, NULL);
3105 if (error)
3106 return error;
3107 }
3108 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3109 }
3110 #endif
3111
3112 static void do_sigpending(sigset_t *set)
3113 {
3114 spin_lock_irq(&current->sighand->siglock);
3115 sigorsets(set, &current->pending.signal,
3116 &current->signal->shared_pending.signal);
3117 spin_unlock_irq(&current->sighand->siglock);
3118
3119 /* Outside the lock because only this thread touches it. */
3120 sigandsets(set, &current->blocked, set);
3121 }
3122
3123 /**
3124 * sys_rt_sigpending - examine a pending signal that has been raised
3125 * while blocked
3126 * @uset: stores pending signals
3127 * @sigsetsize: size of sigset_t type or larger
3128 */
3129 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3130 {
3131 sigset_t set;
3132
3133 if (sigsetsize > sizeof(*uset))
3134 return -EINVAL;
3135
3136 do_sigpending(&set);
3137
3138 if (copy_to_user(uset, &set, sigsetsize))
3139 return -EFAULT;
3140
3141 return 0;
3142 }
3143
3144 #ifdef CONFIG_COMPAT
3145 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3146 compat_size_t, sigsetsize)
3147 {
3148 sigset_t set;
3149
3150 if (sigsetsize > sizeof(*uset))
3151 return -EINVAL;
3152
3153 do_sigpending(&set);
3154
3155 return put_compat_sigset(uset, &set, sigsetsize);
3156 }
3157 #endif
3158
3159 static const struct {
3160 unsigned char limit, layout;
3161 } sig_sicodes[] = {
3162 [SIGILL] = { NSIGILL, SIL_FAULT },
3163 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3164 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3165 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3166 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3167 #if defined(SIGEMT)
3168 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3169 #endif
3170 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3171 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3172 [SIGSYS] = { NSIGSYS, SIL_SYS },
3173 };
3174
3175 static bool known_siginfo_layout(unsigned sig, int si_code)
3176 {
3177 if (si_code == SI_KERNEL)
3178 return true;
3179 else if (si_code > SI_USER) {
3180 if (sig_specific_sicodes(sig)) {
3181 if (si_code <= sig_sicodes[sig].limit)
3182 return true;
3183 }
3184 else if (si_code <= NSIGPOLL)
3185 return true;
3186 }
3187 else if (si_code >= SI_DETHREAD)
3188 return true;
3189 else if (si_code == SI_ASYNCNL)
3190 return true;
3191 return false;
3192 }
3193
3194 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3195 {
3196 enum siginfo_layout layout = SIL_KILL;
3197 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3198 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3199 (si_code <= sig_sicodes[sig].limit)) {
3200 layout = sig_sicodes[sig].layout;
3201 /* Handle the exceptions */
3202 if ((sig == SIGBUS) &&
3203 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3204 layout = SIL_FAULT_MCEERR;
3205 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3206 layout = SIL_FAULT_BNDERR;
3207 #ifdef SEGV_PKUERR
3208 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3209 layout = SIL_FAULT_PKUERR;
3210 #endif
3211 }
3212 else if (si_code <= NSIGPOLL)
3213 layout = SIL_POLL;
3214 } else {
3215 if (si_code == SI_TIMER)
3216 layout = SIL_TIMER;
3217 else if (si_code == SI_SIGIO)
3218 layout = SIL_POLL;
3219 else if (si_code < 0)
3220 layout = SIL_RT;
3221 }
3222 return layout;
3223 }
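/*
 * Worked example (illustrative): how siginfo_layout() above classifies a
 * few common (signal, si_code) pairs:
 *
 *	siginfo_layout(SIGSEGV, SEGV_MAPERR)  returns SIL_FAULT
 *	siginfo_layout(SIGSEGV, SEGV_BNDERR)  returns SIL_FAULT_BNDERR
 *	siginfo_layout(SIGCHLD, CLD_EXITED)   returns SIL_CHLD
 *	siginfo_layout(SIGUSR1, SI_QUEUE)     returns SIL_RT (si_code < 0)
 *	siginfo_layout(SIGINT, SI_USER)       returns SIL_KILL (the default)
 */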
3224
3225 static inline char __user *si_expansion(const siginfo_t __user *info)
3226 {
3227 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3228 }
3229
3230 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3231 {
3232 char __user *expansion = si_expansion(to);
3233 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3234 return -EFAULT;
3235 if (clear_user(expansion, SI_EXPANSION_SIZE))
3236 return -EFAULT;
3237 return 0;
3238 }
3239
3240 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3241 const siginfo_t __user *from)
3242 {
3243 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3244 char __user *expansion = si_expansion(from);
3245 char buf[SI_EXPANSION_SIZE];
3246 int i;
3247 /*
3248 * An unknown si_code might need more than
3249 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3250 * extra bytes are 0. This guarantees copy_siginfo_to_user
3251 * will return this data to userspace exactly.
3252 */
3253 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3254 return -EFAULT;
3255 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3256 if (buf[i] != 0)
3257 return -E2BIG;
3258 }
3259 }
3260 return 0;
3261 }
3262
3263 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3264 const siginfo_t __user *from)
3265 {
3266 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3267 return -EFAULT;
3268 to->si_signo = signo;
3269 return post_copy_siginfo_from_user(to, from);
3270 }
3271
3272 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3273 {
3274 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3275 return -EFAULT;
3276 return post_copy_siginfo_from_user(to, from);
3277 }
3278
3279 #ifdef CONFIG_COMPAT
3280 /**
3281 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3282 * @to: compat siginfo destination
3283 * @from: kernel siginfo source
3284 *
3285 * Note: This function does not work properly for SIGCHLD on x32, but
3286 * fortunately it doesn't have to. The only valid callers for this function are
3287 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3288 * The latter does not care because SIGCHLD will never cause a coredump.
3289 */ 3290 void copy_siginfo_to_external32(struct compat_siginfo *to, 3291 const struct kernel_siginfo *from) 3292 { 3293 memset(to, 0, sizeof(*to)); 3294 3295 to->si_signo = from->si_signo; 3296 to->si_errno = from->si_errno; 3297 to->si_code = from->si_code; 3298 switch(siginfo_layout(from->si_signo, from->si_code)) { 3299 case SIL_KILL: 3300 to->si_pid = from->si_pid; 3301 to->si_uid = from->si_uid; 3302 break; 3303 case SIL_TIMER: 3304 to->si_tid = from->si_tid; 3305 to->si_overrun = from->si_overrun; 3306 to->si_int = from->si_int; 3307 break; 3308 case SIL_POLL: 3309 to->si_band = from->si_band; 3310 to->si_fd = from->si_fd; 3311 break; 3312 case SIL_FAULT: 3313 to->si_addr = ptr_to_compat(from->si_addr); 3314 #ifdef __ARCH_SI_TRAPNO 3315 to->si_trapno = from->si_trapno; 3316 #endif 3317 break; 3318 case SIL_FAULT_MCEERR: 3319 to->si_addr = ptr_to_compat(from->si_addr); 3320 #ifdef __ARCH_SI_TRAPNO 3321 to->si_trapno = from->si_trapno; 3322 #endif 3323 to->si_addr_lsb = from->si_addr_lsb; 3324 break; 3325 case SIL_FAULT_BNDERR: 3326 to->si_addr = ptr_to_compat(from->si_addr); 3327 #ifdef __ARCH_SI_TRAPNO 3328 to->si_trapno = from->si_trapno; 3329 #endif 3330 to->si_lower = ptr_to_compat(from->si_lower); 3331 to->si_upper = ptr_to_compat(from->si_upper); 3332 break; 3333 case SIL_FAULT_PKUERR: 3334 to->si_addr = ptr_to_compat(from->si_addr); 3335 #ifdef __ARCH_SI_TRAPNO 3336 to->si_trapno = from->si_trapno; 3337 #endif 3338 to->si_pkey = from->si_pkey; 3339 break; 3340 case SIL_CHLD: 3341 to->si_pid = from->si_pid; 3342 to->si_uid = from->si_uid; 3343 to->si_status = from->si_status; 3344 to->si_utime = from->si_utime; 3345 to->si_stime = from->si_stime; 3346 break; 3347 case SIL_RT: 3348 to->si_pid = from->si_pid; 3349 to->si_uid = from->si_uid; 3350 to->si_int = from->si_int; 3351 break; 3352 case SIL_SYS: 3353 to->si_call_addr = ptr_to_compat(from->si_call_addr); 3354 to->si_syscall = from->si_syscall; 3355 to->si_arch = from->si_arch; 3356 break; 3357 } 3358 } 3359 3360 int __copy_siginfo_to_user32(struct compat_siginfo __user *to, 3361 const struct kernel_siginfo *from) 3362 { 3363 struct compat_siginfo new; 3364 3365 copy_siginfo_to_external32(&new, from); 3366 if (copy_to_user(to, &new, sizeof(struct compat_siginfo))) 3367 return -EFAULT; 3368 return 0; 3369 } 3370 3371 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to, 3372 const struct compat_siginfo *from) 3373 { 3374 clear_siginfo(to); 3375 to->si_signo = from->si_signo; 3376 to->si_errno = from->si_errno; 3377 to->si_code = from->si_code; 3378 switch(siginfo_layout(from->si_signo, from->si_code)) { 3379 case SIL_KILL: 3380 to->si_pid = from->si_pid; 3381 to->si_uid = from->si_uid; 3382 break; 3383 case SIL_TIMER: 3384 to->si_tid = from->si_tid; 3385 to->si_overrun = from->si_overrun; 3386 to->si_int = from->si_int; 3387 break; 3388 case SIL_POLL: 3389 to->si_band = from->si_band; 3390 to->si_fd = from->si_fd; 3391 break; 3392 case SIL_FAULT: 3393 to->si_addr = compat_ptr(from->si_addr); 3394 #ifdef __ARCH_SI_TRAPNO 3395 to->si_trapno = from->si_trapno; 3396 #endif 3397 break; 3398 case SIL_FAULT_MCEERR: 3399 to->si_addr = compat_ptr(from->si_addr); 3400 #ifdef __ARCH_SI_TRAPNO 3401 to->si_trapno = from->si_trapno; 3402 #endif 3403 to->si_addr_lsb = from->si_addr_lsb; 3404 break; 3405 case SIL_FAULT_BNDERR: 3406 to->si_addr = compat_ptr(from->si_addr); 3407 #ifdef __ARCH_SI_TRAPNO 3408 to->si_trapno = from->si_trapno; 3409 #endif 3410 to->si_lower = compat_ptr(from->si_lower); 3411 to->si_upper = 
compat_ptr(from->si_upper);
3412 break;
3413 case SIL_FAULT_PKUERR:
3414 to->si_addr = compat_ptr(from->si_addr);
3415 #ifdef __ARCH_SI_TRAPNO
3416 to->si_trapno = from->si_trapno;
3417 #endif
3418 to->si_pkey = from->si_pkey;
3419 break;
3420 case SIL_CHLD:
3421 to->si_pid = from->si_pid;
3422 to->si_uid = from->si_uid;
3423 to->si_status = from->si_status;
3424 #ifdef CONFIG_X86_X32_ABI
3425 if (in_x32_syscall()) {
3426 to->si_utime = from->_sifields._sigchld_x32._utime;
3427 to->si_stime = from->_sifields._sigchld_x32._stime;
3428 } else
3429 #endif
3430 {
3431 to->si_utime = from->si_utime;
3432 to->si_stime = from->si_stime;
3433 }
3434 break;
3435 case SIL_RT:
3436 to->si_pid = from->si_pid;
3437 to->si_uid = from->si_uid;
3438 to->si_int = from->si_int;
3439 break;
3440 case SIL_SYS:
3441 to->si_call_addr = compat_ptr(from->si_call_addr);
3442 to->si_syscall = from->si_syscall;
3443 to->si_arch = from->si_arch;
3444 break;
3445 }
3446 return 0;
3447 }
3448
3449 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3450 const struct compat_siginfo __user *ufrom)
3451 {
3452 struct compat_siginfo from;
3453
3454 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3455 return -EFAULT;
3456
3457 from.si_signo = signo;
3458 return post_copy_siginfo_from_user32(to, &from);
3459 }
3460
3461 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3462 const struct compat_siginfo __user *ufrom)
3463 {
3464 struct compat_siginfo from;
3465
3466 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3467 return -EFAULT;
3468
3469 return post_copy_siginfo_from_user32(to, &from);
3470 }
3471 #endif /* CONFIG_COMPAT */
3472
3473 /**
3474 * do_sigtimedwait - wait for queued signals specified in @which
3475 * @which: queued signals to wait for
3476 * @info: if non-null, the signal's siginfo is returned here
3477 * @ts: upper bound on process time suspension
3478 */
3479 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3480 const struct timespec64 *ts)
3481 {
3482 ktime_t *to = NULL, timeout = KTIME_MAX;
3483 struct task_struct *tsk = current;
3484 sigset_t mask = *which;
3485 int sig, ret = 0;
3486
3487 if (ts) {
3488 if (!timespec64_valid(ts))
3489 return -EINVAL;
3490 timeout = timespec64_to_ktime(*ts);
3491 to = &timeout;
3492 }
3493
3494 /*
3495 * Invert the set of allowed signals to get those we want to block.
3496 */
3497 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3498 signotset(&mask);
3499
3500 spin_lock_irq(&tsk->sighand->siglock);
3501 sig = dequeue_signal(tsk, &mask, info);
3502 if (!sig && timeout) {
3503 /*
3504 * None ready, temporarily unblock those we're interested
3505 * in while we are sleeping, so that we'll be awakened when
3506 * they arrive. Unblocking is always fine, we can avoid
3507 * set_current_blocked().
3508 */
3509 tsk->real_blocked = tsk->blocked;
3510 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3511 recalc_sigpending();
3512 spin_unlock_irq(&tsk->sighand->siglock);
3513
3514 __set_current_state(TASK_INTERRUPTIBLE);
3515 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3516 HRTIMER_MODE_REL);
3517 spin_lock_irq(&tsk->sighand->siglock);
3518 __set_task_blocked(tsk, &tsk->real_blocked);
3519 sigemptyset(&tsk->real_blocked);
3520 sig = dequeue_signal(tsk, &mask, info);
3521 }
3522 spin_unlock_irq(&tsk->sighand->siglock);
3523
3524 if (sig)
3525 return sig;
3526 return ret ? -EINTR : -EAGAIN;
3527 }
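/*
 * Example (illustrative, userspace): the block/dequeue/sleep/re-dequeue
 * dance in do_sigtimedwait() above backs sigtimedwait(2). A typical
 * caller blocks the signal first so it stays queued instead of being
 * delivered:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) < 0 && errno == EAGAIN)
 *		(the five-second timeout expired)
 */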
3528
3529 /**
3530 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3531 * in @uthese
3532 * @uthese: queued signals to wait for
3533 * @uinfo: if non-null, the signal's siginfo is returned here
3534 * @uts: upper bound on process time suspension
3535 * @sigsetsize: size of sigset_t type
3536 */
3537 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3538 siginfo_t __user *, uinfo,
3539 const struct __kernel_timespec __user *, uts,
3540 size_t, sigsetsize)
3541 {
3542 sigset_t these;
3543 struct timespec64 ts;
3544 kernel_siginfo_t info;
3545 int ret;
3546
3547 /* XXX: Don't preclude handling different sized sigset_t's. */
3548 if (sigsetsize != sizeof(sigset_t))
3549 return -EINVAL;
3550
3551 if (copy_from_user(&these, uthese, sizeof(these)))
3552 return -EFAULT;
3553
3554 if (uts) {
3555 if (get_timespec64(&ts, uts))
3556 return -EFAULT;
3557 }
3558
3559 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3560
3561 if (ret > 0 && uinfo) {
3562 if (copy_siginfo_to_user(uinfo, &info))
3563 ret = -EFAULT;
3564 }
3565
3566 return ret;
3567 }
3568
3569 #ifdef CONFIG_COMPAT_32BIT_TIME
3570 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3571 siginfo_t __user *, uinfo,
3572 const struct old_timespec32 __user *, uts,
3573 size_t, sigsetsize)
3574 {
3575 sigset_t these;
3576 struct timespec64 ts;
3577 kernel_siginfo_t info;
3578 int ret;
3579
3580 if (sigsetsize != sizeof(sigset_t))
3581 return -EINVAL;
3582
3583 if (copy_from_user(&these, uthese, sizeof(these)))
3584 return -EFAULT;
3585
3586 if (uts) {
3587 if (get_old_timespec32(&ts, uts))
3588 return -EFAULT;
3589 }
3590
3591 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3592
3593 if (ret > 0 && uinfo) {
3594 if (copy_siginfo_to_user(uinfo, &info))
3595 ret = -EFAULT;
3596 }
3597
3598 return ret;
3599 }
3600 #endif
3601
3602 #ifdef CONFIG_COMPAT
3603 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3604 struct compat_siginfo __user *, uinfo,
3605 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3606 {
3607 sigset_t s;
3608 struct timespec64 t;
3609 kernel_siginfo_t info;
3610 long ret;
3611
3612 if (sigsetsize != sizeof(sigset_t))
3613 return -EINVAL;
3614
3615 if (get_compat_sigset(&s, uthese))
3616 return -EFAULT;
3617
3618 if (uts) {
3619 if (get_timespec64(&t, uts))
3620 return -EFAULT;
3621 }
3622
3623 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3624
3625 if (ret > 0 && uinfo) {
3626 if (copy_siginfo_to_user32(uinfo, &info))
3627 ret = -EFAULT;
3628 }
3629
3630 return ret;
3631 }
3632
3633 #ifdef CONFIG_COMPAT_32BIT_TIME
3634 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3635 struct compat_siginfo __user *, uinfo,
3636 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3637 {
3638 sigset_t s;
3639 struct timespec64 t;
3640 kernel_siginfo_t info;
3641 long ret;
3642
3643 if (sigsetsize != sizeof(sigset_t))
3644 return -EINVAL;
3645
3646 if (get_compat_sigset(&s, uthese))
3647 return -EFAULT;
3648
3649 if (uts) {
3650 if (get_old_timespec32(&t, uts))
3651 return -EFAULT;
3652 }
3653
3654 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3655
3656 if (ret > 0 && uinfo) {
3657 if (copy_siginfo_to_user32(uinfo, &info))
3658 ret = -EFAULT;
3659 }
3660
3661 return ret;
3662 }
3663 #endif
3664 #endif
3665
3666 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3667 {
3668 clear_siginfo(info);
3669 info->si_signo = sig;
3670 info->si_errno = 0;
3671 info->si_code = SI_USER;
3672 info->si_pid = task_tgid_vnr(current);
3673 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3674 }
3675
3676 /**
3677 * sys_kill - send a signal to a process
3678 * @pid: the PID of the process
3679 * @sig: signal to be sent
3680 */
3681 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3682 {
3683 struct kernel_siginfo info;
3684
3685 prepare_kill_siginfo(sig, &info);
3686
3687 return kill_something_info(sig, &info, pid);
3688 }
3689
3690 /*
3691 * Verify that the signaler and signalee either are in the same pid namespace
3692 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3693 * namespace.
3694 */
3695 static bool access_pidfd_pidns(struct pid *pid)
3696 {
3697 struct pid_namespace *active = task_active_pid_ns(current);
3698 struct pid_namespace *p = ns_of_pid(pid);
3699
3700 for (;;) {
3701 if (!p)
3702 return false;
3703 if (p == active)
3704 break;
3705 p = p->parent;
3706 }
3707
3708 return true;
3709 }
3710
3711 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3712 siginfo_t __user *info)
3713 {
3714 #ifdef CONFIG_COMPAT
3715 /*
3716 * Avoid hooking up compat syscalls and instead handle necessary
3717 * conversions here. Note, this is a stop-gap measure and should not be
3718 * considered a generic solution.
3719 */
3720 if (in_compat_syscall())
3721 return copy_siginfo_from_user32(
3722 kinfo, (struct compat_siginfo __user *)info);
3723 #endif
3724 return copy_siginfo_from_user(kinfo, info);
3725 }
3726
3727 static struct pid *pidfd_to_pid(const struct file *file)
3728 {
3729 struct pid *pid;
3730
3731 pid = pidfd_pid(file);
3732 if (!IS_ERR(pid))
3733 return pid;
3734
3735 return tgid_pidfd_to_pid(file);
3736 }
3737
3738 /**
3739 * sys_pidfd_send_signal - Signal a process through a pidfd
3740 * @pidfd: file descriptor of the process
3741 * @sig: signal to send
3742 * @info: signal info
3743 * @flags: future flags
3744 *
3745 * The syscall currently only signals via PIDTYPE_PID which covers
3746 * kill(<positive-pid>, <signal>). It does not signal threads or process
3747 * groups.
3748 * In order to extend the syscall to threads and process groups the @flags
3749 * argument should be used. In essence, the @flags argument will determine
3750 * what is signaled and not the file descriptor itself. In other words,
3751 * grouping is a property of the flags argument not a property of the file
3752 * descriptor.
3753 *
3754 * Return: 0 on success, negative errno on failure
3755 */
3756 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3757 siginfo_t __user *, info, unsigned int, flags)
3758 {
3759 int ret;
3760 struct fd f;
3761 struct pid *pid;
3762 kernel_siginfo_t kinfo;
3763
3764 /* Enforce flags be set to 0 until we add an extension. */
3765 if (flags)
3766 return -EINVAL;
3767
3768 f = fdget(pidfd);
3769 if (!f.file)
3770 return -EBADF;
3771
3772 /* Is this a pidfd? */
3773 pid = pidfd_to_pid(f.file);
3774 if (IS_ERR(pid)) {
3775 ret = PTR_ERR(pid);
3776 goto err;
3777 }
3778
3779 ret = -EINVAL;
3780 if (!access_pidfd_pidns(pid))
3781 goto err;
3782
3783 if (info) {
3784 ret = copy_siginfo_from_user_any(&kinfo, info);
3785 if (unlikely(ret))
3786 goto err;
3787
3788 ret = -EINVAL;
3789 if (unlikely(sig != kinfo.si_signo))
3790 goto err;
3791
3792 /* Only allow sending arbitrary signals to yourself. */
3793 ret = -EPERM;
3794 if ((task_pid(current) != pid) &&
3795 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3796 goto err;
3797 } else {
3798 prepare_kill_siginfo(sig, &kinfo);
3799 }
3800
3801 ret = kill_pid_info(sig, &kinfo, pid);
3802
3803 err:
3804 fdput(f);
3805 return ret;
3806 }
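/*
 * Example (illustrative, userspace): signalling a process race-free via a
 * pidfd rather than a raw PID:
 *
 *	int pidfd = syscall(SYS_pidfd_open, child_pid, 0);
 *
 *	if (pidfd >= 0)
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *
 * With a NULL siginfo the kernel takes the prepare_kill_siginfo() path
 * above, so the result looks like a plain kill(2) to the target.
 */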
3807
3808 static int
3809 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3810 {
3811 struct task_struct *p;
3812 int error = -ESRCH;
3813
3814 rcu_read_lock();
3815 p = find_task_by_vpid(pid);
3816 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3817 error = check_kill_permission(sig, info, p);
3818 /*
3819 * The null signal is a permissions and process existence
3820 * probe. No signal is actually delivered.
3821 */
3822 if (!error && sig) {
3823 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3824 /*
3825 * If lock_task_sighand() failed we pretend the task
3826 * dies after receiving the signal. The window is tiny,
3827 * and the signal is private anyway.
3828 */
3829 if (unlikely(error == -ESRCH))
3830 error = 0;
3831 }
3832 }
3833 rcu_read_unlock();
3834
3835 return error;
3836 }
3837
3838 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3839 {
3840 struct kernel_siginfo info;
3841
3842 clear_siginfo(&info);
3843 info.si_signo = sig;
3844 info.si_errno = 0;
3845 info.si_code = SI_TKILL;
3846 info.si_pid = task_tgid_vnr(current);
3847 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3848
3849 return do_send_specific(tgid, pid, sig, &info);
3850 }
3851
3852 /**
3853 * sys_tgkill - send signal to one specific thread
3854 * @tgid: the thread group ID of the thread
3855 * @pid: the PID of the thread
3856 * @sig: signal to be sent
3857 *
3858 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3859 * exists but no longer belongs to the target process. This method solves
3860 * the problem of threads exiting and PIDs getting reused.
3861 */
3862 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3863 {
3864 /* This is only valid for single tasks */
3865 if (pid <= 0 || tgid <= 0)
3866 return -EINVAL;
3867
3868 return do_tkill(tgid, pid, sig);
3869 }
3870
3871 /**
3872 * sys_tkill - send signal to one specific task
3873 * @pid: the PID of the task
3874 * @sig: signal to be sent
3875 *
3876 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3877 */
3878 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3879 {
3880 /* This is only valid for single tasks */
3881 if (pid <= 0)
3882 return -EINVAL;
3883
3884 return do_tkill(0, pid, sig);
3885 }
3886
3887 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3888 {
3889 /* Not even root can pretend to send signals from the kernel.
3890 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3891 */
3892 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3893 (task_pid_vnr(current) != pid))
3894 return -EPERM;
3895
3896 /* POSIX.1b doesn't mention process groups. */
3897 return kill_proc_info(sig, info, pid);
3898 }
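/*
 * Example (illustrative, userspace): sigqueue(3) is the usual front end
 * for rt_sigqueueinfo below. It queues a signal with a caller-chosen
 * payload and si_code == SI_QUEUE, which is negative and therefore passes
 * the "can't impersonate the kernel" check in do_rt_sigqueueinfo():
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(target_pid, SIGUSR1, v);
 */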
3899
3900 /**
3901 * sys_rt_sigqueueinfo - send signal information to a process
3902 * @pid: the PID of the thread
3903 * @sig: signal to be sent
3904 * @uinfo: signal info to be sent
3905 */
3906 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3907 siginfo_t __user *, uinfo)
3908 {
3909 kernel_siginfo_t info;
3910 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3911 if (unlikely(ret))
3912 return ret;
3913 return do_rt_sigqueueinfo(pid, sig, &info);
3914 }
3915
3916 #ifdef CONFIG_COMPAT
3917 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3918 compat_pid_t, pid,
3919 int, sig,
3920 struct compat_siginfo __user *, uinfo)
3921 {
3922 kernel_siginfo_t info;
3923 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3924 if (unlikely(ret))
3925 return ret;
3926 return do_rt_sigqueueinfo(pid, sig, &info);
3927 }
3928 #endif
3929
3930 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3931 {
3932 /* This is only valid for single tasks */
3933 if (pid <= 0 || tgid <= 0)
3934 return -EINVAL;
3935
3936 /* Not even root can pretend to send signals from the kernel.
3937 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3938 */
3939 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3940 (task_pid_vnr(current) != pid))
3941 return -EPERM;
3942
3943 return do_send_specific(tgid, pid, sig, info);
3944 }
3945
3946 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3947 siginfo_t __user *, uinfo)
3948 {
3949 kernel_siginfo_t info;
3950 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3951 if (unlikely(ret))
3952 return ret;
3953 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3954 }
3955
3956 #ifdef CONFIG_COMPAT
3957 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3958 compat_pid_t, tgid,
3959 compat_pid_t, pid,
3960 int, sig,
3961 struct compat_siginfo __user *, uinfo)
3962 {
3963 kernel_siginfo_t info;
3964 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3965 if (unlikely(ret))
3966 return ret;
3967 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3968 }
3969 #endif
3970
3971 /*
3972 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3973 */
3974 void kernel_sigaction(int sig, __sighandler_t action)
3975 {
3976 spin_lock_irq(&current->sighand->siglock);
3977 current->sighand->action[sig - 1].sa.sa_handler = action;
3978 if (action == SIG_IGN) {
3979 sigset_t mask;
3980
3981 sigemptyset(&mask);
3982 sigaddset(&mask, sig);
3983
3984 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3985 flush_sigqueue_mask(&mask, &current->pending);
3986 recalc_sigpending();
3987 }
3988 spin_unlock_irq(&current->sighand->siglock);
3989 }
3990 EXPORT_SYMBOL(kernel_sigaction);
3991
3992 void __weak sigaction_compat_abi(struct k_sigaction *act,
3993 struct k_sigaction *oact)
3994 {
3995 }
3996
3997 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3998 {
3999 struct task_struct *p = current, *t;
4000 struct k_sigaction *k;
4001 sigset_t mask;
4002
4003 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4004 return -EINVAL;
4005
4006 k = &p->sighand->action[sig-1];
4007
4008 spin_lock_irq(&p->sighand->siglock);
4009 if (oact)
4010 *oact = *k;
4011
4012 /*
4013 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4014 * e.g. by having an architecture use the bit in their uapi.
void __weak sigaction_compat_abi(struct k_sigaction *act,
				 struct k_sigaction *oact)
{
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig - 1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	/*
	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
	 * e.g. by having an architecture use the bit in their uapi.
	 */
	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);

	/*
	 * Clear unknown flag bits in order to allow userspace to detect missing
	 * support for flag bits and to allow the kernel to use non-uapi bits
	 * internally.
	 */
	if (act)
		act->sa.sa_flags &= UAPI_SA_FLAGS;
	if (oact)
		oact->sa.sa_flags &= UAPI_SA_FLAGS;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked."
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}

static int
do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
	       size_t min_ss_size)
{
	struct task_struct *t = current;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned int ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}
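/*
 * Userspace usage sketch (illustrative only; error handling elided): an
 * alternate stack is the standard way to catch stack-overflow SIGSEGVs,
 * since the handler cannot run on the stack that just overflowed.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	static void install_altstack(void (*handler)(int))
 *	{
 *		stack_t ss = {
 *			.ss_sp = malloc(SIGSTKSZ),
 *			.ss_size = SIGSTKSZ,
 *			.ss_flags = 0,
 *		};
 *		struct sigaction sa = {
 *			.sa_handler = handler,
 *			.sa_flags = SA_ONSTACK,
 *		};
 *
 *		sigaltstack(&ss, NULL);	// fails with ENOMEM if
 *					// ss_size < MINSIGSTKSZ
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */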
SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
{
	stack_t new, old;
	int err;

	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer(),
			     MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}

int restore_altstack(const stack_t __user *uss)
{
	stack_t new;

	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but -EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);

	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}

#ifdef CONFIG_COMPAT
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;

		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr) {
		compat_stack_t old;

		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = do_compat_sigaltstack(uss, NULL);

	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;

	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#endif
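/*
 * Architecture-code usage note (simplified, illustrative): the
 * __save_altstack()/restore_altstack() pair is meant for per-arch signal
 * frame code - the altstack state is written into the user ucontext on
 * delivery and read back on return, roughly:
 *
 *	// in an arch's setup_rt_frame(), while filling the user frame:
 *	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
 *
 *	// in that arch's sys_rt_sigreturn():
 *	if (restore_altstack(&frame->uc.uc_stack))
 *		goto badframe;
 */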
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 * sys_sigpending - examine pending signals
 * @uset: where the mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-NULL)
 * @oset: where the previous signal mask is returned (if non-NULL)
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */
SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
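/*
 * Userspace usage sketch (illustrative only): modern libcs implement
 * sigprocmask(2) on top of rt_sigprocmask; the classic pattern blocks a
 * signal across a critical section and then restores the old mask.
 *
 *	#include <signal.h>
 *
 *	static void with_sigint_blocked(void (*fn)(void))
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		sigprocmask(SIG_BLOCK, &set, &old);
 *		fn();				// SIGINT stays pending here
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */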
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal whose action is to be changed
 * @act: new sigaction, or NULL to only query the old one
 * @oact: used to save the previous sigaction
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;

		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */
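/*
 * Userspace usage sketch (illustrative only): sigaction(2) in libc is a
 * thin wrapper around rt_sigaction; SA_SIGINFO selects the three-argument
 * handler form, making the queued siginfo_t visible to the handler.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	static void on_sig(int sig, siginfo_t *si, void *uctx)
 *	{
 *		// printf is not async-signal-safe; used for brevity only
 *		printf("sig %d from pid %d\n", sig, (int)si->si_pid);
 *	}
 *
 *	static void install(int sig)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sigemptyset(&sa.sa_mask);
 *		sa.sa_flags = SA_SIGINFO;
 *		sa.sa_sigaction = on_sig;
 *		sigaction(sig, &sa, NULL);
 *	}
 */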
#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
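/*
 * Semantics note with a userspace sketch (illustrative only): the
 * SA_ONESHOT | SA_NOMASK combination above gives signal(2) its historical
 * System V behaviour - the handler is reset to SIG_DFL on delivery and
 * does not mask itself, so it must be re-armed by hand:
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void on_int(int sig)
 *	{
 *		signal(SIGINT, on_int);	// re-arm; racy by design,
 *					// sigaction(2) avoids this
 *	}
 *
 *	int main(void)
 *	{
 *		signal(SIGINT, on_int);
 *		for (;;)
 *			pause();	// see sys_pause() below
 *	}
 */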
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 * sys_rt_sigsuspend - replace the signal mask with @unewset until a
 *	signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset,
		       compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
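/*
 * Userspace usage sketch (illustrative only; assumes a SIGUSR1 handler
 * elsewhere sets got_usr1): sigsuspend(2) exists to make "unblock and
 * wait" atomic - checking a flag and then calling pause() would leave a
 * window in which the signal is lost.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	static void wait_for_usr1(void)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_usr1)
 *			sigsuspend(&old);	// atomically unblock + sleep
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */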
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}

void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;

	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The siglock is held somewhere else in "
			   "the kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
}
#endif /* CONFIG_KGDB_KDB */
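/*
 * kdb usage note (illustrative session, hypothetical PID): kdb_send_sig()
 * above backs the kdb "kill" command, e.g. at the debugger prompt:
 *
 *	kdb> kill -9 1234
 *
 * which sends SIGKILL to PID 1234 once the trylock and run-state checks
 * pass.
 */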