// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, except for
	 * SIGKILL, which can't be reported anyway but can be ignored by a
	 * SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * clear it.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

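/*
 * Editor's sketch, not part of the original file: the usual pattern for
 * changing current->blocked is to update the mask under siglock and then
 * recompute TIF_SIGPENDING, as above. Real code goes through
 * set_current_blocked(); example_block_sigusr1() is a hypothetical,
 * simplified illustration of the locking and recalculation only.
 */
static __maybe_unused void example_block_sigusr1(void)
{
	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, SIGUSR1);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
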
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

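/*
 * Editor's sketch, not part of the original file: next_signal() prefers
 * synchronous signals even when an asynchronous one has a lower number.
 * example_sync_first() is hypothetical; with SIGUSR1 (10 on most
 * architectures) and SIGSEGV (11) both pending and nothing blocked, it
 * returns SIGSEGV.
 */
static __maybe_unused int example_sync_first(void)
{
	struct sigpending pending;
	sigset_t blocked;

	INIT_LIST_HEAD(&pending.list);
	sigemptyset(&pending.signal);
	sigaddset(&pending.signal, SIGUSR1);
	sigaddset(&pending.signal, SIGSEGV);
	sigemptyset(&blocked);

	return next_signal(&pending, &blocked);	/* SIGSEGV, not SIGUSR1 */
}
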
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
 * cleared. If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if it became a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer. Note that we don't need any further
 * locking. @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop. Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts = NULL;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

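/*
 * Editor's sketch, not part of the original file: flush_signals() is meant
 * for kthreads that opt in to signals. A typical loop enables one signal
 * with allow_signal(), reacts to it, and flushes what is left.
 * example_kthread_fn() is hypothetical and would also need
 * <linux/kthread.h> for kthread_should_stop().
 */
static __maybe_unused int example_kthread_fn(void *unused)
{
	allow_signal(SIGTERM);

	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(HZ);
		if (signal_pending(current)) {
			/* react to SIGTERM here, then discard it */
			flush_signals(current);
		}
	}
	return 0;
}
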
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal. Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue. This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space. So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

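/*
 * Editor's sketch, not part of the original file: unhandled_signal() is
 * typically used by arch fault handlers to decide whether a diagnostic is
 * worth printing. example_report_user_fault() is hypothetical.
 */
static __maybe_unused void example_report_user_fault(struct task_struct *tsk,
						     unsigned long addr)
{
	if (unhandled_signal(tsk, SIGSEGV))
		pr_info("%s[%d]: unhandled page fault at %#lx\n",
			tsk->comm, task_pid_nr(tsk), addr);
}
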
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
		   kernel_siginfo_t *info, enum pid_type *type)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	*type = PIDTYPE_PID;
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		*type = PIDTYPE_TGID;
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path also
		 * reduces the timer noise on heavily loaded !highres
		 * systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal. Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL). So those cases clear this
		 * shared flag after we've set it. Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled. That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks. Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

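/*
 * Editor's sketch, not part of the original file: dequeue_signal() must be
 * called with siglock held; a caller that wants "the next deliverable
 * signal for current" looks roughly like this. example_next_signal() is
 * hypothetical.
 */
static __maybe_unused int example_next_signal(kernel_siginfo_t *info)
{
	enum pid_type type;
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &current->blocked, info, &type);
	spin_unlock_irq(&current->sighand->siglock);

	return signr;
}
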
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken. If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event. If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should actually be delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & SIGNAL_GROUP_EXIT) {
		if (signal->core_state)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal. Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread. If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

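/*
 * Editor's sketch, not part of the original file: legacy_queue() is what
 * makes classic signals coalesce while real-time signals accumulate.
 * example_coalesce() is hypothetical; assuming the target neither blocks
 * nor ignores them, it leaves one pending SIGUSR1 but two queued SIGRTMIN
 * instances.
 */
static __maybe_unused void example_coalesce(struct task_struct *p)
{
	send_sig(SIGUSR1, p, 0);
	send_sig(SIGUSR1, p, 0);	/* dropped: SIGUSR1 already pending */
	send_sig(SIGRTMIN, p, 0);
	send_sig(SIGRTMIN, p, 0);	/* queued as a second instance */
}
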
1113 */ 1114 if (sig < SIGRTMIN) 1115 override_rlimit = (is_si_special(info) || info->si_code >= 0); 1116 else 1117 override_rlimit = 0; 1118 1119 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0); 1120 1121 if (q) { 1122 list_add_tail(&q->list, &pending->list); 1123 switch ((unsigned long) info) { 1124 case (unsigned long) SEND_SIG_NOINFO: 1125 clear_siginfo(&q->info); 1126 q->info.si_signo = sig; 1127 q->info.si_errno = 0; 1128 q->info.si_code = SI_USER; 1129 q->info.si_pid = task_tgid_nr_ns(current, 1130 task_active_pid_ns(t)); 1131 rcu_read_lock(); 1132 q->info.si_uid = 1133 from_kuid_munged(task_cred_xxx(t, user_ns), 1134 current_uid()); 1135 rcu_read_unlock(); 1136 break; 1137 case (unsigned long) SEND_SIG_PRIV: 1138 clear_siginfo(&q->info); 1139 q->info.si_signo = sig; 1140 q->info.si_errno = 0; 1141 q->info.si_code = SI_KERNEL; 1142 q->info.si_pid = 0; 1143 q->info.si_uid = 0; 1144 break; 1145 default: 1146 copy_siginfo(&q->info, info); 1147 break; 1148 } 1149 } else if (!is_si_special(info) && 1150 sig >= SIGRTMIN && info->si_code != SI_USER) { 1151 /* 1152 * Queue overflow, abort. We may abort if the 1153 * signal was rt and sent by user using something 1154 * other than kill(). 1155 */ 1156 result = TRACE_SIGNAL_OVERFLOW_FAIL; 1157 ret = -EAGAIN; 1158 goto ret; 1159 } else { 1160 /* 1161 * This is a silent loss of information. We still 1162 * send the signal, but the *info bits are lost. 1163 */ 1164 result = TRACE_SIGNAL_LOSE_INFO; 1165 } 1166 1167 out_set: 1168 signalfd_notify(t, sig); 1169 sigaddset(&pending->signal, sig); 1170 1171 /* Let multiprocess signals appear after on-going forks */ 1172 if (type > PIDTYPE_TGID) { 1173 struct multiprocess_signals *delayed; 1174 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) { 1175 sigset_t *signal = &delayed->signal; 1176 /* Can't queue both a stop and a continue signal */ 1177 if (sig == SIGCONT) 1178 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK); 1179 else if (sig_kernel_stop(sig)) 1180 sigdelset(signal, SIGCONT); 1181 sigaddset(signal, sig); 1182 } 1183 } 1184 1185 complete_signal(sig, t, type); 1186 ret: 1187 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result); 1188 return ret; 1189 } 1190 1191 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info) 1192 { 1193 bool ret = false; 1194 switch (siginfo_layout(info->si_signo, info->si_code)) { 1195 case SIL_KILL: 1196 case SIL_CHLD: 1197 case SIL_RT: 1198 ret = true; 1199 break; 1200 case SIL_TIMER: 1201 case SIL_POLL: 1202 case SIL_FAULT: 1203 case SIL_FAULT_TRAPNO: 1204 case SIL_FAULT_MCEERR: 1205 case SIL_FAULT_BNDERR: 1206 case SIL_FAULT_PKUERR: 1207 case SIL_FAULT_PERF_EVENT: 1208 case SIL_SYS: 1209 ret = false; 1210 break; 1211 } 1212 return ret; 1213 } 1214 1215 static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t, 1216 enum pid_type type) 1217 { 1218 /* Should SIGKILL or SIGSTOP be received by a pid namespace init? 
static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
		       enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special or carry ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

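/*
 * Editor's sketch, not part of the original file: the pid_type argument
 * selects the queue - PIDTYPE_PID targets one thread's private queue,
 * PIDTYPE_TGID the shared queue of the whole thread group.
 * example_kill_group() is hypothetical.
 */
static __maybe_unused int example_kill_group(struct task_struct *p)
{
	/* kernel-generated SIGTERM for the whole thread group of p */
	return do_send_sig_info(SIGTERM, SEND_SIG_PRIV, p, PIDTYPE_TGID);
}
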
1320 */ 1321 static int 1322 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, 1323 enum sig_handler handler) 1324 { 1325 unsigned long int flags; 1326 int ret, blocked, ignored; 1327 struct k_sigaction *action; 1328 int sig = info->si_signo; 1329 1330 spin_lock_irqsave(&t->sighand->siglock, flags); 1331 action = &t->sighand->action[sig-1]; 1332 ignored = action->sa.sa_handler == SIG_IGN; 1333 blocked = sigismember(&t->blocked, sig); 1334 if (blocked || ignored || (handler != HANDLER_CURRENT)) { 1335 action->sa.sa_handler = SIG_DFL; 1336 if (handler == HANDLER_EXIT) 1337 action->sa.sa_flags |= SA_IMMUTABLE; 1338 if (blocked) { 1339 sigdelset(&t->blocked, sig); 1340 recalc_sigpending_and_wake(t); 1341 } 1342 } 1343 /* 1344 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect 1345 * debugging to leave init killable. But HANDLER_EXIT is always fatal. 1346 */ 1347 if (action->sa.sa_handler == SIG_DFL && 1348 (!t->ptrace || (handler == HANDLER_EXIT))) 1349 t->signal->flags &= ~SIGNAL_UNKILLABLE; 1350 ret = send_signal(sig, info, t, PIDTYPE_PID); 1351 spin_unlock_irqrestore(&t->sighand->siglock, flags); 1352 1353 return ret; 1354 } 1355 1356 int force_sig_info(struct kernel_siginfo *info) 1357 { 1358 return force_sig_info_to_task(info, current, HANDLER_CURRENT); 1359 } 1360 1361 /* 1362 * Nuke all other threads in the group. 1363 */ 1364 int zap_other_threads(struct task_struct *p) 1365 { 1366 struct task_struct *t = p; 1367 int count = 0; 1368 1369 p->signal->group_stop_count = 0; 1370 1371 while_each_thread(p, t) { 1372 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); 1373 count++; 1374 1375 /* Don't bother with already dead threads */ 1376 if (t->exit_state) 1377 continue; 1378 sigaddset(&t->pending.signal, SIGKILL); 1379 signal_wake_up(t, 1); 1380 } 1381 1382 return count; 1383 } 1384 1385 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, 1386 unsigned long *flags) 1387 { 1388 struct sighand_struct *sighand; 1389 1390 rcu_read_lock(); 1391 for (;;) { 1392 sighand = rcu_dereference(tsk->sighand); 1393 if (unlikely(sighand == NULL)) 1394 break; 1395 1396 /* 1397 * This sighand can be already freed and even reused, but 1398 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which 1399 * initializes ->siglock: this slab can't go away, it has 1400 * the same object type, ->siglock can't be reinitialized. 1401 * 1402 * We need to ensure that tsk->sighand is still the same 1403 * after we take the lock, we can race with de_thread() or 1404 * __exit_signal(). In the latter case the next iteration 1405 * must see ->sighand == NULL. 
1406 */ 1407 spin_lock_irqsave(&sighand->siglock, *flags); 1408 if (likely(sighand == rcu_access_pointer(tsk->sighand))) 1409 break; 1410 spin_unlock_irqrestore(&sighand->siglock, *flags); 1411 } 1412 rcu_read_unlock(); 1413 1414 return sighand; 1415 } 1416 1417 #ifdef CONFIG_LOCKDEP 1418 void lockdep_assert_task_sighand_held(struct task_struct *task) 1419 { 1420 struct sighand_struct *sighand; 1421 1422 rcu_read_lock(); 1423 sighand = rcu_dereference(task->sighand); 1424 if (sighand) 1425 lockdep_assert_held(&sighand->siglock); 1426 else 1427 WARN_ON_ONCE(1); 1428 rcu_read_unlock(); 1429 } 1430 #endif 1431 1432 /* 1433 * send signal info to all the members of a group 1434 */ 1435 int group_send_sig_info(int sig, struct kernel_siginfo *info, 1436 struct task_struct *p, enum pid_type type) 1437 { 1438 int ret; 1439 1440 rcu_read_lock(); 1441 ret = check_kill_permission(sig, info, p); 1442 rcu_read_unlock(); 1443 1444 if (!ret && sig) 1445 ret = do_send_sig_info(sig, info, p, type); 1446 1447 return ret; 1448 } 1449 1450 /* 1451 * __kill_pgrp_info() sends a signal to a process group: this is what the tty 1452 * control characters do (^C, ^Z etc) 1453 * - the caller must hold at least a readlock on tasklist_lock 1454 */ 1455 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp) 1456 { 1457 struct task_struct *p = NULL; 1458 int retval, success; 1459 1460 success = 0; 1461 retval = -ESRCH; 1462 do_each_pid_task(pgrp, PIDTYPE_PGID, p) { 1463 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID); 1464 success |= !err; 1465 retval = err; 1466 } while_each_pid_task(pgrp, PIDTYPE_PGID, p); 1467 return success ? 0 : retval; 1468 } 1469 1470 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid) 1471 { 1472 int error = -ESRCH; 1473 struct task_struct *p; 1474 1475 for (;;) { 1476 rcu_read_lock(); 1477 p = pid_task(pid, PIDTYPE_PID); 1478 if (p) 1479 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID); 1480 rcu_read_unlock(); 1481 if (likely(!p || error != -ESRCH)) 1482 return error; 1483 1484 /* 1485 * The task was unhashed in between, try again. If it 1486 * is dead, pid_task() will return NULL, if we race with 1487 * de_thread() it will find the new leader. 1488 */ 1489 } 1490 } 1491 1492 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid) 1493 { 1494 int error; 1495 rcu_read_lock(); 1496 error = kill_pid_info(sig, info, find_vpid(pid)); 1497 rcu_read_unlock(); 1498 return error; 1499 } 1500 1501 static inline bool kill_as_cred_perm(const struct cred *cred, 1502 struct task_struct *target) 1503 { 1504 const struct cred *pcred = __task_cred(target); 1505 1506 return uid_eq(cred->euid, pcred->suid) || 1507 uid_eq(cred->euid, pcred->uid) || 1508 uid_eq(cred->uid, pcred->suid) || 1509 uid_eq(cred->uid, pcred->uid); 1510 } 1511 1512 /* 1513 * The usb asyncio usage of siginfo is wrong. The glibc support 1514 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT. 1515 * AKA after the generic fields: 1516 * kernel_pid_t si_pid; 1517 * kernel_uid32_t si_uid; 1518 * sigval_t si_value; 1519 * 1520 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout 1521 * after the generic fields is: 1522 * void __user *si_addr; 1523 * 1524 * This is a practical problem when there is a 64bit big endian kernel 1525 * and a 32bit userspace. As the 32bit address will encoded in the low 1526 * 32bits of the pointer. Those low 32bits will be stored at higher 1527 * address than appear in a 32 bit pointer. 
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong. The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the
 * low 32 bits of the 64bit pointer, and those low 32 bits are stored
 * at a higher address than a 32bit pointer occupies. So userspace will
 * not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

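/*
 * Editor's sketch, not part of the original file: what the rule above asks
 * of callers. For a 32bit process on a 64bit kernel, pass the user pointer
 * through sival_int so the value lands in the bits a 32bit userspace will
 * read back; example_usb_complete() and its "compat" flag are hypothetical.
 */
static __maybe_unused int example_usb_complete(struct pid *pid,
					       const struct cred *cred,
					       void __user *uaddr, bool compat)
{
	sigval_t addr;

	if (compat)
		addr.sival_int = (int)(unsigned long)uaddr;
	else
		addr.sival_ptr = uaddr;

	return kill_pid_usb_asyncio(SIGIO, 0, addr, pid, cred);
}
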
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong. Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}

void force_exit_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_EXIT);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

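/*
 * Editor's sketch, not part of the original file: force_sig_fault() is the
 * interface arch fault handlers use to report a synchronous fault on the
 * current task (the ___ARCH_SI_IA64 arguments vanish everywhere but ia64).
 * example_segv_maperr() is hypothetical.
 */
static __maybe_unused void example_segv_maperr(unsigned long fault_addr)
{
	/* "address not mapped to object" for the faulting access */
	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)fault_addr);
}
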
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo     = SIGTRAP;
	info.si_errno     = 0;
	info.si_code      = TRAP_PERF;
	info.si_addr      = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	/*
	 * Signals generated by perf events should not terminate the whole
	 * process if SIGTRAP is blocked, however, delivering the signal
	 * asynchronously is better than not delivering at all. But tell user
	 * space if the signal was asynchronous, so it can clearly be
	 * distinguished from normal synchronous ones.
	 */
	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
				     TRAP_PERF_FLAG_ASYNC :
				     0;

	return send_sig_info(info.si_signo, &info, current);
}

/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
				      force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

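/*
 * Editor's sketch, not part of the original file: memory-failure handling
 * reports poisoned user pages via the mceerr helpers above, with si_addr_lsb
 * describing the granularity of the damage. example_report_poison() is
 * hypothetical and assumes a whole page was lost.
 */
static __maybe_unused int example_report_poison(void __user *addr)
{
	/* BUS_MCEERR_AR: action required, the task touched poisoned memory */
	return force_sig_mceerr(BUS_MCEERR_AR, addr, PAGE_SHIFT);
}
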
/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create. If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

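/*
 * Editor's sketch, not part of the original file: the preallocation
 * pattern described above - reserve the sigqueue at creation time so a
 * later expiry cannot fail with -EAGAIN. struct example_timer and both
 * helpers are hypothetical.
 */
struct example_timer {
	struct sigqueue *sigq;
	struct pid *target;
};

static __maybe_unused int example_timer_create(struct example_timer *t)
{
	t->sigq = sigqueue_alloc();
	if (!t->sigq)
		return -EAGAIN;

	clear_siginfo(&t->sigq->info);
	t->sigq->info.si_signo = SIGALRM;
	t->sigq->info.si_code = SI_TIMER;
	return 0;
}

static __maybe_unused void example_timer_expire(struct example_timer *t)
{
	/* > 0 means the signal was ignored, < 0 that the task is gone */
	send_sigqueue(t->sigq, t->target, PIDTYPE_TGID);
}
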
1961
1962 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1963 {
1964 	int sig = q->info.si_signo;
1965 	struct sigpending *pending;
1966 	struct task_struct *t;
1967 	unsigned long flags;
1968 	int ret, result;
1969
1970 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1971
1972 	ret = -1;
1973 	rcu_read_lock();
1974 	t = pid_task(pid, type);
1975 	if (!t || !likely(lock_task_sighand(t, &flags)))
1976 		goto ret;
1977
1978 	ret = 1; /* the signal is ignored */
1979 	result = TRACE_SIGNAL_IGNORED;
1980 	if (!prepare_signal(sig, t, false))
1981 		goto out;
1982
1983 	ret = 0;
1984 	if (unlikely(!list_empty(&q->list))) {
1985 		/*
1986 		 * If an SI_TIMER entry is already queued, just increment
1987 		 * the overrun count.
1988 		 */
1989 		BUG_ON(q->info.si_code != SI_TIMER);
1990 		q->info.si_overrun++;
1991 		result = TRACE_SIGNAL_ALREADY_PENDING;
1992 		goto out;
1993 	}
1994 	q->info.si_overrun = 0;
1995
1996 	signalfd_notify(t, sig);
1997 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1998 	list_add_tail(&q->list, &pending->list);
1999 	sigaddset(&pending->signal, sig);
2000 	complete_signal(sig, t, type);
2001 	result = TRACE_SIGNAL_DELIVERED;
2002 out:
2003 	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2004 	unlock_task_sighand(t, &flags);
2005 ret:
2006 	rcu_read_unlock();
2007 	return ret;
2008 }
2009
2010 static void do_notify_pidfd(struct task_struct *task)
2011 {
2012 	struct pid *pid;
2013
2014 	WARN_ON(task->exit_state == 0);
2015 	pid = task_pid(task);
2016 	wake_up_all(&pid->wait_pidfd);
2017 }
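/*
 * Userspace-visible effect of do_notify_pidfd(): a poll() on a pidfd
 * becomes readable once the task exits. A hypothetical waiter (userspace
 * sketch, error handling elided; assumes a libc exposing pidfd_open via
 * syscall(2)):
 */
#if 0
#include <poll.h>
#include <sys/syscall.h>
#include <unistd.h>

static void wait_for_exit(pid_t pid)
{
	int pidfd = syscall(SYS_pidfd_open, pid, 0);
	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };

	poll(&pfd, 1, -1);	/* returns once wake_up_all(&pid->wait_pidfd) fires */
	close(pidfd);
}
#endif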
2018
2019 /*
2020  * Let a parent know about the death of a child.
2021  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2022  *
2023  * Returns true if our parent ignored us and so we've switched to
2024  * self-reaping.
2025  */
2026 bool do_notify_parent(struct task_struct *tsk, int sig)
2027 {
2028 	struct kernel_siginfo info;
2029 	unsigned long flags;
2030 	struct sighand_struct *psig;
2031 	bool autoreap = false;
2032 	u64 utime, stime;
2033
2034 	BUG_ON(sig == -1);
2035
2036 	/* do_notify_parent_cldstop should have been called instead. */
2037 	BUG_ON(task_is_stopped_or_traced(tsk));
2038
2039 	BUG_ON(!tsk->ptrace &&
2040 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2041
2042 	/* Wake up all pidfd waiters */
2043 	do_notify_pidfd(tsk);
2044
2045 	if (sig != SIGCHLD) {
2046 		/*
2047 		 * This is only possible if parent == real_parent.
2048 		 * Check if it has changed security domain.
2049 		 */
2050 		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2051 			sig = SIGCHLD;
2052 	}
2053
2054 	clear_siginfo(&info);
2055 	info.si_signo = sig;
2056 	info.si_errno = 0;
2057 	/*
2058 	 * We are under tasklist_lock here so our parent is tied to
2059 	 * us and cannot change.
2060 	 *
2061 	 * task_active_pid_ns will always return the same pid namespace
2062 	 * until a task passes through release_task.
2063 	 *
2064 	 * write_lock() currently calls preempt_disable() which is the
2065 	 * same as rcu_read_lock(), but according to Oleg, it is not
2066 	 * correct to rely on this.
2067 	 */
2068 	rcu_read_lock();
2069 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2070 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2071 				       task_uid(tsk));
2072 	rcu_read_unlock();
2073
2074 	task_cputime(tsk, &utime, &stime);
2075 	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2076 	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2077
2078 	info.si_status = tsk->exit_code & 0x7f;
2079 	if (tsk->exit_code & 0x80)
2080 		info.si_code = CLD_DUMPED;
2081 	else if (tsk->exit_code & 0x7f)
2082 		info.si_code = CLD_KILLED;
2083 	else {
2084 		info.si_code = CLD_EXITED;
2085 		info.si_status = tsk->exit_code >> 8;
2086 	}
2087
2088 	psig = tsk->parent->sighand;
2089 	spin_lock_irqsave(&psig->siglock, flags);
2090 	if (!tsk->ptrace && sig == SIGCHLD &&
2091 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2092 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2093 		/*
2094 		 * We are exiting and our parent doesn't care. POSIX.1
2095 		 * defines special semantics for setting SIGCHLD to SIG_IGN
2096 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2097 		 * automatically and not left for our parent's wait4 call.
2098 		 * Rather than having the parent do it as a magic kind of
2099 		 * signal handler, we just set this to tell do_exit that we
2100 		 * can be cleaned up without becoming a zombie. Note that
2101 		 * we still call __wake_up_parent in this case, because a
2102 		 * blocked sys_wait4 might now return -ECHILD.
2103 		 *
2104 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2105 		 * is implementation-defined: we do (if you don't want
2106 		 * it, just use SIG_IGN instead).
2107 		 */
2108 		autoreap = true;
2109 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2110 			sig = 0;
2111 	}
2112 	/*
2113 	 * Send with __send_signal as si_pid and si_uid are in the
2114 	 * parent's namespaces.
2115 	 */
2116 	if (valid_signal(sig) && sig)
2117 		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2118 	__wake_up_parent(tsk, tsk->parent);
2119 	spin_unlock_irqrestore(&psig->siglock, flags);
2120
2121 	return autoreap;
2122 }
2123
2124 /**
2125  * do_notify_parent_cldstop - notify parent of stopped/continued state change
2126  * @tsk: task reporting the state change
2127  * @for_ptracer: the notification is for ptracer
2128  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2129  *
2130  * Notify @tsk's parent that the stopped/continued state has changed. If
2131  * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2132  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2133  *
2134  * CONTEXT:
2135  * Must be called with tasklist_lock at least read locked.
2136  */
2137 static void do_notify_parent_cldstop(struct task_struct *tsk,
2138 				     bool for_ptracer, int why)
2139 {
2140 	struct kernel_siginfo info;
2141 	unsigned long flags;
2142 	struct task_struct *parent;
2143 	struct sighand_struct *sighand;
2144 	u64 utime, stime;
2145
2146 	if (for_ptracer) {
2147 		parent = tsk->parent;
2148 	} else {
2149 		tsk = tsk->group_leader;
2150 		parent = tsk->real_parent;
2151 	}
2152
2153 	clear_siginfo(&info);
2154 	info.si_signo = SIGCHLD;
2155 	info.si_errno = 0;
2156 	/*
2157 	 * see comment in do_notify_parent() about the following 4 lines
2158 	 */
2159 	rcu_read_lock();
2160 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2161 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2162 	rcu_read_unlock();
2163
2164 	task_cputime(tsk, &utime, &stime);
2165 	info.si_utime = nsec_to_clock_t(utime);
2166 	info.si_stime = nsec_to_clock_t(stime);
2167
2168 	info.si_code = why;
2169 	switch (why) {
2170 	case CLD_CONTINUED:
2171 		info.si_status = SIGCONT;
2172 		break;
2173 	case CLD_STOPPED:
2174 		info.si_status = tsk->signal->group_exit_code & 0x7f;
2175 		break;
2176 	case CLD_TRAPPED:
2177 		info.si_status = tsk->exit_code & 0x7f;
2178 		break;
2179 	default:
2180 		BUG();
2181 	}
2182
2183 	sighand = parent->sighand;
2184 	spin_lock_irqsave(&sighand->siglock, flags);
2185 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2186 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2187 		__group_send_sig_info(SIGCHLD, &info, parent);
2188 	/*
2189 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2190 	 */
2191 	__wake_up_parent(tsk, parent);
2192 	spin_unlock_irqrestore(&sighand->siglock, flags);
2193 }
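/*
 * The POSIX.1 autoreap semantics implemented in do_notify_parent() above,
 * as seen from userspace (hypothetical sketch): with SIGCHLD set to
 * SIG_IGN, children are reaped automatically and a blocked wait() returns
 * -1 with ECHILD instead of reporting the child.
 */
#if 0
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* or sigaction() with SA_NOCLDWAIT */
	if (fork() == 0)
		_exit(EXIT_SUCCESS);	/* child: autoreaped, never a zombie */
	/* wait() blocks until all children are gone, then fails */
	return wait(NULL) == -1 && errno == ECHILD ? 0 : 1;
}
#endif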
2194
2195 /*
2196  * This must be called with current->sighand->siglock held.
2197  *
2198  * This should be the path for all ptrace stops.
2199  * We always set current->last_siginfo while stopped here.
2200  * That makes it a way to test a stopped process for
2201  * being ptrace-stopped vs being job-control-stopped.
2202  *
2203  * Returns the signal the ptracer requested the code resume
2204  * with. If the code did not stop because the tracer is gone,
2205  * the stop signal remains unchanged unless clear_code.
2206  */
2207 static int ptrace_stop(int exit_code, int why, int clear_code,
2208 		       unsigned long message, kernel_siginfo_t *info)
2209 	__releases(&current->sighand->siglock)
2210 	__acquires(&current->sighand->siglock)
2211 {
2212 	bool gstop_done = false;
2213 	bool read_code = true;
2214
2215 	if (arch_ptrace_stop_needed()) {
2216 		/*
2217 		 * The arch code has something special to do before a
2218 		 * ptrace stop. This is allowed to block, e.g. for faults
2219 		 * on user stack pages. We can't keep the siglock while
2220 		 * calling arch_ptrace_stop, so we must release it now.
2221 		 * To preserve proper semantics, we must do this before
2222 		 * any signal bookkeeping like checking group_stop_count.
2223 		 */
2224 		spin_unlock_irq(&current->sighand->siglock);
2225 		arch_ptrace_stop();
2226 		spin_lock_irq(&current->sighand->siglock);
2227 	}
2228
2229 	/*
2230 	 * schedule() will not sleep if there is a pending signal that
2231 	 * can awaken the task.
2232 	 */
2233 	set_special_state(TASK_TRACED);
2234
2235 	/*
2236 	 * We're committing to trapping. TRACED should be visible before
2237 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2238 	 * Also, transition to TRACED and updates to ->jobctl should be
2239 	 * atomic with respect to siglock and should be done after the arch
2240 	 * hook as siglock is released and regrabbed across it.
2241 	 *
2242 	 *     TRACER				TRACEE
2243 	 *
2244 	 *     ptrace_attach()
2245 	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2246 	 *     do_wait()
2247 	 *       set_current_state()		  smp_wmb();
2248 	 *       ptrace_do_wait()
2249 	 *         wait_task_stopped()
2250 	 *           task_stopped_code()
2251 	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2252 	 */
2253 	smp_wmb();
2254
2255 	current->ptrace_message = message;
2256 	current->last_siginfo = info;
2257 	current->exit_code = exit_code;
2258
2259 	/*
2260 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2261 	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2262 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2263 	 * could be clear now. We act as if SIGCONT is received after
2264 	 * TASK_TRACED is entered - ignore it.
2265 	 */
2266 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2267 		gstop_done = task_participate_group_stop(current);
2268
2269 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2270 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2271 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2272 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2273
2274 	/* entering a trap, clear TRAPPING */
2275 	task_clear_jobctl_trapping(current);
2276
2277 	spin_unlock_irq(&current->sighand->siglock);
2278 	read_lock(&tasklist_lock);
2279 	if (likely(current->ptrace)) {
2280 		/*
2281 		 * Notify parents of the stop.
2282 		 *
2283 		 * While ptraced, there are two parents - the ptracer and
2284 		 * the real_parent of the group_leader. The ptracer should
2285 		 * know about every stop while the real parent is only
2286 		 * interested in the completion of group stop. The states
2287 		 * for the two don't interact with each other. Notify
2288 		 * separately unless they're gonna be duplicates.
2289 		 */
2290 		do_notify_parent_cldstop(current, true, why);
2291 		if (gstop_done && ptrace_reparented(current))
2292 			do_notify_parent_cldstop(current, false, why);
2293
2294 		/*
2295 		 * Don't want to allow preemption here, because
2296 		 * sys_ptrace() needs this task to be inactive.
2297 		 *
2298 		 * XXX: implement read_unlock_no_resched().
2299 		 */
2300 		preempt_disable();
2301 		read_unlock(&tasklist_lock);
2302 		cgroup_enter_frozen();
2303 		preempt_enable_no_resched();
2304 		freezable_schedule();
2305 		cgroup_leave_frozen(true);
2306 	} else {
2307 		/*
2308 		 * By the time we got the lock, our tracer went away.
2309 		 * Don't drop the lock yet, another tracer may come.
2310 		 *
2311 		 * If @gstop_done, the ptracer went away between group stop
2312 		 * completion and here. During detach, it would have set
2313 		 * JOBCTL_STOP_PENDING on us and we'll re-enter
2314 		 * TASK_STOPPED in do_signal_stop() on return, so notifying
2315 		 * the real parent of the group stop completion is enough.
2316 		 */
2317 		if (gstop_done)
2318 			do_notify_parent_cldstop(current, false, why);
2319
2320 		/* tasklist protects us from ptrace_freeze_traced() */
2321 		__set_current_state(TASK_RUNNING);
2322 		read_code = false;
2323 		if (clear_code)
2324 			exit_code = 0;
2325 		read_unlock(&tasklist_lock);
2326 	}
2327
2328 	/*
2329 	 * We are back. Now reacquire the siglock before touching
2330 	 * last_siginfo, so that we are sure to have synchronized with
2331 	 * any signal-sending on another CPU that wants to examine it.
2332 	 */
2333 	spin_lock_irq(&current->sighand->siglock);
2334 	if (read_code)
2335 		exit_code = current->exit_code;
2336 	current->last_siginfo = NULL;
2337 	current->ptrace_message = 0;
2338 	current->exit_code = 0;
2339
2340 	/* LISTENING can be set only during STOP traps, clear it */
2341 	current->jobctl &= ~JOBCTL_LISTENING;
2342
2343 	/*
2344 	 * Queued signals ignored us while we were stopped for tracing.
2345 	 * So check for any that we should take before resuming user mode.
2346 	 * This sets TIF_SIGPENDING, but never clears it.
2347 	 */
2348 	recalc_sigpending_tsk(current);
2349 	return exit_code;
2350 }
2351
2352 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2353 {
2354 	kernel_siginfo_t info;
2355
2356 	clear_siginfo(&info);
2357 	info.si_signo = signr;
2358 	info.si_code = exit_code;
2359 	info.si_pid = task_pid_vnr(current);
2360 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2361
2362 	/* Let the debugger run. */
2363 	return ptrace_stop(exit_code, why, 1, message, &info);
2364 }
2365
2366 int ptrace_notify(int exit_code, unsigned long message)
2367 {
2368 	int signr;
2369
2370 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2371 	if (unlikely(task_work_pending(current)))
2372 		task_work_run();
2373
2374 	spin_lock_irq(&current->sighand->siglock);
2375 	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2376 	spin_unlock_irq(&current->sighand->siglock);
2377 	return signr;
2378 }
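/*
 * For illustration: callers encode a ptrace event in the upper bits of
 * @exit_code while keeping SIGTRAP in the low byte, which is what the
 * BUG_ON() in ptrace_notify() above enforces. ptrace_event() in
 * <linux/ptrace.h> boils down to:
 *
 *	ptrace_notify((event << 8) | SIGTRAP, message);
 *
 * so a PTRACE_EVENT_EXEC report, for example, arrives as
 * (PTRACE_EVENT_EXEC << 8) | SIGTRAP.
 */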
2379
2380 /**
2381  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2382  * @signr: signr causing group stop if initiating
2383  *
2384  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2385  * and participate in it. If already set, participate in the existing
2386  * group stop. If participated in a group stop (and thus slept), %true is
2387  * returned with siglock released.
2388  *
2389  * If ptraced, this function doesn't handle stop itself. Instead,
2390  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2391  * untouched. The caller must ensure that INTERRUPT trap handling takes
2392  * place afterwards.
2393  *
2394  * CONTEXT:
2395  * Must be called with @current->sighand->siglock held, which is released
2396  * on %true return.
2397  *
2398  * RETURNS:
2399  * %false if group stop is already cancelled or ptrace trap is scheduled.
2400  * %true if participated in group stop.
2401  */
2402 static bool do_signal_stop(int signr)
2403 	__releases(&current->sighand->siglock)
2404 {
2405 	struct signal_struct *sig = current->signal;
2406
2407 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2408 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2409 		struct task_struct *t;
2410
2411 		/* signr will be recorded in task->jobctl for retries */
2412 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2413
2414 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2415 		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2416 		    unlikely(sig->group_exec_task))
2417 			return false;
2418 		/*
2419 		 * There is no group stop already in progress. We must
2420 		 * initiate one now.
2421 		 *
2422 		 * While ptraced, a task may be resumed while group stop is
2423 		 * still in effect and then receive a stop signal and
2424 		 * initiate another group stop. This deviates from the
2425 		 * usual behavior as two consecutive stop signals can't
2426 		 * cause two group stops when !ptraced. That is why we
2427 		 * also check !task_is_stopped(t) below.
2428 		 *
2429 		 * The condition can be distinguished by testing whether
2430 		 * SIGNAL_STOP_STOPPED is already set. Don't generate
2431 		 * group_exit_code in such a case.
2432 		 *
2433 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2434 		 * an intervening stop signal is required to cause two
2435 		 * continued events regardless of ptrace.
2436 		 */
2437 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2438 			sig->group_exit_code = signr;
2439
2440 		sig->group_stop_count = 0;
2441
2442 		if (task_set_jobctl_pending(current, signr | gstop))
2443 			sig->group_stop_count++;
2444
2445 		t = current;
2446 		while_each_thread(current, t) {
2447 			/*
2448 			 * Setting state to TASK_STOPPED for a group
2449 			 * stop is always done with the siglock held,
2450 			 * so this check has no races.
2451 			 */
2452 			if (!task_is_stopped(t) &&
2453 			    task_set_jobctl_pending(t, signr | gstop)) {
2454 				sig->group_stop_count++;
2455 				if (likely(!(t->ptrace & PT_SEIZED)))
2456 					signal_wake_up(t, 0);
2457 				else
2458 					ptrace_trap_notify(t);
2459 			}
2460 		}
2461 	}
2462
2463 	if (likely(!current->ptrace)) {
2464 		int notify = 0;
2465
2466 		/*
2467 		 * If there are no other threads in the group, or if there
2468 		 * is a group stop in progress and we are the last to stop,
2469 		 * report to the parent.
2470 		 */
2471 		if (task_participate_group_stop(current))
2472 			notify = CLD_STOPPED;
2473
2474 		set_special_state(TASK_STOPPED);
2475 		spin_unlock_irq(&current->sighand->siglock);
2476
2477 		/*
2478 		 * Notify the parent of the group stop completion. Because
2479 		 * we're not holding either the siglock or tasklist_lock
2480 		 * here, ptracer may attach in between; however, this is for
2481 		 * group stop and should always be delivered to the real
2482 		 * parent of the group leader. The new ptracer will get
2483 		 * its notification when this task transitions into
2484 		 * TASK_TRACED.
2485 		 */
2486 		if (notify) {
2487 			read_lock(&tasklist_lock);
2488 			do_notify_parent_cldstop(current, false, notify);
2489 			read_unlock(&tasklist_lock);
2490 		}
2491
2492 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2493 		cgroup_enter_frozen();
2494 		freezable_schedule();
2495 		return true;
2496 	} else {
2497 		/*
2498 		 * While ptraced, group stop is handled by STOP trap.
2499 		 * Schedule it and let the caller deal with it.
2500 		 */
2501 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2502 		return false;
2503 	}
2504 }
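/*
 * The observable effect of do_signal_stop() from userspace (hypothetical
 * sketch): stopping any one thread stops the whole thread group, and the
 * parent is told via the wait machinery once the group stop completes.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();
	int status;

	if (child == 0)
		for (;;)
			pause();
	kill(child, SIGSTOP);			/* initiates the group stop */
	waitpid(child, &status, WUNTRACED);	/* reported via CLD_STOPPED */
	if (WIFSTOPPED(status))
		printf("stopped by signal %d\n", WSTOPSIG(status));
	kill(child, SIGKILL);
	waitpid(child, NULL, 0);
	return 0;
}
#endif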
2505
2506 /**
2507  * do_jobctl_trap - take care of ptrace jobctl traps
2508  *
2509  * When PT_SEIZED, it's used for both group stop and explicit
2510  * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2511  * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2512  * the stop signal; otherwise, %SIGTRAP.
2513  *
2514  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2515  * number as exit_code and no siginfo.
2516  *
2517  * CONTEXT:
2518  * Must be called with @current->sighand->siglock held, which may be
2519  * released and re-acquired before returning with intervening sleep.
2520  */
2521 static void do_jobctl_trap(void)
2522 {
2523 	struct signal_struct *signal = current->signal;
2524 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2525
2526 	if (current->ptrace & PT_SEIZED) {
2527 		if (!signal->group_stop_count &&
2528 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2529 			signr = SIGTRAP;
2530 		WARN_ON_ONCE(!signr);
2531 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2532 				 CLD_STOPPED, 0);
2533 	} else {
2534 		WARN_ON_ONCE(!signr);
2535 		ptrace_stop(signr, CLD_STOPPED, 0, 0, NULL);
2536 	}
2537 }
2538
2539 /**
2540  * do_freezer_trap - handle the freezer jobctl trap
2541  *
2542  * Puts the task into the frozen state, unless the task is about to quit;
2543  * in that case JOBCTL_TRAP_FREEZE is dropped.
2544  *
2545  * CONTEXT:
2546  * Must be called with @current->sighand->siglock held,
2547  * which is always released before returning.
2548  */
2549 static void do_freezer_trap(void)
2550 	__releases(&current->sighand->siglock)
2551 {
2552 	/*
2553 	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2554 	 * let's make another loop to give it a chance to be handled.
2555 	 * In any case, we'll come back here.
2556 	 */
2557 	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2558 	     JOBCTL_TRAP_FREEZE) {
2559 		spin_unlock_irq(&current->sighand->siglock);
2560 		return;
2561 	}
2562
2563 	/*
2564 	 * Now we're sure that there is no pending fatal signal and no
2565 	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2566 	 * immediately (if there is a non-fatal signal pending), and
2567 	 * put the task to sleep.
2568 	 */
2569 	__set_current_state(TASK_INTERRUPTIBLE);
2570 	clear_thread_flag(TIF_SIGPENDING);
2571 	spin_unlock_irq(&current->sighand->siglock);
2572 	cgroup_enter_frozen();
2573 	freezable_schedule();
2574 }
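/*
 * Illustrative note: this is the trap that services the cgroup v2 freezer.
 * Writing "1" to a cgroup's cgroup.freeze file schedules JOBCTL_TRAP_FREEZE
 * on its tasks, and each of them eventually parks here in
 * TASK_INTERRUPTIBLE until it is thawed (or hit by a fatal signal).
 */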
2575
2576 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2577 {
2578 	/*
2579 	 * We do not check sig_kernel_stop(signr) but set this marker
2580 	 * unconditionally because we do not know whether debugger will
2581 	 * change signr. This flag has no meaning unless we are going
2582 	 * to stop after return from ptrace_stop(). In this case it will
2583 	 * be checked in do_signal_stop(), we should only stop if it was
2584 	 * not cleared by SIGCONT while we were sleeping. See also the
2585 	 * comment in dequeue_signal().
2586 	 */
2587 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2588 	signr = ptrace_stop(signr, CLD_TRAPPED, 0, 0, info);
2589
2590 	/* We're back. Did the debugger cancel the sig? */
2591 	if (signr == 0)
2592 		return signr;
2593
2594 	/*
2595 	 * Update the siginfo structure if the signal has
2596 	 * changed. If the debugger wanted something
2597 	 * specific in the siginfo structure then it should
2598 	 * have updated *info via PTRACE_SETSIGINFO.
2599 	 */
2600 	if (signr != info->si_signo) {
2601 		clear_siginfo(info);
2602 		info->si_signo = signr;
2603 		info->si_errno = 0;
2604 		info->si_code = SI_USER;
2605 		rcu_read_lock();
2606 		info->si_pid = task_pid_vnr(current->parent);
2607 		info->si_uid = from_kuid_munged(current_user_ns(),
2608 						task_uid(current->parent));
2609 		rcu_read_unlock();
2610 	}
2611
2612 	/* If the (new) signal is now blocked, requeue it. */
2613 	if (sigismember(&current->blocked, signr) ||
2614 	    fatal_signal_pending(current)) {
2615 		send_signal(signr, info, current, type);
2616 		signr = 0;
2617 	}
2618
2619 	return signr;
2620 }
2621
2622 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2623 {
2624 	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2625 	case SIL_FAULT:
2626 	case SIL_FAULT_TRAPNO:
2627 	case SIL_FAULT_MCEERR:
2628 	case SIL_FAULT_BNDERR:
2629 	case SIL_FAULT_PKUERR:
2630 	case SIL_FAULT_PERF_EVENT:
2631 		ksig->info.si_addr = arch_untagged_si_addr(
2632 			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2633 		break;
2634 	case SIL_KILL:
2635 	case SIL_TIMER:
2636 	case SIL_POLL:
2637 	case SIL_CHLD:
2638 	case SIL_RT:
2639 	case SIL_SYS:
2640 		break;
2641 	}
2642 }
2643
2644 bool get_signal(struct ksignal *ksig)
2645 {
2646 	struct sighand_struct *sighand = current->sighand;
2647 	struct signal_struct *signal = current->signal;
2648 	int signr;
2649
2650 	clear_notify_signal();
2651 	if (unlikely(task_work_pending(current)))
2652 		task_work_run();
2653
2654 	if (!task_sigpending(current))
2655 		return false;
2656
2657 	if (unlikely(uprobe_deny_signal()))
2658 		return false;
2659
2660 	/*
2661 	 * Do this once, we can't return to user-mode if freezing() == T.
2662 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2663 	 * thus do not need another check after return.
2664 	 */
2665 	try_to_freeze();
2666
2667 relock:
2668 	spin_lock_irq(&sighand->siglock);
2669
2670 	/*
2671 	 * Every stopped thread goes here after wakeup. Check to see if
2672 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2673 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2674 	 */
2675 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2676 		int why;
2677
2678 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2679 			why = CLD_CONTINUED;
2680 		else
2681 			why = CLD_STOPPED;
2682
2683 		signal->flags &= ~SIGNAL_CLD_MASK;
2684
2685 		spin_unlock_irq(&sighand->siglock);
2686
2687 		/*
2688 		 * Notify the parent that we're continuing. This event is
2689 		 * always per-process and doesn't make a whole lot of sense
2690 		 * for ptracers, who shouldn't consume the state via
2691 		 * wait(2) either, but, for backward compatibility, notify
2692 		 * the ptracer of the group leader too unless it's gonna be
2693 		 * a duplicate.
2694 		 */
2695 		read_lock(&tasklist_lock);
2696 		do_notify_parent_cldstop(current, false, why);
2697
2698 		if (ptrace_reparented(current->group_leader))
2699 			do_notify_parent_cldstop(current->group_leader,
2700 						 true, why);
2701 		read_unlock(&tasklist_lock);
2702
2703 		goto relock;
2704 	}
2705
2706 	for (;;) {
2707 		struct k_sigaction *ka;
2708 		enum pid_type type;
2709
2710 		/* Has this task already been marked for death?
*/ 2711 if ((signal->flags & SIGNAL_GROUP_EXIT) || 2712 signal->group_exec_task) { 2713 ksig->info.si_signo = signr = SIGKILL; 2714 sigdelset(¤t->pending.signal, SIGKILL); 2715 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO, 2716 &sighand->action[SIGKILL - 1]); 2717 recalc_sigpending(); 2718 goto fatal; 2719 } 2720 2721 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && 2722 do_signal_stop(0)) 2723 goto relock; 2724 2725 if (unlikely(current->jobctl & 2726 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) { 2727 if (current->jobctl & JOBCTL_TRAP_MASK) { 2728 do_jobctl_trap(); 2729 spin_unlock_irq(&sighand->siglock); 2730 } else if (current->jobctl & JOBCTL_TRAP_FREEZE) 2731 do_freezer_trap(); 2732 2733 goto relock; 2734 } 2735 2736 /* 2737 * If the task is leaving the frozen state, let's update 2738 * cgroup counters and reset the frozen bit. 2739 */ 2740 if (unlikely(cgroup_task_frozen(current))) { 2741 spin_unlock_irq(&sighand->siglock); 2742 cgroup_leave_frozen(false); 2743 goto relock; 2744 } 2745 2746 /* 2747 * Signals generated by the execution of an instruction 2748 * need to be delivered before any other pending signals 2749 * so that the instruction pointer in the signal stack 2750 * frame points to the faulting instruction. 2751 */ 2752 type = PIDTYPE_PID; 2753 signr = dequeue_synchronous_signal(&ksig->info); 2754 if (!signr) 2755 signr = dequeue_signal(current, ¤t->blocked, 2756 &ksig->info, &type); 2757 2758 if (!signr) 2759 break; /* will return 0 */ 2760 2761 if (unlikely(current->ptrace) && (signr != SIGKILL) && 2762 !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) { 2763 signr = ptrace_signal(signr, &ksig->info, type); 2764 if (!signr) 2765 continue; 2766 } 2767 2768 ka = &sighand->action[signr-1]; 2769 2770 /* Trace actually delivered signals. */ 2771 trace_signal_deliver(signr, &ksig->info, ka); 2772 2773 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ 2774 continue; 2775 if (ka->sa.sa_handler != SIG_DFL) { 2776 /* Run the handler. */ 2777 ksig->ka = *ka; 2778 2779 if (ka->sa.sa_flags & SA_ONESHOT) 2780 ka->sa.sa_handler = SIG_DFL; 2781 2782 break; /* will return non-zero "signr" value */ 2783 } 2784 2785 /* 2786 * Now we are doing the default action for this signal. 2787 */ 2788 if (sig_kernel_ignore(signr)) /* Default is nothing. */ 2789 continue; 2790 2791 /* 2792 * Global init gets no signals it doesn't want. 2793 * Container-init gets no signals it doesn't want from same 2794 * container. 2795 * 2796 * Note that if global/container-init sees a sig_kernel_only() 2797 * signal here, the signal must have been generated internally 2798 * or must have come from an ancestor namespace. In either 2799 * case, the signal cannot be dropped. 2800 */ 2801 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && 2802 !sig_kernel_only(signr)) 2803 continue; 2804 2805 if (sig_kernel_stop(signr)) { 2806 /* 2807 * The default action is to stop all threads in 2808 * the thread group. The job control signals 2809 * do nothing in an orphaned pgrp, but SIGSTOP 2810 * always works. Note that siglock needs to be 2811 * dropped during the call to is_orphaned_pgrp() 2812 * because of lock ordering with tasklist_lock. 2813 * This allows an intervening SIGCONT to be posted. 2814 * We need to check for that and bail out if necessary. 
2815 */ 2816 if (signr != SIGSTOP) { 2817 spin_unlock_irq(&sighand->siglock); 2818 2819 /* signals can be posted during this window */ 2820 2821 if (is_current_pgrp_orphaned()) 2822 goto relock; 2823 2824 spin_lock_irq(&sighand->siglock); 2825 } 2826 2827 if (likely(do_signal_stop(ksig->info.si_signo))) { 2828 /* It released the siglock. */ 2829 goto relock; 2830 } 2831 2832 /* 2833 * We didn't actually stop, due to a race 2834 * with SIGCONT or something like that. 2835 */ 2836 continue; 2837 } 2838 2839 fatal: 2840 spin_unlock_irq(&sighand->siglock); 2841 if (unlikely(cgroup_task_frozen(current))) 2842 cgroup_leave_frozen(true); 2843 2844 /* 2845 * Anything else is fatal, maybe with a core dump. 2846 */ 2847 current->flags |= PF_SIGNALED; 2848 2849 if (sig_kernel_coredump(signr)) { 2850 if (print_fatal_signals) 2851 print_fatal_signal(ksig->info.si_signo); 2852 proc_coredump_connector(current); 2853 /* 2854 * If it was able to dump core, this kills all 2855 * other threads in the group and synchronizes with 2856 * their demise. If we lost the race with another 2857 * thread getting here, it set group_exit_code 2858 * first and our do_group_exit call below will use 2859 * that value and ignore the one we pass it. 2860 */ 2861 do_coredump(&ksig->info); 2862 } 2863 2864 /* 2865 * PF_IO_WORKER threads will catch and exit on fatal signals 2866 * themselves. They have cleanup that must be performed, so 2867 * we cannot call do_exit() on their behalf. 2868 */ 2869 if (current->flags & PF_IO_WORKER) 2870 goto out; 2871 2872 /* 2873 * Death signals, no core dump. 2874 */ 2875 do_group_exit(ksig->info.si_signo); 2876 /* NOTREACHED */ 2877 } 2878 spin_unlock_irq(&sighand->siglock); 2879 out: 2880 ksig->sig = signr; 2881 2882 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS)) 2883 hide_si_addr_tag_bits(ksig); 2884 2885 return ksig->sig > 0; 2886 } 2887 2888 /** 2889 * signal_delivered - called after signal delivery to update blocked signals 2890 * @ksig: kernel signal struct 2891 * @stepping: nonzero if debugger single-step or block-step in use 2892 * 2893 * This function should be called when a signal has successfully been 2894 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask 2895 * is always blocked), and the signal itself is blocked unless %SA_NODEFER 2896 * is set in @ksig->ka.sa.sa_flags. Tracing is notified. 2897 */ 2898 static void signal_delivered(struct ksignal *ksig, int stepping) 2899 { 2900 sigset_t blocked; 2901 2902 /* A signal was successfully delivered, and the 2903 saved sigmask was stored on the signal frame, 2904 and will be restored by sigreturn. So we can 2905 simply clear the restore sigmask flag. */ 2906 clear_restore_sigmask(); 2907 2908 sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask); 2909 if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) 2910 sigaddset(&blocked, ksig->sig); 2911 set_current_blocked(&blocked); 2912 if (current->sas_ss_flags & SS_AUTODISARM) 2913 sas_ss_reset(current); 2914 if (stepping) 2915 ptrace_notify(SIGTRAP, 0); 2916 } 2917 2918 void signal_setup_done(int failed, struct ksignal *ksig, int stepping) 2919 { 2920 if (failed) 2921 force_sigsegv(ksig->sig); 2922 else 2923 signal_delivered(ksig, stepping); 2924 } 2925 2926 /* 2927 * It could be that complete_signal() picked us to notify about the 2928 * group-wide signal. Other threads should be notified now to take 2929 * the shared signals in @which since we will not. 
2930  */
2931 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2932 {
2933 	sigset_t retarget;
2934 	struct task_struct *t;
2935
2936 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2937 	if (sigisemptyset(&retarget))
2938 		return;
2939
2940 	t = tsk;
2941 	while_each_thread(tsk, t) {
2942 		if (t->flags & PF_EXITING)
2943 			continue;
2944
2945 		if (!has_pending_signals(&retarget, &t->blocked))
2946 			continue;
2947 		/* Remove the signals this thread can handle. */
2948 		sigandsets(&retarget, &retarget, &t->blocked);
2949
2950 		if (!task_sigpending(t))
2951 			signal_wake_up(t, 0);
2952
2953 		if (sigisemptyset(&retarget))
2954 			break;
2955 	}
2956 }
2957
2958 void exit_signals(struct task_struct *tsk)
2959 {
2960 	int group_stop = 0;
2961 	sigset_t unblocked;
2962
2963 	/*
2964 	 * @tsk is about to have PF_EXITING set - lock out users which
2965 	 * expect stable threadgroup.
2966 	 */
2967 	cgroup_threadgroup_change_begin(tsk);
2968
2969 	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2970 		tsk->flags |= PF_EXITING;
2971 		cgroup_threadgroup_change_end(tsk);
2972 		return;
2973 	}
2974
2975 	spin_lock_irq(&tsk->sighand->siglock);
2976 	/*
2977 	 * From now this task is not visible for group-wide signals,
2978 	 * see wants_signal(), do_signal_stop().
2979 	 */
2980 	tsk->flags |= PF_EXITING;
2981
2982 	cgroup_threadgroup_change_end(tsk);
2983
2984 	if (!task_sigpending(tsk))
2985 		goto out;
2986
2987 	unblocked = tsk->blocked;
2988 	signotset(&unblocked);
2989 	retarget_shared_pending(tsk, &unblocked);
2990
2991 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2992 	    task_participate_group_stop(tsk))
2993 		group_stop = CLD_STOPPED;
2994 out:
2995 	spin_unlock_irq(&tsk->sighand->siglock);
2996
2997 	/*
2998 	 * If group stop has completed, deliver the notification. This
2999 	 * should always go to the real parent of the group leader.
3000 	 */
3001 	if (unlikely(group_stop)) {
3002 		read_lock(&tasklist_lock);
3003 		do_notify_parent_cldstop(tsk, false, group_stop);
3004 		read_unlock(&tasklist_lock);
3005 	}
3006 }
3007
3008 /*
3009  * System call entry points.
3010  */
3011
3012 /**
3013  * sys_restart_syscall - restart a system call
3014  */
3015 SYSCALL_DEFINE0(restart_syscall)
3016 {
3017 	struct restart_block *restart = &current->restart_block;
3018 	return restart->fn(restart);
3019 }
3020
3021 long do_no_restart_syscall(struct restart_block *param)
3022 {
3023 	return -EINTR;
3024 }
3025
3026 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3027 {
3028 	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3029 		sigset_t newblocked;
3030 		/* A set of now blocked but previously unblocked signals. */
3031 		sigandnsets(&newblocked, newset, &current->blocked);
3032 		retarget_shared_pending(tsk, &newblocked);
3033 	}
3034 	tsk->blocked = *newset;
3035 	recalc_sigpending();
3036 }
3037
3038 /**
3039  * set_current_blocked - change current->blocked mask
3040  * @newset: new mask
3041  *
3042  * It is wrong to change ->blocked directly; this helper should be used
3043  * to ensure the process can't miss a shared signal we are going to block.
3044  */
3045 void set_current_blocked(sigset_t *newset)
3046 {
3047 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3048 	__set_current_blocked(newset);
3049 }
3050
3051 void __set_current_blocked(const sigset_t *newset)
3052 {
3053 	struct task_struct *tsk = current;
3054
3055 	/*
3056 	 * In case the signal mask hasn't changed, there is nothing we need
3057 	 * to do. The current->blocked shouldn't be modified by any other task.
3058 	 */
3059 	if (sigequalsets(&tsk->blocked, newset))
3060 		return;
3061
3062 	spin_lock_irq(&tsk->sighand->siglock);
3063 	__set_task_blocked(tsk, newset);
3064 	spin_unlock_irq(&tsk->sighand->siglock);
3065 }
3066
3067 /*
3068  * This is also useful for kernel threads that want to temporarily
3069  * (or permanently) block certain signals.
3070  *
3071  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3072  * interface happily blocks "unblockable" signals like SIGKILL
3073  * and friends.
3074  */
3075 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3076 {
3077 	struct task_struct *tsk = current;
3078 	sigset_t newset;
3079
3080 	/* Lockless, only current can change ->blocked, never from irq */
3081 	if (oldset)
3082 		*oldset = tsk->blocked;
3083
3084 	switch (how) {
3085 	case SIG_BLOCK:
3086 		sigorsets(&newset, &tsk->blocked, set);
3087 		break;
3088 	case SIG_UNBLOCK:
3089 		sigandnsets(&newset, &tsk->blocked, set);
3090 		break;
3091 	case SIG_SETMASK:
3092 		newset = *set;
3093 		break;
3094 	default:
3095 		return -EINVAL;
3096 	}
3097
3098 	__set_current_blocked(&newset);
3099 	return 0;
3100 }
3101 EXPORT_SYMBOL(sigprocmask);
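/*
 * A minimal sketch (hypothetical kthread) of the kernel-side sigprocmask()
 * above: per the NOTE, it will happily block even SIGKILL, so a kthread
 * that wants signals must leave the interesting ones unblocked and flush
 * what it consumes. allow_signal()/disallow_signal() are the more common
 * wrappers; this uses sigprocmask() directly only for illustration.
 */
#if 0
static int my_signal_aware_kthread(void *unused)
{
	sigset_t blocked;

	/* Block everything except SIGHUP, which we poll for below. */
	sigfillset(&blocked);
	sigdelset(&blocked, SIGHUP);
	sigprocmask(SIG_SETMASK, &blocked, NULL);

	while (!kthread_should_stop()) {
		if (signal_pending(current))
			flush_signals(current);	/* consume and carry on */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif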
3102
3103 /*
3104  * This API helps set app-provided sigmasks.
3105  *
3106  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3107  * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3108  *
3109  * Note that it does set_restore_sigmask() in advance, so it must always
3110  * be paired with restore_saved_sigmask_unless() before return from syscall.
3111  */
3112 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3113 {
3114 	sigset_t kmask;
3115
3116 	if (!umask)
3117 		return 0;
3118 	if (sigsetsize != sizeof(sigset_t))
3119 		return -EINVAL;
3120 	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3121 		return -EFAULT;
3122
3123 	set_restore_sigmask();
3124 	current->saved_sigmask = current->blocked;
3125 	set_current_blocked(&kmask);
3126
3127 	return 0;
3128 }
3129
3130 #ifdef CONFIG_COMPAT
3131 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3132 			    size_t sigsetsize)
3133 {
3134 	sigset_t kmask;
3135
3136 	if (!umask)
3137 		return 0;
3138 	if (sigsetsize != sizeof(compat_sigset_t))
3139 		return -EINVAL;
3140 	if (get_compat_sigset(&kmask, umask))
3141 		return -EFAULT;
3142
3143 	set_restore_sigmask();
3144 	current->saved_sigmask = current->blocked;
3145 	set_current_blocked(&kmask);
3146
3147 	return 0;
3148 }
3149 #endif
3150
3151 /**
3152  * sys_rt_sigprocmask - change the list of currently blocked signals
3153  * @how: whether to add, remove, or set signals
3154  * @nset: new set of signals to block, or NULL to leave the mask unchanged
3155  * @oset: previous value of signal mask if non-null
3156  * @sigsetsize: size of sigset_t type
3157  */
3158 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3159 		sigset_t __user *, oset, size_t, sigsetsize)
3160 {
3161 	sigset_t old_set, new_set;
3162 	int error;
3163
3164 	/* XXX: Don't preclude handling different sized sigset_t's.
*/ 3165 if (sigsetsize != sizeof(sigset_t)) 3166 return -EINVAL; 3167 3168 old_set = current->blocked; 3169 3170 if (nset) { 3171 if (copy_from_user(&new_set, nset, sizeof(sigset_t))) 3172 return -EFAULT; 3173 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 3174 3175 error = sigprocmask(how, &new_set, NULL); 3176 if (error) 3177 return error; 3178 } 3179 3180 if (oset) { 3181 if (copy_to_user(oset, &old_set, sizeof(sigset_t))) 3182 return -EFAULT; 3183 } 3184 3185 return 0; 3186 } 3187 3188 #ifdef CONFIG_COMPAT 3189 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset, 3190 compat_sigset_t __user *, oset, compat_size_t, sigsetsize) 3191 { 3192 sigset_t old_set = current->blocked; 3193 3194 /* XXX: Don't preclude handling different sized sigset_t's. */ 3195 if (sigsetsize != sizeof(sigset_t)) 3196 return -EINVAL; 3197 3198 if (nset) { 3199 sigset_t new_set; 3200 int error; 3201 if (get_compat_sigset(&new_set, nset)) 3202 return -EFAULT; 3203 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 3204 3205 error = sigprocmask(how, &new_set, NULL); 3206 if (error) 3207 return error; 3208 } 3209 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0; 3210 } 3211 #endif 3212 3213 static void do_sigpending(sigset_t *set) 3214 { 3215 spin_lock_irq(¤t->sighand->siglock); 3216 sigorsets(set, ¤t->pending.signal, 3217 ¤t->signal->shared_pending.signal); 3218 spin_unlock_irq(¤t->sighand->siglock); 3219 3220 /* Outside the lock because only this thread touches it. */ 3221 sigandsets(set, ¤t->blocked, set); 3222 } 3223 3224 /** 3225 * sys_rt_sigpending - examine a pending signal that has been raised 3226 * while blocked 3227 * @uset: stores pending signals 3228 * @sigsetsize: size of sigset_t type or larger 3229 */ 3230 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) 3231 { 3232 sigset_t set; 3233 3234 if (sigsetsize > sizeof(*uset)) 3235 return -EINVAL; 3236 3237 do_sigpending(&set); 3238 3239 if (copy_to_user(uset, &set, sigsetsize)) 3240 return -EFAULT; 3241 3242 return 0; 3243 } 3244 3245 #ifdef CONFIG_COMPAT 3246 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset, 3247 compat_size_t, sigsetsize) 3248 { 3249 sigset_t set; 3250 3251 if (sigsetsize > sizeof(*uset)) 3252 return -EINVAL; 3253 3254 do_sigpending(&set); 3255 3256 return put_compat_sigset(uset, &set, sigsetsize); 3257 } 3258 #endif 3259 3260 static const struct { 3261 unsigned char limit, layout; 3262 } sig_sicodes[] = { 3263 [SIGILL] = { NSIGILL, SIL_FAULT }, 3264 [SIGFPE] = { NSIGFPE, SIL_FAULT }, 3265 [SIGSEGV] = { NSIGSEGV, SIL_FAULT }, 3266 [SIGBUS] = { NSIGBUS, SIL_FAULT }, 3267 [SIGTRAP] = { NSIGTRAP, SIL_FAULT }, 3268 #if defined(SIGEMT) 3269 [SIGEMT] = { NSIGEMT, SIL_FAULT }, 3270 #endif 3271 [SIGCHLD] = { NSIGCHLD, SIL_CHLD }, 3272 [SIGPOLL] = { NSIGPOLL, SIL_POLL }, 3273 [SIGSYS] = { NSIGSYS, SIL_SYS }, 3274 }; 3275 3276 static bool known_siginfo_layout(unsigned sig, int si_code) 3277 { 3278 if (si_code == SI_KERNEL) 3279 return true; 3280 else if ((si_code > SI_USER)) { 3281 if (sig_specific_sicodes(sig)) { 3282 if (si_code <= sig_sicodes[sig].limit) 3283 return true; 3284 } 3285 else if (si_code <= NSIGPOLL) 3286 return true; 3287 } 3288 else if (si_code >= SI_DETHREAD) 3289 return true; 3290 else if (si_code == SI_ASYNCNL) 3291 return true; 3292 return false; 3293 } 3294 3295 enum siginfo_layout siginfo_layout(unsigned sig, int si_code) 3296 { 3297 enum siginfo_layout layout = SIL_KILL; 3298 if ((si_code > SI_USER) && 
(si_code < SI_KERNEL)) {
3299 		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3300 		    (si_code <= sig_sicodes[sig].limit)) {
3301 			layout = sig_sicodes[sig].layout;
3302 			/* Handle the exceptions */
3303 			if ((sig == SIGBUS) &&
3304 			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3305 				layout = SIL_FAULT_MCEERR;
3306 			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3307 				layout = SIL_FAULT_BNDERR;
3308 #ifdef SEGV_PKUERR
3309 			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3310 				layout = SIL_FAULT_PKUERR;
3311 #endif
3312 			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3313 				layout = SIL_FAULT_PERF_EVENT;
3314 			else if (IS_ENABLED(CONFIG_SPARC) &&
3315 				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3316 				layout = SIL_FAULT_TRAPNO;
3317 			else if (IS_ENABLED(CONFIG_ALPHA) &&
3318 				 ((sig == SIGFPE) ||
3319 				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3320 				layout = SIL_FAULT_TRAPNO;
3321 		}
3322 		else if (si_code <= NSIGPOLL)
3323 			layout = SIL_POLL;
3324 	} else {
3325 		if (si_code == SI_TIMER)
3326 			layout = SIL_TIMER;
3327 		else if (si_code == SI_SIGIO)
3328 			layout = SIL_POLL;
3329 		else if (si_code < 0)
3330 			layout = SIL_RT;
3331 	}
3332 	return layout;
3333 }
3334
3335 static inline char __user *si_expansion(const siginfo_t __user *info)
3336 {
3337 	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3338 }
3339
3340 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3341 {
3342 	char __user *expansion = si_expansion(to);
3343 	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3344 		return -EFAULT;
3345 	if (clear_user(expansion, SI_EXPANSION_SIZE))
3346 		return -EFAULT;
3347 	return 0;
3348 }
3349
3350 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3351 				       const siginfo_t __user *from)
3352 {
3353 	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3354 		char __user *expansion = si_expansion(from);
3355 		char buf[SI_EXPANSION_SIZE];
3356 		int i;
3357 		/*
3358 		 * An unknown si_code might need more than
3359 		 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3360 		 * extra bytes are 0. This guarantees copy_siginfo_to_user
3361 		 * will return this data to userspace exactly.
3362 		 */
3363 		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3364 			return -EFAULT;
3365 		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3366 			if (buf[i] != 0)
3367 				return -E2BIG;
3368 		}
3369 	}
3370 	return 0;
3371 }
3372
3373 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3374 				    const siginfo_t __user *from)
3375 {
3376 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3377 		return -EFAULT;
3378 	to->si_signo = signo;
3379 	return post_copy_siginfo_from_user(to, from);
3380 }
3381
3382 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3383 {
3384 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3385 		return -EFAULT;
3386 	return post_copy_siginfo_from_user(to, from);
3387 }
3388
3389 #ifdef CONFIG_COMPAT
3390 /**
3391  * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3392  * @to: compat siginfo destination
3393  * @from: kernel siginfo source
3394  *
3395  * Note: This function does not work properly for SIGCHLD on x32, but
3396  * fortunately it doesn't have to. The only valid callers for this function
3397  * are copy_siginfo_to_user32(), which is overridden for x32, and the coredump
3398  * code. The latter does not care because SIGCHLD will never cause a coredump.
3399 */ 3400 void copy_siginfo_to_external32(struct compat_siginfo *to, 3401 const struct kernel_siginfo *from) 3402 { 3403 memset(to, 0, sizeof(*to)); 3404 3405 to->si_signo = from->si_signo; 3406 to->si_errno = from->si_errno; 3407 to->si_code = from->si_code; 3408 switch(siginfo_layout(from->si_signo, from->si_code)) { 3409 case SIL_KILL: 3410 to->si_pid = from->si_pid; 3411 to->si_uid = from->si_uid; 3412 break; 3413 case SIL_TIMER: 3414 to->si_tid = from->si_tid; 3415 to->si_overrun = from->si_overrun; 3416 to->si_int = from->si_int; 3417 break; 3418 case SIL_POLL: 3419 to->si_band = from->si_band; 3420 to->si_fd = from->si_fd; 3421 break; 3422 case SIL_FAULT: 3423 to->si_addr = ptr_to_compat(from->si_addr); 3424 break; 3425 case SIL_FAULT_TRAPNO: 3426 to->si_addr = ptr_to_compat(from->si_addr); 3427 to->si_trapno = from->si_trapno; 3428 break; 3429 case SIL_FAULT_MCEERR: 3430 to->si_addr = ptr_to_compat(from->si_addr); 3431 to->si_addr_lsb = from->si_addr_lsb; 3432 break; 3433 case SIL_FAULT_BNDERR: 3434 to->si_addr = ptr_to_compat(from->si_addr); 3435 to->si_lower = ptr_to_compat(from->si_lower); 3436 to->si_upper = ptr_to_compat(from->si_upper); 3437 break; 3438 case SIL_FAULT_PKUERR: 3439 to->si_addr = ptr_to_compat(from->si_addr); 3440 to->si_pkey = from->si_pkey; 3441 break; 3442 case SIL_FAULT_PERF_EVENT: 3443 to->si_addr = ptr_to_compat(from->si_addr); 3444 to->si_perf_data = from->si_perf_data; 3445 to->si_perf_type = from->si_perf_type; 3446 to->si_perf_flags = from->si_perf_flags; 3447 break; 3448 case SIL_CHLD: 3449 to->si_pid = from->si_pid; 3450 to->si_uid = from->si_uid; 3451 to->si_status = from->si_status; 3452 to->si_utime = from->si_utime; 3453 to->si_stime = from->si_stime; 3454 break; 3455 case SIL_RT: 3456 to->si_pid = from->si_pid; 3457 to->si_uid = from->si_uid; 3458 to->si_int = from->si_int; 3459 break; 3460 case SIL_SYS: 3461 to->si_call_addr = ptr_to_compat(from->si_call_addr); 3462 to->si_syscall = from->si_syscall; 3463 to->si_arch = from->si_arch; 3464 break; 3465 } 3466 } 3467 3468 int __copy_siginfo_to_user32(struct compat_siginfo __user *to, 3469 const struct kernel_siginfo *from) 3470 { 3471 struct compat_siginfo new; 3472 3473 copy_siginfo_to_external32(&new, from); 3474 if (copy_to_user(to, &new, sizeof(struct compat_siginfo))) 3475 return -EFAULT; 3476 return 0; 3477 } 3478 3479 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to, 3480 const struct compat_siginfo *from) 3481 { 3482 clear_siginfo(to); 3483 to->si_signo = from->si_signo; 3484 to->si_errno = from->si_errno; 3485 to->si_code = from->si_code; 3486 switch(siginfo_layout(from->si_signo, from->si_code)) { 3487 case SIL_KILL: 3488 to->si_pid = from->si_pid; 3489 to->si_uid = from->si_uid; 3490 break; 3491 case SIL_TIMER: 3492 to->si_tid = from->si_tid; 3493 to->si_overrun = from->si_overrun; 3494 to->si_int = from->si_int; 3495 break; 3496 case SIL_POLL: 3497 to->si_band = from->si_band; 3498 to->si_fd = from->si_fd; 3499 break; 3500 case SIL_FAULT: 3501 to->si_addr = compat_ptr(from->si_addr); 3502 break; 3503 case SIL_FAULT_TRAPNO: 3504 to->si_addr = compat_ptr(from->si_addr); 3505 to->si_trapno = from->si_trapno; 3506 break; 3507 case SIL_FAULT_MCEERR: 3508 to->si_addr = compat_ptr(from->si_addr); 3509 to->si_addr_lsb = from->si_addr_lsb; 3510 break; 3511 case SIL_FAULT_BNDERR: 3512 to->si_addr = compat_ptr(from->si_addr); 3513 to->si_lower = compat_ptr(from->si_lower); 3514 to->si_upper = compat_ptr(from->si_upper); 3515 break; 3516 case SIL_FAULT_PKUERR: 3517 to->si_addr = 
compat_ptr(from->si_addr); 3518 to->si_pkey = from->si_pkey; 3519 break; 3520 case SIL_FAULT_PERF_EVENT: 3521 to->si_addr = compat_ptr(from->si_addr); 3522 to->si_perf_data = from->si_perf_data; 3523 to->si_perf_type = from->si_perf_type; 3524 to->si_perf_flags = from->si_perf_flags; 3525 break; 3526 case SIL_CHLD: 3527 to->si_pid = from->si_pid; 3528 to->si_uid = from->si_uid; 3529 to->si_status = from->si_status; 3530 #ifdef CONFIG_X86_X32_ABI 3531 if (in_x32_syscall()) { 3532 to->si_utime = from->_sifields._sigchld_x32._utime; 3533 to->si_stime = from->_sifields._sigchld_x32._stime; 3534 } else 3535 #endif 3536 { 3537 to->si_utime = from->si_utime; 3538 to->si_stime = from->si_stime; 3539 } 3540 break; 3541 case SIL_RT: 3542 to->si_pid = from->si_pid; 3543 to->si_uid = from->si_uid; 3544 to->si_int = from->si_int; 3545 break; 3546 case SIL_SYS: 3547 to->si_call_addr = compat_ptr(from->si_call_addr); 3548 to->si_syscall = from->si_syscall; 3549 to->si_arch = from->si_arch; 3550 break; 3551 } 3552 return 0; 3553 } 3554 3555 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to, 3556 const struct compat_siginfo __user *ufrom) 3557 { 3558 struct compat_siginfo from; 3559 3560 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) 3561 return -EFAULT; 3562 3563 from.si_signo = signo; 3564 return post_copy_siginfo_from_user32(to, &from); 3565 } 3566 3567 int copy_siginfo_from_user32(struct kernel_siginfo *to, 3568 const struct compat_siginfo __user *ufrom) 3569 { 3570 struct compat_siginfo from; 3571 3572 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) 3573 return -EFAULT; 3574 3575 return post_copy_siginfo_from_user32(to, &from); 3576 } 3577 #endif /* CONFIG_COMPAT */ 3578 3579 /** 3580 * do_sigtimedwait - wait for queued signals specified in @which 3581 * @which: queued signals to wait for 3582 * @info: if non-null, the signal's siginfo is returned here 3583 * @ts: upper bound on process time suspension 3584 */ 3585 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info, 3586 const struct timespec64 *ts) 3587 { 3588 ktime_t *to = NULL, timeout = KTIME_MAX; 3589 struct task_struct *tsk = current; 3590 sigset_t mask = *which; 3591 enum pid_type type; 3592 int sig, ret = 0; 3593 3594 if (ts) { 3595 if (!timespec64_valid(ts)) 3596 return -EINVAL; 3597 timeout = timespec64_to_ktime(*ts); 3598 to = &timeout; 3599 } 3600 3601 /* 3602 * Invert the set of allowed signals to get those we want to block. 3603 */ 3604 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); 3605 signotset(&mask); 3606 3607 spin_lock_irq(&tsk->sighand->siglock); 3608 sig = dequeue_signal(tsk, &mask, info, &type); 3609 if (!sig && timeout) { 3610 /* 3611 * None ready, temporarily unblock those we're interested 3612 * while we are sleeping in so that we'll be awakened when 3613 * they arrive. Unblocking is always fine, we can avoid 3614 * set_current_blocked(). 
3615 */ 3616 tsk->real_blocked = tsk->blocked; 3617 sigandsets(&tsk->blocked, &tsk->blocked, &mask); 3618 recalc_sigpending(); 3619 spin_unlock_irq(&tsk->sighand->siglock); 3620 3621 __set_current_state(TASK_INTERRUPTIBLE); 3622 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns, 3623 HRTIMER_MODE_REL); 3624 spin_lock_irq(&tsk->sighand->siglock); 3625 __set_task_blocked(tsk, &tsk->real_blocked); 3626 sigemptyset(&tsk->real_blocked); 3627 sig = dequeue_signal(tsk, &mask, info, &type); 3628 } 3629 spin_unlock_irq(&tsk->sighand->siglock); 3630 3631 if (sig) 3632 return sig; 3633 return ret ? -EINTR : -EAGAIN; 3634 } 3635 3636 /** 3637 * sys_rt_sigtimedwait - synchronously wait for queued signals specified 3638 * in @uthese 3639 * @uthese: queued signals to wait for 3640 * @uinfo: if non-null, the signal's siginfo is returned here 3641 * @uts: upper bound on process time suspension 3642 * @sigsetsize: size of sigset_t type 3643 */ 3644 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, 3645 siginfo_t __user *, uinfo, 3646 const struct __kernel_timespec __user *, uts, 3647 size_t, sigsetsize) 3648 { 3649 sigset_t these; 3650 struct timespec64 ts; 3651 kernel_siginfo_t info; 3652 int ret; 3653 3654 /* XXX: Don't preclude handling different sized sigset_t's. */ 3655 if (sigsetsize != sizeof(sigset_t)) 3656 return -EINVAL; 3657 3658 if (copy_from_user(&these, uthese, sizeof(these))) 3659 return -EFAULT; 3660 3661 if (uts) { 3662 if (get_timespec64(&ts, uts)) 3663 return -EFAULT; 3664 } 3665 3666 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); 3667 3668 if (ret > 0 && uinfo) { 3669 if (copy_siginfo_to_user(uinfo, &info)) 3670 ret = -EFAULT; 3671 } 3672 3673 return ret; 3674 } 3675 3676 #ifdef CONFIG_COMPAT_32BIT_TIME 3677 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese, 3678 siginfo_t __user *, uinfo, 3679 const struct old_timespec32 __user *, uts, 3680 size_t, sigsetsize) 3681 { 3682 sigset_t these; 3683 struct timespec64 ts; 3684 kernel_siginfo_t info; 3685 int ret; 3686 3687 if (sigsetsize != sizeof(sigset_t)) 3688 return -EINVAL; 3689 3690 if (copy_from_user(&these, uthese, sizeof(these))) 3691 return -EFAULT; 3692 3693 if (uts) { 3694 if (get_old_timespec32(&ts, uts)) 3695 return -EFAULT; 3696 } 3697 3698 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); 3699 3700 if (ret > 0 && uinfo) { 3701 if (copy_siginfo_to_user(uinfo, &info)) 3702 ret = -EFAULT; 3703 } 3704 3705 return ret; 3706 } 3707 #endif 3708 3709 #ifdef CONFIG_COMPAT 3710 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese, 3711 struct compat_siginfo __user *, uinfo, 3712 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize) 3713 { 3714 sigset_t s; 3715 struct timespec64 t; 3716 kernel_siginfo_t info; 3717 long ret; 3718 3719 if (sigsetsize != sizeof(sigset_t)) 3720 return -EINVAL; 3721 3722 if (get_compat_sigset(&s, uthese)) 3723 return -EFAULT; 3724 3725 if (uts) { 3726 if (get_timespec64(&t, uts)) 3727 return -EFAULT; 3728 } 3729 3730 ret = do_sigtimedwait(&s, &info, uts ? 
&t : NULL);
3731
3732 	if (ret > 0 && uinfo) {
3733 		if (copy_siginfo_to_user32(uinfo, &info))
3734 			ret = -EFAULT;
3735 	}
3736
3737 	return ret;
3738 }
3739
3740 #ifdef CONFIG_COMPAT_32BIT_TIME
3741 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3742 		struct compat_siginfo __user *, uinfo,
3743 		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3744 {
3745 	sigset_t s;
3746 	struct timespec64 t;
3747 	kernel_siginfo_t info;
3748 	long ret;
3749
3750 	if (sigsetsize != sizeof(sigset_t))
3751 		return -EINVAL;
3752
3753 	if (get_compat_sigset(&s, uthese))
3754 		return -EFAULT;
3755
3756 	if (uts) {
3757 		if (get_old_timespec32(&t, uts))
3758 			return -EFAULT;
3759 	}
3760
3761 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3762
3763 	if (ret > 0 && uinfo) {
3764 		if (copy_siginfo_to_user32(uinfo, &info))
3765 			ret = -EFAULT;
3766 	}
3767
3768 	return ret;
3769 }
3770 #endif
3771 #endif
3772
3773 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3774 {
3775 	clear_siginfo(info);
3776 	info->si_signo = sig;
3777 	info->si_errno = 0;
3778 	info->si_code = SI_USER;
3779 	info->si_pid = task_tgid_vnr(current);
3780 	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3781 }
3782
3783 /**
3784  * sys_kill - send a signal to a process
3785  * @pid: the PID of the process
3786  * @sig: signal to be sent
3787  */
3788 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3789 {
3790 	struct kernel_siginfo info;
3791
3792 	prepare_kill_siginfo(sig, &info);
3793
3794 	return kill_something_info(sig, &info, pid);
3795 }
3796
3797 /*
3798  * Verify that the signaler and signalee either are in the same pid namespace
3799  * or that the signaler's pid namespace is an ancestor of the signalee's pid
3800  * namespace.
3801  */
3802 static bool access_pidfd_pidns(struct pid *pid)
3803 {
3804 	struct pid_namespace *active = task_active_pid_ns(current);
3805 	struct pid_namespace *p = ns_of_pid(pid);
3806
3807 	for (;;) {
3808 		if (!p)
3809 			return false;
3810 		if (p == active)
3811 			break;
3812 		p = p->parent;
3813 	}
3814
3815 	return true;
3816 }
3817
3818 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3819 				      siginfo_t __user *info)
3820 {
3821 #ifdef CONFIG_COMPAT
3822 	/*
3823 	 * Avoid hooking up compat syscalls and instead handle necessary
3824 	 * conversions here. Note, this is a stop-gap measure and should not be
3825 	 * considered a generic solution.
3826 	 */
3827 	if (in_compat_syscall())
3828 		return copy_siginfo_from_user32(
3829 			kinfo, (struct compat_siginfo __user *)info);
3830 #endif
3831 	return copy_siginfo_from_user(kinfo, info);
3832 }
3833
3834 static struct pid *pidfd_to_pid(const struct file *file)
3835 {
3836 	struct pid *pid;
3837
3838 	pid = pidfd_pid(file);
3839 	if (!IS_ERR(pid))
3840 		return pid;
3841
3842 	return tgid_pidfd_to_pid(file);
3843 }
3844
3845 /**
3846  * sys_pidfd_send_signal - Signal a process through a pidfd
3847  * @pidfd: file descriptor of the process
3848  * @sig: signal to send
3849  * @info: signal info
3850  * @flags: future flags
3851  *
3852  * The syscall currently only signals via PIDTYPE_PID which covers
3853  * kill(<positive-pid>, <signal>). It does not signal threads or process
3854  * groups.
3855  * In order to extend the syscall to threads and process groups the @flags
3856  * argument should be used. In essence, the @flags argument will determine
3857  * what is signaled and not the file descriptor itself. Put in other words,
3858  * grouping is a property of the flags argument, not a property of the file
3859  * descriptor.
3860  *
3861  * Return: 0 on success, negative errno on failure
3862  */
3863 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3864 		siginfo_t __user *, info, unsigned int, flags)
3865 {
3866 	int ret;
3867 	struct fd f;
3868 	struct pid *pid;
3869 	kernel_siginfo_t kinfo;
3870
3871 	/* Enforce that flags remain 0 until we add an extension. */
3872 	if (flags)
3873 		return -EINVAL;
3874
3875 	f = fdget(pidfd);
3876 	if (!f.file)
3877 		return -EBADF;
3878
3879 	/* Is this a pidfd? */
3880 	pid = pidfd_to_pid(f.file);
3881 	if (IS_ERR(pid)) {
3882 		ret = PTR_ERR(pid);
3883 		goto err;
3884 	}
3885
3886 	ret = -EINVAL;
3887 	if (!access_pidfd_pidns(pid))
3888 		goto err;
3889
3890 	if (info) {
3891 		ret = copy_siginfo_from_user_any(&kinfo, info);
3892 		if (unlikely(ret))
3893 			goto err;
3894
3895 		ret = -EINVAL;
3896 		if (unlikely(sig != kinfo.si_signo))
3897 			goto err;
3898
3899 		/* Only allow sending arbitrary signals to yourself. */
3900 		ret = -EPERM;
3901 		if ((task_pid(current) != pid) &&
3902 		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3903 			goto err;
3904 	} else {
3905 		prepare_kill_siginfo(sig, &kinfo);
3906 	}
3907
3908 	ret = kill_pid_info(sig, &kinfo, pid);
3909
3910 err:
3911 	fdput(f);
3912 	return ret;
3913 }
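/*
 * Userspace sketch (hypothetical) of the syscall above: signalling through
 * a pidfd cannot race with PID reuse the way kill(2) on a raw PID can.
 * A NULL @info behaves like kill(pid, sig) from the caller.
 */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int terminate_via_pidfd(pid_t pid)
{
	int ret, pidfd = syscall(SYS_pidfd_open, pid, 0);

	if (pidfd < 0)
		return -1;
	/* flags must be 0 until extensions are added, per the check above */
	ret = syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
	close(pidfd);
	return ret;
}
#endif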
/*
 * Verify that the signaler and signalee either are in the same pid namespace
 * or that the signaler's pid namespace is an ancestor of the signalee's pid
 * namespace.
 */
static bool access_pidfd_pidns(struct pid *pid)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *p = ns_of_pid(pid);

	for (;;) {
		if (!p)
			return false;
		if (p == active)
			break;
		p = p->parent;
	}

	return true;
}

static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
				      siginfo_t __user *info)
{
#ifdef CONFIG_COMPAT
	/*
	 * Avoid hooking up compat syscalls and instead handle necessary
	 * conversions here. Note, this is a stop-gap measure and should not be
	 * considered a generic solution.
	 */
	if (in_compat_syscall())
		return copy_siginfo_from_user32(
			kinfo, (struct compat_siginfo __user *)info);
#endif
	return copy_siginfo_from_user(kinfo, info);
}

static struct pid *pidfd_to_pid(const struct file *file)
{
	struct pid *pid;

	pid = pidfd_pid(file);
	if (!IS_ERR(pid))
		return pid;

	return tgid_pidfd_to_pid(file);
}

/**
 * sys_pidfd_send_signal - Signal a process through a pidfd
 * @pidfd: file descriptor of the process
 * @sig: signal to send
 * @info: signal info
 * @flags: future flags
 *
 * The syscall currently only signals via PIDTYPE_PID which covers
 * kill(<positive-pid>, <signal>). It does not signal threads or process
 * groups.
 * In order to extend the syscall to threads and process groups the @flags
 * argument should be used. In essence, the @flags argument will determine
 * what is signaled and not the file descriptor itself. In other words,
 * grouping is a property of the flags argument, not a property of the file
 * descriptor.
 *
 * Return: 0 on success, negative errno on failure
 */
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
		siginfo_t __user *, info, unsigned int, flags)
{
	int ret;
	struct fd f;
	struct pid *pid;
	kernel_siginfo_t kinfo;

	/* Enforce that flags is 0 until we add an extension. */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	/* Is this a pidfd? */
	pid = pidfd_to_pid(f.file);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto err;
	}

	ret = -EINVAL;
	if (!access_pidfd_pidns(pid))
		goto err;

	if (info) {
		ret = copy_siginfo_from_user_any(&kinfo, info);
		if (unlikely(ret))
			goto err;

		ret = -EINVAL;
		if (unlikely(sig != kinfo.si_signo))
			goto err;

		/* Only allow sending arbitrary signals to yourself. */
		ret = -EPERM;
		if ((task_pid(current) != pid) &&
		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
			goto err;
	} else {
		prepare_kill_siginfo(sig, &kinfo);
	}

	ret = kill_pid_info(sig, &kinfo, pid);

err:
	fdput(f);
	return ret;
}
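/*
 * Illustrative userspace sketch (hypothetical, not part of the kernel build)
 * of signalling through a pidfd. When info is NULL, the kernel fills in a
 * SI_USER record via prepare_kill_siginfo(), exactly as kill(2) would.
 *
 *	#define _GNU_SOURCE
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int kill_via_pidfd(pid_t pid)
 *	{
 *		int ret;
 *		int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *		if (pidfd < 0)
 *			return -1;
 *		// flags must be 0; info may be NULL
 *		ret = syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *		return ret;
 *	}
 */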
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}

/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks @tgid and returns -ESRCH even if the PID exists
 * but no longer belongs to the target thread group. This solves the problem
 * of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/**
 * sys_tkill - send signal to one specific task
 * @pid: the PID of the task
 * @sig: signal to be sent
 *
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, info, pid);
}

/**
 * sys_rt_sigqueueinfo - send signal information to a process
 * @pid: the PID of the target process
 * @sig: signal to be sent
 * @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
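/*
 * Illustrative userspace sketch (hypothetical, not part of the kernel build).
 * glibc's sigqueue(3) reaches sys_rt_sigqueueinfo above with
 * si_code = SI_QUEUE and a caller-chosen payload; the SI_USER/SI_TKILL check
 * in do_rt_sigqueueinfo() is what prevents forging kernel- or kill(2)-style
 * records at another process.
 *
 *	#include <signal.h>
 *
 *	int send_value(pid_t pid, int value)
 *	{
 *		union sigval v = { .sival_int = value };
 *
 *		// receiver sees the payload in info->si_value
 *		return sigqueue(pid, SIGRTMIN, v);
 *	}
 */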
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif

/*
 * For kthreads only; must not be used if cloned with CLONE_SIGHAND.
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);

void __weak sigaction_compat_abi(struct k_sigaction *act,
				 struct k_sigaction *oact)
{
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (k->sa.sa_flags & SA_IMMUTABLE) {
		spin_unlock_irq(&p->sighand->siglock);
		return -EINVAL;
	}
	if (oact)
		*oact = *k;

	/*
	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
	 * e.g. by having an architecture use the bit in their uapi.
	 */
	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);

	/*
	 * Clear unknown flag bits in order to allow userspace to detect missing
	 * support for flag bits and to allow the kernel to use non-uapi bits
	 * internally.
	 */
	if (act)
		act->sa.sa_flags &= UAPI_SA_FLAGS;
	if (oact)
		oact->sa.sa_flags &= UAPI_SA_FLAGS;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
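/*
 * Illustrative userspace sketch (hypothetical, not part of the kernel build)
 * of the POSIX rule implemented at the end of do_sigaction(): changing the
 * disposition to SIG_IGN discards a pending occurrence of that signal, even
 * while it is blocked.
 *
 *	#include <signal.h>
 *
 *	void drop_pending_usr1(void)
 *	{
 *		sigset_t set, pend;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		raise(SIGUSR1);			// queued: blocked, not delivered
 *		signal(SIGUSR1, SIG_IGN);	// flush_sigqueue_mask() discards it
 *		sigpending(&pend);		// SIGUSR1 is no longer pending
 *	}
 */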
#ifdef CONFIG_DYNAMIC_SIGFRAME
static inline void sigaltstack_lock(void)
	__acquires(&current->sighand->siglock)
{
	spin_lock_irq(&current->sighand->siglock);
}

static inline void sigaltstack_unlock(void)
	__releases(&current->sighand->siglock)
{
	spin_unlock_irq(&current->sighand->siglock);
}
#else
static inline void sigaltstack_lock(void) { }
static inline void sigaltstack_unlock(void) { }
#endif

static int
do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
	       size_t min_ss_size)
{
	struct task_struct *t = current;
	int ret = 0;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		/*
		 * Return before taking any locks if no actual
		 * sigaltstack changes were requested.
		 */
		if (t->sas_ss_sp == (unsigned long)ss_sp &&
		    t->sas_ss_size == ss_size &&
		    t->sas_ss_flags == ss_flags)
			return 0;

		sigaltstack_lock();
		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				ret = -ENOMEM;
			if (!sigaltstack_size_valid(ss_size))
				ret = -ENOMEM;
		}
		if (!ret) {
			t->sas_ss_sp = (unsigned long) ss_sp;
			t->sas_ss_size = ss_size;
			t->sas_ss_flags = ss_flags;
		}
		sigaltstack_unlock();
	}
	return ret;
}
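/*
 * Illustrative userspace sketch (hypothetical, not part of the kernel build)
 * of the usual do_sigaltstack() client: install an alternate stack so a
 * SIGSEGV caused by overflowing the normal stack can still run a handler.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	void install_altstack(void (*handler)(int))
 *	{
 *		stack_t ss = {
 *			.ss_sp = malloc(SIGSTKSZ),	// must be >= MINSIGSTKSZ
 *			.ss_size = SIGSTKSZ,
 *			.ss_flags = 0,
 *		};
 *		struct sigaction sa = {
 *			.sa_handler = handler,
 *			.sa_flags = SA_ONSTACK,		// run on the alternate stack
 *		};
 *
 *		sigaltstack(&ss, NULL);
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */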
SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
{
	stack_t new, old;
	int err;

	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer(),
			     MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}

int restore_altstack(const stack_t __user *uss)
{
	stack_t new;

	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}
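/*
 * Simplified, hypothetical sketch (invented names, not any particular
 * architecture) of how the two helpers above are typically paired by arch
 * signal-frame code: __save_altstack() records the current altstack into the
 * userspace ucontext while building a frame, and restore_altstack() replays
 * the (possibly modified) value at sigreturn time, squashing everything but
 * copy failures.
 *
 *	static int example_setup_rt_frame(struct rt_sigframe __user *frame,
 *					  unsigned long usp)
 *	{
 *		return __save_altstack(&frame->uc.uc_stack, usp);
 *	}
 *
 *	static long example_rt_sigreturn(struct rt_sigframe __user *frame)
 *	{
 *		if (restore_altstack(&frame->uc.uc_stack))
 *			return -EFAULT;	// only a failed copy_from_user lands here
 *		return 0;
 *	}
 */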
#ifdef CONFIG_COMPAT
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;

		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr) {
		compat_stack_t old;

		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = do_compat_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;

	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 * sys_sigpending - examine pending signals
 * @uset: where the mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK

/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */
SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
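/*
 * Illustrative userspace sketch (hypothetical, not part of the kernel build)
 * of the three `how` modes handled above. Note that this old-style call only
 * manipulates the first word of the blocked set (sig[0]); rt_sigprocmask
 * covers the full set.
 *
 *	#include <signal.h>
 *
 *	void block_during_critical_section(void)
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		sigprocmask(SIG_BLOCK, &set, &old);	// add SIGINT to the mask
 *		// ... critical section: SIGINT stays pending ...
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// restore previous mask
 *	}
 */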
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal whose action is to be altered
 * @act: new sigaction
 * @oact: used to save the previous sigaction
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}
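/*
 * Illustrative userspace sketch (hypothetical, not part of the kernel build).
 * The sigsetsize check above is why a raw caller must pass the kernel's
 * sigset size (_NSIG / 8); glibc does that on every sigaction(2) call. This
 * exercises the query-only path, where act == NULL and only @oact is
 * serviced:
 *
 *	#include <signal.h>
 *
 *	int is_ignored(int sig)
 *	{
 *		struct sigaction old;
 *
 *		if (sigaction(sig, NULL, &old))	// act == NULL: query only
 *			return -1;
 *		return old.sa_handler == SIG_IGN;
 *	}
 */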
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;

		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
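/*
 * Illustrative userspace sketch (hypothetical, not part of the kernel build)
 * of the System V semantics sys_signal() encodes with SA_ONESHOT | SA_NOMASK
 * (a.k.a. SA_RESETHAND | SA_NODEFER): the disposition resets to SIG_DFL on
 * delivery, so a persistent handler must re-install itself.
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void on_int(int sig)
 *	{
 *		signal(SIGINT, on_int);	// re-arm: the one-shot reset it to SIG_DFL
 *		// ... handle ...
 *	}
 *
 *	int main(void)
 *	{
 *		signal(SIGINT, on_int);
 *		for (;;)
 *			pause();
 *	}
 */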
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 * sys_rt_sigsuspend - replace the signal mask with @unewset until a
 *	signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
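/*
 * Illustrative userspace sketch (hypothetical, not part of the kernel build)
 * of the classic race-free wait that sigsuspend() above exists for: the mask
 * swap and the sleep are atomic, so a signal arriving between the flag test
 * and the sleep cannot be lost. Assumes a SIGCHLD handler that sets
 * got_child is already installed.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_child;
 *
 *	void wait_for_sigchld(void)
 *	{
 *		sigset_t block, waitmask;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGCHLD);
 *		sigprocmask(SIG_BLOCK, &block, &waitmask);
 *		sigdelset(&waitmask, SIGCHLD);	// sleep with SIGCHLD unblocked
 *		while (!got_child)
 *			sigsuspend(&waitmask);	// returns -1/EINTR after handler
 *		sigprocmask(SIG_UNBLOCK, &block, NULL);
 *	}
 */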
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_trapno);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);
	CHECK_OFFSET(si_perf_data);
	CHECK_OFFSET(si_perf_type);
	CHECK_OFFSET(si_perf_flags);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}

void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;

	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (!task_is_running(t) && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */