1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/kernel/signal.c 4 * 5 * Copyright (C) 1991, 1992 Linus Torvalds 6 * 7 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson 8 * 9 * 2003-06-02 Jim Houston - Concurrent Computer Corp. 10 * Changes to use preallocated sigqueue structures 11 * to allow signals to be sent reliably. 12 */ 13 14 #include <linux/slab.h> 15 #include <linux/export.h> 16 #include <linux/init.h> 17 #include <linux/sched/mm.h> 18 #include <linux/sched/user.h> 19 #include <linux/sched/debug.h> 20 #include <linux/sched/task.h> 21 #include <linux/sched/task_stack.h> 22 #include <linux/sched/cputime.h> 23 #include <linux/file.h> 24 #include <linux/fs.h> 25 #include <linux/proc_fs.h> 26 #include <linux/tty.h> 27 #include <linux/binfmts.h> 28 #include <linux/coredump.h> 29 #include <linux/security.h> 30 #include <linux/syscalls.h> 31 #include <linux/ptrace.h> 32 #include <linux/signal.h> 33 #include <linux/signalfd.h> 34 #include <linux/ratelimit.h> 35 #include <linux/task_work.h> 36 #include <linux/capability.h> 37 #include <linux/freezer.h> 38 #include <linux/pid_namespace.h> 39 #include <linux/nsproxy.h> 40 #include <linux/user_namespace.h> 41 #include <linux/uprobes.h> 42 #include <linux/compat.h> 43 #include <linux/cn_proc.h> 44 #include <linux/compiler.h> 45 #include <linux/posix-timers.h> 46 #include <linux/cgroup.h> 47 #include <linux/audit.h> 48 #include <linux/sysctl.h> 49 50 #define CREATE_TRACE_POINTS 51 #include <trace/events/signal.h> 52 53 #include <asm/param.h> 54 #include <linux/uaccess.h> 55 #include <asm/unistd.h> 56 #include <asm/siginfo.h> 57 #include <asm/cacheflush.h> 58 #include <asm/syscall.h> /* for syscall_get_* */ 59 60 /* 61 * SLAB caches for signal bits. 62 */ 63 64 static struct kmem_cache *sigqueue_cachep; 65 66 int print_fatal_signals __read_mostly; 67 68 static void __user *sig_handler(struct task_struct *t, int sig) 69 { 70 return t->sighand->action[sig - 1].sa.sa_handler; 71 } 72 73 static inline bool sig_handler_ignored(void __user *handler, int sig) 74 { 75 /* Is it explicitly or implicitly ignored? */ 76 return handler == SIG_IGN || 77 (handler == SIG_DFL && sig_kernel_ignore(sig)); 78 } 79 80 static bool sig_task_ignored(struct task_struct *t, int sig, bool force) 81 { 82 void __user *handler; 83 84 handler = sig_handler(t, sig); 85 86 /* SIGKILL and SIGSTOP may not be sent to the global init */ 87 if (unlikely(is_global_init(t) && sig_kernel_only(sig))) 88 return true; 89 90 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && 91 handler == SIG_DFL && !(force && sig_kernel_only(sig))) 92 return true; 93 94 /* Only allow kernel generated signals to this kthread */ 95 if (unlikely((t->flags & PF_KTHREAD) && 96 (handler == SIG_KTHREAD_KERNEL) && !force)) 97 return true; 98 99 return sig_handler_ignored(handler, sig); 100 } 101 102 static bool sig_ignored(struct task_struct *t, int sig, bool force) 103 { 104 /* 105 * Blocked signals are never ignored, since the 106 * signal handler may change by the time it is 107 * unblocked. 108 */ 109 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) 110 return false; 111 112 /* 113 * Tracers may want to know about even ignored signal unless it 114 * is SIGKILL which can't be reported anyway but can be ignored 115 * by SIGNAL_UNKILLABLE task. 
 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * clear it.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

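/*
 * Illustrative sketch only (not part of the original file): the usual
 * caller pattern that pairs a change of the blocked mask with the
 * recalculation above.  This is, in essence, what set_current_blocked()
 * does later in this file:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	current->blocked = newset;
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 */
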
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
336 */ 337 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask) 338 { 339 BUG_ON(mask & ~JOBCTL_PENDING_MASK); 340 341 if (mask & JOBCTL_STOP_PENDING) 342 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED; 343 344 task->jobctl &= ~mask; 345 346 if (!(task->jobctl & JOBCTL_PENDING_MASK)) 347 task_clear_jobctl_trapping(task); 348 } 349 350 /** 351 * task_participate_group_stop - participate in a group stop 352 * @task: task participating in a group stop 353 * 354 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop. 355 * Group stop states are cleared and the group stop count is consumed if 356 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group 357 * stop, the appropriate `SIGNAL_*` flags are set. 358 * 359 * CONTEXT: 360 * Must be called with @task->sighand->siglock held. 361 * 362 * RETURNS: 363 * %true if group stop completion should be notified to the parent, %false 364 * otherwise. 365 */ 366 static bool task_participate_group_stop(struct task_struct *task) 367 { 368 struct signal_struct *sig = task->signal; 369 bool consume = task->jobctl & JOBCTL_STOP_CONSUME; 370 371 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING)); 372 373 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING); 374 375 if (!consume) 376 return false; 377 378 if (!WARN_ON_ONCE(sig->group_stop_count == 0)) 379 sig->group_stop_count--; 380 381 /* 382 * Tell the caller to notify completion iff we are entering into a 383 * fresh group stop. Read comment in do_signal_stop() for details. 384 */ 385 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { 386 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED); 387 return true; 388 } 389 return false; 390 } 391 392 void task_join_group_stop(struct task_struct *task) 393 { 394 unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK; 395 struct signal_struct *sig = current->signal; 396 397 if (sig->group_stop_count) { 398 sig->group_stop_count++; 399 mask |= JOBCTL_STOP_CONSUME; 400 } else if (!(sig->flags & SIGNAL_STOP_STOPPED)) 401 return; 402 403 /* Have the new thread join an on-going signal group stop */ 404 task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING); 405 } 406 407 /* 408 * allocate a new signal queue record 409 * - this may be called without locks if and only if t == current, otherwise an 410 * appropriate lock must be held to stop the target task from exiting 411 */ 412 static struct sigqueue * 413 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags, 414 int override_rlimit, const unsigned int sigqueue_flags) 415 { 416 struct sigqueue *q = NULL; 417 struct ucounts *ucounts = NULL; 418 long sigpending; 419 420 /* 421 * Protect access to @t credentials. This can go away when all 422 * callers hold rcu read lock. 423 * 424 * NOTE! A pending signal will hold on to the user refcount, 425 * and we get/put the refcount only when the sigpending count 426 * changes from/to zero. 
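	 *
	 * Illustrative only (userspace sketch, not from the kernel sources):
	 * the sigpending count charged here is what RLIMIT_SIGPENDING limits,
	 * which from the user side surfaces roughly as
	 *
	 *	union sigval v = { .sival_int = 42 };
	 *	if (sigqueue(pid, SIGRTMIN, v) == -1 && errno == EAGAIN)
	 *		;	/* per-user queue of pending signals is full */
	 *
	 * kill(2) is not allowed to fail this way, which is why the non-rt
	 * path later in this file may fall back to delivering the signal
	 * without a queued siginfo.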
427 */ 428 rcu_read_lock(); 429 ucounts = task_ucounts(t); 430 sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING); 431 rcu_read_unlock(); 432 if (!sigpending) 433 return NULL; 434 435 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) { 436 q = kmem_cache_alloc(sigqueue_cachep, gfp_flags); 437 } else { 438 print_dropped_signal(sig); 439 } 440 441 if (unlikely(q == NULL)) { 442 dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING); 443 } else { 444 INIT_LIST_HEAD(&q->list); 445 q->flags = sigqueue_flags; 446 q->ucounts = ucounts; 447 } 448 return q; 449 } 450 451 static void __sigqueue_free(struct sigqueue *q) 452 { 453 if (q->flags & SIGQUEUE_PREALLOC) 454 return; 455 if (q->ucounts) { 456 dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING); 457 q->ucounts = NULL; 458 } 459 kmem_cache_free(sigqueue_cachep, q); 460 } 461 462 void flush_sigqueue(struct sigpending *queue) 463 { 464 struct sigqueue *q; 465 466 sigemptyset(&queue->signal); 467 while (!list_empty(&queue->list)) { 468 q = list_entry(queue->list.next, struct sigqueue , list); 469 list_del_init(&q->list); 470 __sigqueue_free(q); 471 } 472 } 473 474 /* 475 * Flush all pending signals for this kthread. 476 */ 477 void flush_signals(struct task_struct *t) 478 { 479 unsigned long flags; 480 481 spin_lock_irqsave(&t->sighand->siglock, flags); 482 clear_tsk_thread_flag(t, TIF_SIGPENDING); 483 flush_sigqueue(&t->pending); 484 flush_sigqueue(&t->signal->shared_pending); 485 spin_unlock_irqrestore(&t->sighand->siglock, flags); 486 } 487 EXPORT_SYMBOL(flush_signals); 488 489 #ifdef CONFIG_POSIX_TIMERS 490 static void __flush_itimer_signals(struct sigpending *pending) 491 { 492 sigset_t signal, retain; 493 struct sigqueue *q, *n; 494 495 signal = pending->signal; 496 sigemptyset(&retain); 497 498 list_for_each_entry_safe(q, n, &pending->list, list) { 499 int sig = q->info.si_signo; 500 501 if (likely(q->info.si_code != SI_TIMER)) { 502 sigaddset(&retain, sig); 503 } else { 504 sigdelset(&signal, sig); 505 list_del_init(&q->list); 506 __sigqueue_free(q); 507 } 508 } 509 510 sigorsets(&pending->signal, &signal, &retain); 511 } 512 513 void flush_itimer_signals(void) 514 { 515 struct task_struct *tsk = current; 516 unsigned long flags; 517 518 spin_lock_irqsave(&tsk->sighand->siglock, flags); 519 __flush_itimer_signals(&tsk->pending); 520 __flush_itimer_signals(&tsk->signal->shared_pending); 521 spin_unlock_irqrestore(&tsk->sighand->siglock, flags); 522 } 523 #endif 524 525 void ignore_signals(struct task_struct *t) 526 { 527 int i; 528 529 for (i = 0; i < _NSIG; ++i) 530 t->sighand->action[i].sa.sa_handler = SIG_IGN; 531 532 flush_signals(t); 533 } 534 535 /* 536 * Flush all handlers for a task. 
537 */ 538 539 void 540 flush_signal_handlers(struct task_struct *t, int force_default) 541 { 542 int i; 543 struct k_sigaction *ka = &t->sighand->action[0]; 544 for (i = _NSIG ; i != 0 ; i--) { 545 if (force_default || ka->sa.sa_handler != SIG_IGN) 546 ka->sa.sa_handler = SIG_DFL; 547 ka->sa.sa_flags = 0; 548 #ifdef __ARCH_HAS_SA_RESTORER 549 ka->sa.sa_restorer = NULL; 550 #endif 551 sigemptyset(&ka->sa.sa_mask); 552 ka++; 553 } 554 } 555 556 bool unhandled_signal(struct task_struct *tsk, int sig) 557 { 558 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; 559 if (is_global_init(tsk)) 560 return true; 561 562 if (handler != SIG_IGN && handler != SIG_DFL) 563 return false; 564 565 /* if ptraced, let the tracer determine */ 566 return !tsk->ptrace; 567 } 568 569 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info, 570 bool *resched_timer) 571 { 572 struct sigqueue *q, *first = NULL; 573 574 /* 575 * Collect the siginfo appropriate to this signal. Check if 576 * there is another siginfo for the same signal. 577 */ 578 list_for_each_entry(q, &list->list, list) { 579 if (q->info.si_signo == sig) { 580 if (first) 581 goto still_pending; 582 first = q; 583 } 584 } 585 586 sigdelset(&list->signal, sig); 587 588 if (first) { 589 still_pending: 590 list_del_init(&first->list); 591 copy_siginfo(info, &first->info); 592 593 *resched_timer = 594 (first->flags & SIGQUEUE_PREALLOC) && 595 (info->si_code == SI_TIMER) && 596 (info->si_sys_private); 597 598 __sigqueue_free(first); 599 } else { 600 /* 601 * Ok, it wasn't in the queue. This must be 602 * a fast-pathed signal or we must have been 603 * out of queue space. So zero out the info. 604 */ 605 clear_siginfo(info); 606 info->si_signo = sig; 607 info->si_errno = 0; 608 info->si_code = SI_USER; 609 info->si_pid = 0; 610 info->si_uid = 0; 611 } 612 } 613 614 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, 615 kernel_siginfo_t *info, bool *resched_timer) 616 { 617 int sig = next_signal(pending, mask); 618 619 if (sig) 620 collect_signal(sig, pending, info, resched_timer); 621 return sig; 622 } 623 624 /* 625 * Dequeue a signal and return the element to the caller, which is 626 * expected to free it. 627 * 628 * All callers have to hold the siglock. 629 */ 630 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, 631 kernel_siginfo_t *info, enum pid_type *type) 632 { 633 bool resched_timer = false; 634 int signr; 635 636 /* We only dequeue private signals from ourselves, we don't let 637 * signalfd steal them 638 */ 639 *type = PIDTYPE_PID; 640 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer); 641 if (!signr) { 642 *type = PIDTYPE_TGID; 643 signr = __dequeue_signal(&tsk->signal->shared_pending, 644 mask, info, &resched_timer); 645 #ifdef CONFIG_POSIX_TIMERS 646 /* 647 * itimer signal ? 648 * 649 * itimers are process shared and we restart periodic 650 * itimers in the signal delivery path to prevent DoS 651 * attacks in the high resolution timer case. This is 652 * compliant with the old way of self-restarting 653 * itimers, as the SIGALRM is a legacy signal and only 654 * queued once. Changing the restart behaviour to 655 * restart the timer in the signal dequeue path is 656 * reducing the timer noise on heavy loaded !highres 657 * systems too. 
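		 *
		 * Illustrative only (userspace sketch of the itimer being
		 * re-armed here):
		 *
		 *	struct itimerval it = {
		 *		.it_interval = { .tv_sec = 1 },	/* becomes it_real_incr */
		 *		.it_value    = { .tv_sec = 1 },
		 *	};
		 *	setitimer(ITIMER_REAL, &it, NULL);	/* periodic SIGALRM */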
658 */ 659 if (unlikely(signr == SIGALRM)) { 660 struct hrtimer *tmr = &tsk->signal->real_timer; 661 662 if (!hrtimer_is_queued(tmr) && 663 tsk->signal->it_real_incr != 0) { 664 hrtimer_forward(tmr, tmr->base->get_time(), 665 tsk->signal->it_real_incr); 666 hrtimer_restart(tmr); 667 } 668 } 669 #endif 670 } 671 672 recalc_sigpending(); 673 if (!signr) 674 return 0; 675 676 if (unlikely(sig_kernel_stop(signr))) { 677 /* 678 * Set a marker that we have dequeued a stop signal. Our 679 * caller might release the siglock and then the pending 680 * stop signal it is about to process is no longer in the 681 * pending bitmasks, but must still be cleared by a SIGCONT 682 * (and overruled by a SIGKILL). So those cases clear this 683 * shared flag after we've set it. Note that this flag may 684 * remain set after the signal we return is ignored or 685 * handled. That doesn't matter because its only purpose 686 * is to alert stop-signal processing code when another 687 * processor has come along and cleared the flag. 688 */ 689 current->jobctl |= JOBCTL_STOP_DEQUEUED; 690 } 691 #ifdef CONFIG_POSIX_TIMERS 692 if (resched_timer) { 693 /* 694 * Release the siglock to ensure proper locking order 695 * of timer locks outside of siglocks. Note, we leave 696 * irqs disabled here, since the posix-timers code is 697 * about to disable them again anyway. 698 */ 699 spin_unlock(&tsk->sighand->siglock); 700 posixtimer_rearm(info); 701 spin_lock(&tsk->sighand->siglock); 702 703 /* Don't expose the si_sys_private value to userspace */ 704 info->si_sys_private = 0; 705 } 706 #endif 707 return signr; 708 } 709 EXPORT_SYMBOL_GPL(dequeue_signal); 710 711 static int dequeue_synchronous_signal(kernel_siginfo_t *info) 712 { 713 struct task_struct *tsk = current; 714 struct sigpending *pending = &tsk->pending; 715 struct sigqueue *q, *sync = NULL; 716 717 /* 718 * Might a synchronous signal be in the queue? 719 */ 720 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK)) 721 return 0; 722 723 /* 724 * Return the first synchronous signal in the queue. 725 */ 726 list_for_each_entry(q, &pending->list, list) { 727 /* Synchronous signals have a positive si_code */ 728 if ((q->info.si_code > SI_USER) && 729 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) { 730 sync = q; 731 goto next; 732 } 733 } 734 return 0; 735 next: 736 /* 737 * Check if there is another siginfo for the same signal. 738 */ 739 list_for_each_entry_continue(q, &pending->list, list) { 740 if (q->info.si_signo == sync->info.si_signo) 741 goto still_pending; 742 } 743 744 sigdelset(&pending->signal, sync->info.si_signo); 745 recalc_sigpending(); 746 still_pending: 747 list_del_init(&sync->list); 748 copy_siginfo(info, &sync->info); 749 __sigqueue_free(sync); 750 return info->si_signo; 751 } 752 753 /* 754 * Tell a process that it has a new active signal.. 755 * 756 * NOTE! we rely on the previous spin_lock to 757 * lock interrupts for us! We can only be called with 758 * "siglock" held, and the local interrupt must 759 * have been disabled when that got acquired! 760 * 761 * No need to set need_resched since signal event passing 762 * goes through ->blocked 763 */ 764 void signal_wake_up_state(struct task_struct *t, unsigned int state) 765 { 766 lockdep_assert_held(&t->sighand->siglock); 767 768 set_tsk_thread_flag(t, TIF_SIGPENDING); 769 770 /* 771 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable 772 * case. 
 * We don't check t->state here because there is a race with it
 * executing on another processor and just now entering stopped state.
 * By using wake_up_state, we ensure the process will wake up and
 * handle its death signal.
 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL.  The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, the STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
887 */ 888 static void ptrace_trap_notify(struct task_struct *t) 889 { 890 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED)); 891 lockdep_assert_held(&t->sighand->siglock); 892 893 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); 894 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); 895 } 896 897 /* 898 * Handle magic process-wide effects of stop/continue signals. Unlike 899 * the signal actions, these happen immediately at signal-generation 900 * time regardless of blocking, ignoring, or handling. This does the 901 * actual continuing for SIGCONT, but not the actual stopping for stop 902 * signals. The process stop is done as a signal action for SIG_DFL. 903 * 904 * Returns true if the signal should be actually delivered, otherwise 905 * it should be dropped. 906 */ 907 static bool prepare_signal(int sig, struct task_struct *p, bool force) 908 { 909 struct signal_struct *signal = p->signal; 910 struct task_struct *t; 911 sigset_t flush; 912 913 if (signal->flags & SIGNAL_GROUP_EXIT) { 914 if (signal->core_state) 915 return sig == SIGKILL; 916 /* 917 * The process is in the middle of dying, drop the signal. 918 */ 919 return false; 920 } else if (sig_kernel_stop(sig)) { 921 /* 922 * This is a stop signal. Remove SIGCONT from all queues. 923 */ 924 siginitset(&flush, sigmask(SIGCONT)); 925 flush_sigqueue_mask(&flush, &signal->shared_pending); 926 for_each_thread(p, t) 927 flush_sigqueue_mask(&flush, &t->pending); 928 } else if (sig == SIGCONT) { 929 unsigned int why; 930 /* 931 * Remove all stop signals from all queues, wake all threads. 932 */ 933 siginitset(&flush, SIG_KERNEL_STOP_MASK); 934 flush_sigqueue_mask(&flush, &signal->shared_pending); 935 for_each_thread(p, t) { 936 flush_sigqueue_mask(&flush, &t->pending); 937 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); 938 if (likely(!(t->ptrace & PT_SEIZED))) { 939 t->jobctl &= ~JOBCTL_STOPPED; 940 wake_up_state(t, __TASK_STOPPED); 941 } else 942 ptrace_trap_notify(t); 943 } 944 945 /* 946 * Notify the parent with CLD_CONTINUED if we were stopped. 947 * 948 * If we were in the middle of a group stop, we pretend it 949 * was already finished, and then continued. Since SIGCHLD 950 * doesn't queue we report only CLD_STOPPED, as if the next 951 * CLD_CONTINUED was dropped. 952 */ 953 why = 0; 954 if (signal->flags & SIGNAL_STOP_STOPPED) 955 why |= SIGNAL_CLD_CONTINUED; 956 else if (signal->group_stop_count) 957 why |= SIGNAL_CLD_STOPPED; 958 959 if (why) { 960 /* 961 * The first thread which returns from do_signal_stop() 962 * will take ->siglock, notice SIGNAL_CLD_MASK, and 963 * notify its parent. See get_signal(). 964 */ 965 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED); 966 signal->group_stop_count = 0; 967 signal->group_exit_code = 0; 968 } 969 } 970 971 return !sig_ignored(p, sig, force); 972 } 973 974 /* 975 * Test if P wants to take SIG. After we've checked all threads with this, 976 * it's equivalent to finding no threads not blocking SIG. Any threads not 977 * blocking SIG were ruled out because they are not running and already 978 * have pending signals. Such threads will dequeue from the shared queue 979 * as soon as they're available, so putting the signal on the shared queue 980 * will be equivalent to sending it to one such thread. 
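 *
 * Illustrative only (userspace sketch, not from the kernel sources): a
 * program that funnels process-directed signals to one dedicated thread
 * relies on exactly this logic.  Worker threads block the signal:
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGTERM);
 *	pthread_sigmask(SIG_BLOCK, &set, NULL);
 *
 * so wants_signal() below rejects them, and complete_signal() ends up
 * waking the one thread that left SIGTERM unblocked.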
981 */ 982 static inline bool wants_signal(int sig, struct task_struct *p) 983 { 984 if (sigismember(&p->blocked, sig)) 985 return false; 986 987 if (p->flags & PF_EXITING) 988 return false; 989 990 if (sig == SIGKILL) 991 return true; 992 993 if (task_is_stopped_or_traced(p)) 994 return false; 995 996 return task_curr(p) || !task_sigpending(p); 997 } 998 999 static void complete_signal(int sig, struct task_struct *p, enum pid_type type) 1000 { 1001 struct signal_struct *signal = p->signal; 1002 struct task_struct *t; 1003 1004 /* 1005 * Now find a thread we can wake up to take the signal off the queue. 1006 * 1007 * Try the suggested task first (may or may not be the main thread). 1008 */ 1009 if (wants_signal(sig, p)) 1010 t = p; 1011 else if ((type == PIDTYPE_PID) || thread_group_empty(p)) 1012 /* 1013 * There is just one thread and it does not need to be woken. 1014 * It will dequeue unblocked signals before it runs again. 1015 */ 1016 return; 1017 else { 1018 /* 1019 * Otherwise try to find a suitable thread. 1020 */ 1021 t = signal->curr_target; 1022 while (!wants_signal(sig, t)) { 1023 t = next_thread(t); 1024 if (t == signal->curr_target) 1025 /* 1026 * No thread needs to be woken. 1027 * Any eligible threads will see 1028 * the signal in the queue soon. 1029 */ 1030 return; 1031 } 1032 signal->curr_target = t; 1033 } 1034 1035 /* 1036 * Found a killable thread. If the signal will be fatal, 1037 * then start taking the whole group down immediately. 1038 */ 1039 if (sig_fatal(p, sig) && 1040 (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) && 1041 !sigismember(&t->real_blocked, sig) && 1042 (sig == SIGKILL || !p->ptrace)) { 1043 /* 1044 * This signal will be fatal to the whole group. 1045 */ 1046 if (!sig_kernel_coredump(sig)) { 1047 /* 1048 * Start a group exit and wake everybody up. 1049 * This way we don't have other threads 1050 * running and doing things after a slower 1051 * thread has the fatal signal pending. 1052 */ 1053 signal->flags = SIGNAL_GROUP_EXIT; 1054 signal->group_exit_code = sig; 1055 signal->group_stop_count = 0; 1056 t = p; 1057 do { 1058 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); 1059 sigaddset(&t->pending.signal, SIGKILL); 1060 signal_wake_up(t, 1); 1061 } while_each_thread(p, t); 1062 return; 1063 } 1064 } 1065 1066 /* 1067 * The signal is already in the shared-pending queue. 1068 * Tell the chosen thread to wake up and dequeue it. 1069 */ 1070 signal_wake_up(t, sig == SIGKILL); 1071 return; 1072 } 1073 1074 static inline bool legacy_queue(struct sigpending *signals, int sig) 1075 { 1076 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); 1077 } 1078 1079 static int __send_signal_locked(int sig, struct kernel_siginfo *info, 1080 struct task_struct *t, enum pid_type type, bool force) 1081 { 1082 struct sigpending *pending; 1083 struct sigqueue *q; 1084 int override_rlimit; 1085 int ret = 0, result; 1086 1087 lockdep_assert_held(&t->sighand->siglock); 1088 1089 result = TRACE_SIGNAL_IGNORED; 1090 if (!prepare_signal(sig, t, force)) 1091 goto ret; 1092 1093 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending; 1094 /* 1095 * Short-circuit ignored signals and support queuing 1096 * exactly one non-rt signal, so that we can get more 1097 * detailed information about the cause of the signal. 
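	 *
	 * Illustrative only (userspace sketch of the resulting semantics,
	 * assuming the target has not yet dequeued anything):
	 *
	 *	kill(pid, SIGUSR1);		// pending bit set, siginfo queued
	 *	kill(pid, SIGUSR1);		// coalesced: still one SIGUSR1 pending
	 *
	 *	union sigval v1 = { .sival_int = 1 }, v2 = { .sival_int = 2 };
	 *	sigqueue(pid, SIGRTMIN, v1);	// rt signals are not coalesced,
	 *	sigqueue(pid, SIGRTMIN, v2);	// both instances stay queued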
1098 */ 1099 result = TRACE_SIGNAL_ALREADY_PENDING; 1100 if (legacy_queue(pending, sig)) 1101 goto ret; 1102 1103 result = TRACE_SIGNAL_DELIVERED; 1104 /* 1105 * Skip useless siginfo allocation for SIGKILL and kernel threads. 1106 */ 1107 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD)) 1108 goto out_set; 1109 1110 /* 1111 * Real-time signals must be queued if sent by sigqueue, or 1112 * some other real-time mechanism. It is implementation 1113 * defined whether kill() does so. We attempt to do so, on 1114 * the principle of least surprise, but since kill is not 1115 * allowed to fail with EAGAIN when low on memory we just 1116 * make sure at least one signal gets delivered and don't 1117 * pass on the info struct. 1118 */ 1119 if (sig < SIGRTMIN) 1120 override_rlimit = (is_si_special(info) || info->si_code >= 0); 1121 else 1122 override_rlimit = 0; 1123 1124 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0); 1125 1126 if (q) { 1127 list_add_tail(&q->list, &pending->list); 1128 switch ((unsigned long) info) { 1129 case (unsigned long) SEND_SIG_NOINFO: 1130 clear_siginfo(&q->info); 1131 q->info.si_signo = sig; 1132 q->info.si_errno = 0; 1133 q->info.si_code = SI_USER; 1134 q->info.si_pid = task_tgid_nr_ns(current, 1135 task_active_pid_ns(t)); 1136 rcu_read_lock(); 1137 q->info.si_uid = 1138 from_kuid_munged(task_cred_xxx(t, user_ns), 1139 current_uid()); 1140 rcu_read_unlock(); 1141 break; 1142 case (unsigned long) SEND_SIG_PRIV: 1143 clear_siginfo(&q->info); 1144 q->info.si_signo = sig; 1145 q->info.si_errno = 0; 1146 q->info.si_code = SI_KERNEL; 1147 q->info.si_pid = 0; 1148 q->info.si_uid = 0; 1149 break; 1150 default: 1151 copy_siginfo(&q->info, info); 1152 break; 1153 } 1154 } else if (!is_si_special(info) && 1155 sig >= SIGRTMIN && info->si_code != SI_USER) { 1156 /* 1157 * Queue overflow, abort. We may abort if the 1158 * signal was rt and sent by user using something 1159 * other than kill(). 1160 */ 1161 result = TRACE_SIGNAL_OVERFLOW_FAIL; 1162 ret = -EAGAIN; 1163 goto ret; 1164 } else { 1165 /* 1166 * This is a silent loss of information. We still 1167 * send the signal, but the *info bits are lost. 
1168 */ 1169 result = TRACE_SIGNAL_LOSE_INFO; 1170 } 1171 1172 out_set: 1173 signalfd_notify(t, sig); 1174 sigaddset(&pending->signal, sig); 1175 1176 /* Let multiprocess signals appear after on-going forks */ 1177 if (type > PIDTYPE_TGID) { 1178 struct multiprocess_signals *delayed; 1179 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) { 1180 sigset_t *signal = &delayed->signal; 1181 /* Can't queue both a stop and a continue signal */ 1182 if (sig == SIGCONT) 1183 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK); 1184 else if (sig_kernel_stop(sig)) 1185 sigdelset(signal, SIGCONT); 1186 sigaddset(signal, sig); 1187 } 1188 } 1189 1190 complete_signal(sig, t, type); 1191 ret: 1192 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result); 1193 return ret; 1194 } 1195 1196 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info) 1197 { 1198 bool ret = false; 1199 switch (siginfo_layout(info->si_signo, info->si_code)) { 1200 case SIL_KILL: 1201 case SIL_CHLD: 1202 case SIL_RT: 1203 ret = true; 1204 break; 1205 case SIL_TIMER: 1206 case SIL_POLL: 1207 case SIL_FAULT: 1208 case SIL_FAULT_TRAPNO: 1209 case SIL_FAULT_MCEERR: 1210 case SIL_FAULT_BNDERR: 1211 case SIL_FAULT_PKUERR: 1212 case SIL_FAULT_PERF_EVENT: 1213 case SIL_SYS: 1214 ret = false; 1215 break; 1216 } 1217 return ret; 1218 } 1219 1220 int send_signal_locked(int sig, struct kernel_siginfo *info, 1221 struct task_struct *t, enum pid_type type) 1222 { 1223 /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */ 1224 bool force = false; 1225 1226 if (info == SEND_SIG_NOINFO) { 1227 /* Force if sent from an ancestor pid namespace */ 1228 force = !task_pid_nr_ns(current, task_active_pid_ns(t)); 1229 } else if (info == SEND_SIG_PRIV) { 1230 /* Don't ignore kernel generated signals */ 1231 force = true; 1232 } else if (has_si_pid_and_uid(info)) { 1233 /* SIGKILL and SIGSTOP is special or has ids */ 1234 struct user_namespace *t_user_ns; 1235 1236 rcu_read_lock(); 1237 t_user_ns = task_cred_xxx(t, user_ns); 1238 if (current_user_ns() != t_user_ns) { 1239 kuid_t uid = make_kuid(current_user_ns(), info->si_uid); 1240 info->si_uid = from_kuid_munged(t_user_ns, uid); 1241 } 1242 rcu_read_unlock(); 1243 1244 /* A kernel generated signal? */ 1245 force = (info->si_code == SI_KERNEL); 1246 1247 /* From an ancestor pid namespace? 
*/ 1248 if (!task_pid_nr_ns(current, task_active_pid_ns(t))) { 1249 info->si_pid = 0; 1250 force = true; 1251 } 1252 } 1253 return __send_signal_locked(sig, info, t, type, force); 1254 } 1255 1256 static void print_fatal_signal(int signr) 1257 { 1258 struct pt_regs *regs = task_pt_regs(current); 1259 pr_info("potentially unexpected fatal signal %d.\n", signr); 1260 1261 #if defined(__i386__) && !defined(__arch_um__) 1262 pr_info("code at %08lx: ", regs->ip); 1263 { 1264 int i; 1265 for (i = 0; i < 16; i++) { 1266 unsigned char insn; 1267 1268 if (get_user(insn, (unsigned char *)(regs->ip + i))) 1269 break; 1270 pr_cont("%02x ", insn); 1271 } 1272 } 1273 pr_cont("\n"); 1274 #endif 1275 preempt_disable(); 1276 show_regs(regs); 1277 preempt_enable(); 1278 } 1279 1280 static int __init setup_print_fatal_signals(char *str) 1281 { 1282 get_option (&str, &print_fatal_signals); 1283 1284 return 1; 1285 } 1286 1287 __setup("print-fatal-signals=", setup_print_fatal_signals); 1288 1289 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, 1290 enum pid_type type) 1291 { 1292 unsigned long flags; 1293 int ret = -ESRCH; 1294 1295 if (lock_task_sighand(p, &flags)) { 1296 ret = send_signal_locked(sig, info, p, type); 1297 unlock_task_sighand(p, &flags); 1298 } 1299 1300 return ret; 1301 } 1302 1303 enum sig_handler { 1304 HANDLER_CURRENT, /* If reachable use the current handler */ 1305 HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */ 1306 HANDLER_EXIT, /* Only visible as the process exit code */ 1307 }; 1308 1309 /* 1310 * Force a signal that the process can't ignore: if necessary 1311 * we unblock the signal and change any SIG_IGN to SIG_DFL. 1312 * 1313 * Note: If we unblock the signal, we always reset it to SIG_DFL, 1314 * since we do not want to have a signal handler that was blocked 1315 * be invoked when user space had explicitly blocked it. 1316 * 1317 * We don't want to have recursive SIGSEGV's etc, for example, 1318 * that is why we also clear SIGNAL_UNKILLABLE. 1319 */ 1320 static int 1321 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, 1322 enum sig_handler handler) 1323 { 1324 unsigned long int flags; 1325 int ret, blocked, ignored; 1326 struct k_sigaction *action; 1327 int sig = info->si_signo; 1328 1329 spin_lock_irqsave(&t->sighand->siglock, flags); 1330 action = &t->sighand->action[sig-1]; 1331 ignored = action->sa.sa_handler == SIG_IGN; 1332 blocked = sigismember(&t->blocked, sig); 1333 if (blocked || ignored || (handler != HANDLER_CURRENT)) { 1334 action->sa.sa_handler = SIG_DFL; 1335 if (handler == HANDLER_EXIT) 1336 action->sa.sa_flags |= SA_IMMUTABLE; 1337 if (blocked) { 1338 sigdelset(&t->blocked, sig); 1339 recalc_sigpending_and_wake(t); 1340 } 1341 } 1342 /* 1343 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect 1344 * debugging to leave init killable. But HANDLER_EXIT is always fatal. 1345 */ 1346 if (action->sa.sa_handler == SIG_DFL && 1347 (!t->ptrace || (handler == HANDLER_EXIT))) 1348 t->signal->flags &= ~SIGNAL_UNKILLABLE; 1349 ret = send_signal_locked(sig, info, t, PIDTYPE_PID); 1350 spin_unlock_irqrestore(&t->sighand->siglock, flags); 1351 1352 return ret; 1353 } 1354 1355 int force_sig_info(struct kernel_siginfo *info) 1356 { 1357 return force_sig_info_to_task(info, current, HANDLER_CURRENT); 1358 } 1359 1360 /* 1361 * Nuke all other threads in the group. 
1362 */ 1363 int zap_other_threads(struct task_struct *p) 1364 { 1365 struct task_struct *t = p; 1366 int count = 0; 1367 1368 p->signal->group_stop_count = 0; 1369 1370 while_each_thread(p, t) { 1371 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); 1372 /* Don't require de_thread to wait for the vhost_worker */ 1373 if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER) 1374 count++; 1375 1376 /* Don't bother with already dead threads */ 1377 if (t->exit_state) 1378 continue; 1379 sigaddset(&t->pending.signal, SIGKILL); 1380 signal_wake_up(t, 1); 1381 } 1382 1383 return count; 1384 } 1385 1386 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, 1387 unsigned long *flags) 1388 { 1389 struct sighand_struct *sighand; 1390 1391 rcu_read_lock(); 1392 for (;;) { 1393 sighand = rcu_dereference(tsk->sighand); 1394 if (unlikely(sighand == NULL)) 1395 break; 1396 1397 /* 1398 * This sighand can be already freed and even reused, but 1399 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which 1400 * initializes ->siglock: this slab can't go away, it has 1401 * the same object type, ->siglock can't be reinitialized. 1402 * 1403 * We need to ensure that tsk->sighand is still the same 1404 * after we take the lock, we can race with de_thread() or 1405 * __exit_signal(). In the latter case the next iteration 1406 * must see ->sighand == NULL. 1407 */ 1408 spin_lock_irqsave(&sighand->siglock, *flags); 1409 if (likely(sighand == rcu_access_pointer(tsk->sighand))) 1410 break; 1411 spin_unlock_irqrestore(&sighand->siglock, *flags); 1412 } 1413 rcu_read_unlock(); 1414 1415 return sighand; 1416 } 1417 1418 #ifdef CONFIG_LOCKDEP 1419 void lockdep_assert_task_sighand_held(struct task_struct *task) 1420 { 1421 struct sighand_struct *sighand; 1422 1423 rcu_read_lock(); 1424 sighand = rcu_dereference(task->sighand); 1425 if (sighand) 1426 lockdep_assert_held(&sighand->siglock); 1427 else 1428 WARN_ON_ONCE(1); 1429 rcu_read_unlock(); 1430 } 1431 #endif 1432 1433 /* 1434 * send signal info to all the members of a group 1435 */ 1436 int group_send_sig_info(int sig, struct kernel_siginfo *info, 1437 struct task_struct *p, enum pid_type type) 1438 { 1439 int ret; 1440 1441 rcu_read_lock(); 1442 ret = check_kill_permission(sig, info, p); 1443 rcu_read_unlock(); 1444 1445 if (!ret && sig) 1446 ret = do_send_sig_info(sig, info, p, type); 1447 1448 return ret; 1449 } 1450 1451 /* 1452 * __kill_pgrp_info() sends a signal to a process group: this is what the tty 1453 * control characters do (^C, ^Z etc) 1454 * - the caller must hold at least a readlock on tasklist_lock 1455 */ 1456 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp) 1457 { 1458 struct task_struct *p = NULL; 1459 int retval, success; 1460 1461 success = 0; 1462 retval = -ESRCH; 1463 do_each_pid_task(pgrp, PIDTYPE_PGID, p) { 1464 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID); 1465 success |= !err; 1466 retval = err; 1467 } while_each_pid_task(pgrp, PIDTYPE_PGID, p); 1468 return success ? 0 : retval; 1469 } 1470 1471 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid) 1472 { 1473 int error = -ESRCH; 1474 struct task_struct *p; 1475 1476 for (;;) { 1477 rcu_read_lock(); 1478 p = pid_task(pid, PIDTYPE_PID); 1479 if (p) 1480 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID); 1481 rcu_read_unlock(); 1482 if (likely(!p || error != -ESRCH)) 1483 return error; 1484 1485 /* 1486 * The task was unhashed in between, try again. 
		 * If it is dead, pid_task() will return NULL; if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the low
 * 32bits of the pointer.  Those low 32bits will be stored at a higher
 * address than they appear at in a 32bit pointer, so userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.
Exclude this case to avoid a UBSAN warning */ 1597 if (pid == INT_MIN) 1598 return -ESRCH; 1599 1600 read_lock(&tasklist_lock); 1601 if (pid != -1) { 1602 ret = __kill_pgrp_info(sig, info, 1603 pid ? find_vpid(-pid) : task_pgrp(current)); 1604 } else { 1605 int retval = 0, count = 0; 1606 struct task_struct * p; 1607 1608 for_each_process(p) { 1609 if (task_pid_vnr(p) > 1 && 1610 !same_thread_group(p, current)) { 1611 int err = group_send_sig_info(sig, info, p, 1612 PIDTYPE_MAX); 1613 ++count; 1614 if (err != -EPERM) 1615 retval = err; 1616 } 1617 } 1618 ret = count ? retval : -ESRCH; 1619 } 1620 read_unlock(&tasklist_lock); 1621 1622 return ret; 1623 } 1624 1625 /* 1626 * These are for backward compatibility with the rest of the kernel source. 1627 */ 1628 1629 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p) 1630 { 1631 /* 1632 * Make sure legacy kernel users don't send in bad values 1633 * (normal paths check this in check_kill_permission). 1634 */ 1635 if (!valid_signal(sig)) 1636 return -EINVAL; 1637 1638 return do_send_sig_info(sig, info, p, PIDTYPE_PID); 1639 } 1640 EXPORT_SYMBOL(send_sig_info); 1641 1642 #define __si_special(priv) \ 1643 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) 1644 1645 int 1646 send_sig(int sig, struct task_struct *p, int priv) 1647 { 1648 return send_sig_info(sig, __si_special(priv), p); 1649 } 1650 EXPORT_SYMBOL(send_sig); 1651 1652 void force_sig(int sig) 1653 { 1654 struct kernel_siginfo info; 1655 1656 clear_siginfo(&info); 1657 info.si_signo = sig; 1658 info.si_errno = 0; 1659 info.si_code = SI_KERNEL; 1660 info.si_pid = 0; 1661 info.si_uid = 0; 1662 force_sig_info(&info); 1663 } 1664 EXPORT_SYMBOL(force_sig); 1665 1666 void force_fatal_sig(int sig) 1667 { 1668 struct kernel_siginfo info; 1669 1670 clear_siginfo(&info); 1671 info.si_signo = sig; 1672 info.si_errno = 0; 1673 info.si_code = SI_KERNEL; 1674 info.si_pid = 0; 1675 info.si_uid = 0; 1676 force_sig_info_to_task(&info, current, HANDLER_SIG_DFL); 1677 } 1678 1679 void force_exit_sig(int sig) 1680 { 1681 struct kernel_siginfo info; 1682 1683 clear_siginfo(&info); 1684 info.si_signo = sig; 1685 info.si_errno = 0; 1686 info.si_code = SI_KERNEL; 1687 info.si_pid = 0; 1688 info.si_uid = 0; 1689 force_sig_info_to_task(&info, current, HANDLER_EXIT); 1690 } 1691 1692 /* 1693 * When things go south during signal handling, we 1694 * will force a SIGSEGV. And if the signal that caused 1695 * the problem was already a SIGSEGV, we'll want to 1696 * make sure we don't even try to deliver the signal.. 
1697 */ 1698 void force_sigsegv(int sig) 1699 { 1700 if (sig == SIGSEGV) 1701 force_fatal_sig(SIGSEGV); 1702 else 1703 force_sig(SIGSEGV); 1704 } 1705 1706 int force_sig_fault_to_task(int sig, int code, void __user *addr 1707 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) 1708 , struct task_struct *t) 1709 { 1710 struct kernel_siginfo info; 1711 1712 clear_siginfo(&info); 1713 info.si_signo = sig; 1714 info.si_errno = 0; 1715 info.si_code = code; 1716 info.si_addr = addr; 1717 #ifdef __ia64__ 1718 info.si_imm = imm; 1719 info.si_flags = flags; 1720 info.si_isr = isr; 1721 #endif 1722 return force_sig_info_to_task(&info, t, HANDLER_CURRENT); 1723 } 1724 1725 int force_sig_fault(int sig, int code, void __user *addr 1726 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)) 1727 { 1728 return force_sig_fault_to_task(sig, code, addr 1729 ___ARCH_SI_IA64(imm, flags, isr), current); 1730 } 1731 1732 int send_sig_fault(int sig, int code, void __user *addr 1733 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) 1734 , struct task_struct *t) 1735 { 1736 struct kernel_siginfo info; 1737 1738 clear_siginfo(&info); 1739 info.si_signo = sig; 1740 info.si_errno = 0; 1741 info.si_code = code; 1742 info.si_addr = addr; 1743 #ifdef __ia64__ 1744 info.si_imm = imm; 1745 info.si_flags = flags; 1746 info.si_isr = isr; 1747 #endif 1748 return send_sig_info(info.si_signo, &info, t); 1749 } 1750 1751 int force_sig_mceerr(int code, void __user *addr, short lsb) 1752 { 1753 struct kernel_siginfo info; 1754 1755 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR)); 1756 clear_siginfo(&info); 1757 info.si_signo = SIGBUS; 1758 info.si_errno = 0; 1759 info.si_code = code; 1760 info.si_addr = addr; 1761 info.si_addr_lsb = lsb; 1762 return force_sig_info(&info); 1763 } 1764 1765 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t) 1766 { 1767 struct kernel_siginfo info; 1768 1769 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR)); 1770 clear_siginfo(&info); 1771 info.si_signo = SIGBUS; 1772 info.si_errno = 0; 1773 info.si_code = code; 1774 info.si_addr = addr; 1775 info.si_addr_lsb = lsb; 1776 return send_sig_info(info.si_signo, &info, t); 1777 } 1778 EXPORT_SYMBOL(send_sig_mceerr); 1779 1780 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper) 1781 { 1782 struct kernel_siginfo info; 1783 1784 clear_siginfo(&info); 1785 info.si_signo = SIGSEGV; 1786 info.si_errno = 0; 1787 info.si_code = SEGV_BNDERR; 1788 info.si_addr = addr; 1789 info.si_lower = lower; 1790 info.si_upper = upper; 1791 return force_sig_info(&info); 1792 } 1793 1794 #ifdef SEGV_PKUERR 1795 int force_sig_pkuerr(void __user *addr, u32 pkey) 1796 { 1797 struct kernel_siginfo info; 1798 1799 clear_siginfo(&info); 1800 info.si_signo = SIGSEGV; 1801 info.si_errno = 0; 1802 info.si_code = SEGV_PKUERR; 1803 info.si_addr = addr; 1804 info.si_pkey = pkey; 1805 return force_sig_info(&info); 1806 } 1807 #endif 1808 1809 int send_sig_perf(void __user *addr, u32 type, u64 sig_data) 1810 { 1811 struct kernel_siginfo info; 1812 1813 clear_siginfo(&info); 1814 info.si_signo = SIGTRAP; 1815 info.si_errno = 0; 1816 info.si_code = TRAP_PERF; 1817 info.si_addr = addr; 1818 info.si_perf_data = sig_data; 1819 info.si_perf_type = type; 1820 1821 /* 1822 * Signals generated by perf events should not terminate the whole 1823 * process if SIGTRAP is blocked, however, delivering the signal 1824 * asynchronously is better than not delivering at all. 
 * But tell user space if the signal was asynchronous, so it can clearly be
 * distinguished from normal synchronous ones.
	 */
	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
				     TRAP_PERF_FLAG_ASYNC :
				     0;

	return send_sig_info(info.si_signo, &info, current);
}

/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

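/*
 * Illustrative only, not part of the original file: typical in-kernel
 * callers of the two helpers above.  The tty layer delivers job-control
 * signals to a whole foreground process group roughly like this (the
 * surrounding driver code is sketched, not quoted):
 *
 *	struct pid *pgrp = tty_get_pgrp(tty);
 *	if (pgrp) {
 *		kill_pgrp(pgrp, SIGINT, 1);	// priv != 0: kernel-generated
 *		put_pid(pgrp);
 *	}
 *
 * while a single process is signalled by struct pid with:
 *
 *	kill_pid(task_tgid(task), SIGTERM, 1);
 */
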
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();

	/*
	 * This function is used by POSIX timers to deliver a timer signal.
	 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
	 * set), the signal must be delivered to the specific thread (queues
	 * into t->pending).
	 *
	 * Where type is not PIDTYPE_PID, signals must be delivered to the
	 * process.  In this case, prefer to deliver to current if it is in
	 * the same thread group as the target process, which avoids
	 * unnecessarily waking up a potentially idle task.
	 */
	t = pid_task(pid, type);
	if (!t)
		goto ret;
	if (type != PIDTYPE_PID && same_thread_group(t, current))
		t = current;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
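 *
 * Illustrative only (userspace sketch, not from the kernel sources): the
 * "parent ignored us" case corresponds to a parent that either did
 *
 *	signal(SIGCHLD, SIG_IGN);
 *
 * or installed its SIGCHLD action with SA_NOCLDWAIT:
 *
 *	struct sigaction sa = { .sa_handler = SIG_DFL, .sa_flags = SA_NOCLDWAIT };
 *	sigaction(SIGCHLD, &sa, NULL);
 *
 * in which case the child is reaped without becoming a zombie and a
 * blocked wait4() in the parent may return -ECHILD.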
2042 */ 2043 bool do_notify_parent(struct task_struct *tsk, int sig) 2044 { 2045 struct kernel_siginfo info; 2046 unsigned long flags; 2047 struct sighand_struct *psig; 2048 bool autoreap = false; 2049 u64 utime, stime; 2050 2051 WARN_ON_ONCE(sig == -1); 2052 2053 /* do_notify_parent_cldstop should have been called instead. */ 2054 WARN_ON_ONCE(task_is_stopped_or_traced(tsk)); 2055 2056 WARN_ON_ONCE(!tsk->ptrace && 2057 (tsk->group_leader != tsk || !thread_group_empty(tsk))); 2058 2059 /* Wake up all pidfd waiters */ 2060 do_notify_pidfd(tsk); 2061 2062 if (sig != SIGCHLD) { 2063 /* 2064 * This is only possible if parent == real_parent. 2065 * Check if it has changed security domain. 2066 */ 2067 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id)) 2068 sig = SIGCHLD; 2069 } 2070 2071 clear_siginfo(&info); 2072 info.si_signo = sig; 2073 info.si_errno = 0; 2074 /* 2075 * We are under tasklist_lock here so our parent is tied to 2076 * us and cannot change. 2077 * 2078 * task_active_pid_ns will always return the same pid namespace 2079 * until a task passes through release_task. 2080 * 2081 * write_lock() currently calls preempt_disable() which is the 2082 * same as rcu_read_lock(), but according to Oleg, this is not 2083 * correct to rely on this 2084 */ 2085 rcu_read_lock(); 2086 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent)); 2087 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns), 2088 task_uid(tsk)); 2089 rcu_read_unlock(); 2090 2091 task_cputime(tsk, &utime, &stime); 2092 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime); 2093 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime); 2094 2095 info.si_status = tsk->exit_code & 0x7f; 2096 if (tsk->exit_code & 0x80) 2097 info.si_code = CLD_DUMPED; 2098 else if (tsk->exit_code & 0x7f) 2099 info.si_code = CLD_KILLED; 2100 else { 2101 info.si_code = CLD_EXITED; 2102 info.si_status = tsk->exit_code >> 8; 2103 } 2104 2105 psig = tsk->parent->sighand; 2106 spin_lock_irqsave(&psig->siglock, flags); 2107 if (!tsk->ptrace && sig == SIGCHLD && 2108 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || 2109 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { 2110 /* 2111 * We are exiting and our parent doesn't care. POSIX.1 2112 * defines special semantics for setting SIGCHLD to SIG_IGN 2113 * or setting the SA_NOCLDWAIT flag: we should be reaped 2114 * automatically and not left for our parent's wait4 call. 2115 * Rather than having the parent do it as a magic kind of 2116 * signal handler, we just set this to tell do_exit that we 2117 * can be cleaned up without becoming a zombie. Note that 2118 * we still call __wake_up_parent in this case, because a 2119 * blocked sys_wait4 might now return -ECHILD. 2120 * 2121 * Whether we send SIGCHLD or not for SA_NOCLDWAIT 2122 * is implementation-defined: we do (if you don't want 2123 * it, just use SIG_IGN instead). 2124 */ 2125 autoreap = true; 2126 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) 2127 sig = 0; 2128 } 2129 /* 2130 * Send with __send_signal as si_pid and si_uid are in the 2131 * parent's namespaces. 
2132 */ 2133 if (valid_signal(sig) && sig) 2134 __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false); 2135 __wake_up_parent(tsk, tsk->parent); 2136 spin_unlock_irqrestore(&psig->siglock, flags); 2137 2138 return autoreap; 2139 } 2140 2141 /** 2142 * do_notify_parent_cldstop - notify parent of stopped/continued state change 2143 * @tsk: task reporting the state change 2144 * @for_ptracer: the notification is for ptracer 2145 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report 2146 * 2147 * Notify @tsk's parent that the stopped/continued state has changed. If 2148 * @for_ptracer is %false, @tsk's group leader notifies to its real parent. 2149 * If %true, @tsk reports to @tsk->parent which should be the ptracer. 2150 * 2151 * CONTEXT: 2152 * Must be called with tasklist_lock at least read locked. 2153 */ 2154 static void do_notify_parent_cldstop(struct task_struct *tsk, 2155 bool for_ptracer, int why) 2156 { 2157 struct kernel_siginfo info; 2158 unsigned long flags; 2159 struct task_struct *parent; 2160 struct sighand_struct *sighand; 2161 u64 utime, stime; 2162 2163 if (for_ptracer) { 2164 parent = tsk->parent; 2165 } else { 2166 tsk = tsk->group_leader; 2167 parent = tsk->real_parent; 2168 } 2169 2170 clear_siginfo(&info); 2171 info.si_signo = SIGCHLD; 2172 info.si_errno = 0; 2173 /* 2174 * see comment in do_notify_parent() about the following 4 lines 2175 */ 2176 rcu_read_lock(); 2177 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent)); 2178 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); 2179 rcu_read_unlock(); 2180 2181 task_cputime(tsk, &utime, &stime); 2182 info.si_utime = nsec_to_clock_t(utime); 2183 info.si_stime = nsec_to_clock_t(stime); 2184 2185 info.si_code = why; 2186 switch (why) { 2187 case CLD_CONTINUED: 2188 info.si_status = SIGCONT; 2189 break; 2190 case CLD_STOPPED: 2191 info.si_status = tsk->signal->group_exit_code & 0x7f; 2192 break; 2193 case CLD_TRAPPED: 2194 info.si_status = tsk->exit_code & 0x7f; 2195 break; 2196 default: 2197 BUG(); 2198 } 2199 2200 sighand = parent->sighand; 2201 spin_lock_irqsave(&sighand->siglock, flags); 2202 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && 2203 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) 2204 send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID); 2205 /* 2206 * Even if SIGCHLD is not generated, we must wake up wait4 calls. 2207 */ 2208 __wake_up_parent(tsk, parent); 2209 spin_unlock_irqrestore(&sighand->siglock, flags); 2210 } 2211 2212 /* 2213 * This must be called with current->sighand->siglock held. 2214 * 2215 * This should be the path for all ptrace stops. 2216 * We always set current->last_siginfo while stopped here. 2217 * That makes it a way to test a stopped process for 2218 * being ptrace-stopped vs being job-control-stopped. 2219 * 2220 * Returns the signal the ptracer requested the code resume 2221 * with. If the code did not stop because the tracer is gone, 2222 * the stop signal remains unchanged unless clear_code. 2223 */ 2224 static int ptrace_stop(int exit_code, int why, unsigned long message, 2225 kernel_siginfo_t *info) 2226 __releases(¤t->sighand->siglock) 2227 __acquires(¤t->sighand->siglock) 2228 { 2229 bool gstop_done = false; 2230 2231 if (arch_ptrace_stop_needed()) { 2232 /* 2233 * The arch code has something special to do before a 2234 * ptrace stop. This is allowed to block, e.g. for faults 2235 * on user stack pages. We can't keep the siglock while 2236 * calling arch_ptrace_stop, so we must release it now. 
* To preserve proper semantics, we must do this before
2238 * any signal bookkeeping like checking group_stop_count.
2239 */
2240 spin_unlock_irq(&current->sighand->siglock);
2241 arch_ptrace_stop();
2242 spin_lock_irq(&current->sighand->siglock);
2243 }
2244
2245 /*
2246 * After this point ptrace_signal_wake_up or signal_wake_up
2247 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2248 * signal comes in. Handle previous ptrace_unlinks and fatal
2249 * signals here to prevent ptrace_stop sleeping in schedule.
2250 */
2251 if (!current->ptrace || __fatal_signal_pending(current))
2252 return exit_code;
2253
2254 set_special_state(TASK_TRACED);
2255 current->jobctl |= JOBCTL_TRACED;
2256
2257 /*
2258 * We're committing to trapping. TRACED should be visible before
2259 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2260 * Also, transition to TRACED and updates to ->jobctl should be
2261 * atomic with respect to siglock and should be done after the arch
2262 * hook as siglock is released and regrabbed across it.
2263 *
2264 * TRACER TRACEE
2265 *
2266 * ptrace_attach()
2267 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2268 * do_wait()
2269 * set_current_state() smp_wmb();
2270 * ptrace_do_wait()
2271 * wait_task_stopped()
2272 * task_stopped_code()
2273 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2274 */
2275 smp_wmb();
2276
2277 current->ptrace_message = message;
2278 current->last_siginfo = info;
2279 current->exit_code = exit_code;
2280
2281 /*
2282 * If @why is CLD_STOPPED, we're trapping to participate in a group
2283 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2284 * across siglock relocks since INTERRUPT was scheduled, PENDING
2285 * could be clear now. We act as if SIGCONT is received after
2286 * TASK_TRACED is entered - ignore it.
2287 */
2288 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2289 gstop_done = task_participate_group_stop(current);
2290
2291 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2292 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2293 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2294 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2295
2296 /* entering a trap, clear TRAPPING */
2297 task_clear_jobctl_trapping(current);
2298
2299 spin_unlock_irq(&current->sighand->siglock);
2300 read_lock(&tasklist_lock);
2301 /*
2302 * Notify parents of the stop.
2303 *
2304 * While ptraced, there are two parents - the ptracer and
2305 * the real_parent of the group_leader. The ptracer should
2306 * know about every stop while the real parent is only
2307 * interested in the completion of group stop. The states
2308 * for the two don't interact with each other. Notify
2309 * separately unless they're gonna be duplicates.
2310 */
2311 if (current->ptrace)
2312 do_notify_parent_cldstop(current, true, why);
2313 if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2314 do_notify_parent_cldstop(current, false, why);
2315
2316 /*
2317 * Don't want to allow preemption here, because
2318 * sys_ptrace() needs this task to be inactive.
2319 *
2320 * XXX: implement read_unlock_no_resched().
2321 */
2322 preempt_disable();
2323 read_unlock(&tasklist_lock);
2324 cgroup_enter_frozen();
2325 preempt_enable_no_resched();
2326 schedule();
2327 cgroup_leave_frozen(true);
2328
2329 /*
2330 * We are back.
Now reacquire the siglock before touching
2331 * last_siginfo, so that we are sure to have synchronized with
2332 * any signal-sending on another CPU that wants to examine it.
2333 */
2334 spin_lock_irq(&current->sighand->siglock);
2335 exit_code = current->exit_code;
2336 current->last_siginfo = NULL;
2337 current->ptrace_message = 0;
2338 current->exit_code = 0;
2339
2340 /* LISTENING can be set only during STOP traps, clear it */
2341 current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2342
2343 /*
2344 * Queued signals ignored us while we were stopped for tracing.
2345 * So check for any that we should take before resuming user mode.
2346 * This sets TIF_SIGPENDING, but never clears it.
2347 */
2348 recalc_sigpending_tsk(current);
2349 return exit_code;
2350 }
2351
2352 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2353 {
2354 kernel_siginfo_t info;
2355
2356 clear_siginfo(&info);
2357 info.si_signo = signr;
2358 info.si_code = exit_code;
2359 info.si_pid = task_pid_vnr(current);
2360 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2361
2362 /* Let the debugger run. */
2363 return ptrace_stop(exit_code, why, message, &info);
2364 }
2365
2366 int ptrace_notify(int exit_code, unsigned long message)
2367 {
2368 int signr;
2369
2370 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2371 if (unlikely(task_work_pending(current)))
2372 task_work_run();
2373
2374 spin_lock_irq(&current->sighand->siglock);
2375 signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2376 spin_unlock_irq(&current->sighand->siglock);
2377 return signr;
2378 }
2379
2380 /**
2381 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2382 * @signr: signr causing group stop if initiating
2383 *
2384 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2385 * and participate in it. If already set, participate in the existing
2386 * group stop. If participated in a group stop (and thus slept), %true is
2387 * returned with siglock released.
2388 *
2389 * If ptraced, this function doesn't handle stop itself. Instead,
2390 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2391 * untouched. The caller must ensure that INTERRUPT trap handling takes
2392 * place afterwards.
2393 *
2394 * CONTEXT:
2395 * Must be called with @current->sighand->siglock held, which is released
2396 * on %true return.
2397 *
2398 * RETURNS:
2399 * %false if group stop is already cancelled or ptrace trap is scheduled.
2400 * %true if participated in group stop.
2401 */
2402 static bool do_signal_stop(int signr)
2403 __releases(&current->sighand->siglock)
2404 {
2405 struct signal_struct *sig = current->signal;
2406
2407 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2408 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2409 struct task_struct *t;
2410
2411 /* signr will be recorded in task->jobctl for retries */
2412 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2413
2414 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2415 unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2416 unlikely(sig->group_exec_task))
2417 return false;
2418 /*
2419 * There is no group stop already in progress. We must
2420 * initiate one now.
2421 *
2422 * While ptraced, a task may be resumed while group stop is
2423 * still in effect and then receive a stop signal and
2424 * initiate another group stop. This deviates from the
2425 * usual behavior as two consecutive stop signals can't
2426 * cause two group stops when !ptraced.
That is why we 2427 * also check !task_is_stopped(t) below. 2428 * 2429 * The condition can be distinguished by testing whether 2430 * SIGNAL_STOP_STOPPED is already set. Don't generate 2431 * group_exit_code in such case. 2432 * 2433 * This is not necessary for SIGNAL_STOP_CONTINUED because 2434 * an intervening stop signal is required to cause two 2435 * continued events regardless of ptrace. 2436 */ 2437 if (!(sig->flags & SIGNAL_STOP_STOPPED)) 2438 sig->group_exit_code = signr; 2439 2440 sig->group_stop_count = 0; 2441 2442 if (task_set_jobctl_pending(current, signr | gstop)) 2443 sig->group_stop_count++; 2444 2445 t = current; 2446 while_each_thread(current, t) { 2447 /* 2448 * Setting state to TASK_STOPPED for a group 2449 * stop is always done with the siglock held, 2450 * so this check has no races. 2451 */ 2452 if (!task_is_stopped(t) && 2453 task_set_jobctl_pending(t, signr | gstop)) { 2454 sig->group_stop_count++; 2455 if (likely(!(t->ptrace & PT_SEIZED))) 2456 signal_wake_up(t, 0); 2457 else 2458 ptrace_trap_notify(t); 2459 } 2460 } 2461 } 2462 2463 if (likely(!current->ptrace)) { 2464 int notify = 0; 2465 2466 /* 2467 * If there are no other threads in the group, or if there 2468 * is a group stop in progress and we are the last to stop, 2469 * report to the parent. 2470 */ 2471 if (task_participate_group_stop(current)) 2472 notify = CLD_STOPPED; 2473 2474 current->jobctl |= JOBCTL_STOPPED; 2475 set_special_state(TASK_STOPPED); 2476 spin_unlock_irq(¤t->sighand->siglock); 2477 2478 /* 2479 * Notify the parent of the group stop completion. Because 2480 * we're not holding either the siglock or tasklist_lock 2481 * here, ptracer may attach inbetween; however, this is for 2482 * group stop and should always be delivered to the real 2483 * parent of the group leader. The new ptracer will get 2484 * its notification when this task transitions into 2485 * TASK_TRACED. 2486 */ 2487 if (notify) { 2488 read_lock(&tasklist_lock); 2489 do_notify_parent_cldstop(current, false, notify); 2490 read_unlock(&tasklist_lock); 2491 } 2492 2493 /* Now we don't run again until woken by SIGCONT or SIGKILL */ 2494 cgroup_enter_frozen(); 2495 schedule(); 2496 return true; 2497 } else { 2498 /* 2499 * While ptraced, group stop is handled by STOP trap. 2500 * Schedule it and let the caller deal with it. 2501 */ 2502 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); 2503 return false; 2504 } 2505 } 2506 2507 /** 2508 * do_jobctl_trap - take care of ptrace jobctl traps 2509 * 2510 * When PT_SEIZED, it's used for both group stop and explicit 2511 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with 2512 * accompanying siginfo. If stopped, lower eight bits of exit_code contain 2513 * the stop signal; otherwise, %SIGTRAP. 2514 * 2515 * When !PT_SEIZED, it's used only for group stop trap with stop signal 2516 * number as exit_code and no siginfo. 2517 * 2518 * CONTEXT: 2519 * Must be called with @current->sighand->siglock held, which may be 2520 * released and re-acquired before returning with intervening sleep. 
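*
* As a concrete example (a sketch, not an exhaustive list): a PT_SEIZED
* tracee stopping for SIGTSTP reports exit_code = SIGTSTP | (PTRACE_EVENT_STOP << 8),
* whereas the same stop without PT_SEIZED reports a plain SIGTSTP exit_code.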
2521 */
2522 static void do_jobctl_trap(void)
2523 {
2524 struct signal_struct *signal = current->signal;
2525 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2526
2527 if (current->ptrace & PT_SEIZED) {
2528 if (!signal->group_stop_count &&
2529 !(signal->flags & SIGNAL_STOP_STOPPED))
2530 signr = SIGTRAP;
2531 WARN_ON_ONCE(!signr);
2532 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2533 CLD_STOPPED, 0);
2534 } else {
2535 WARN_ON_ONCE(!signr);
2536 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2537 }
2538 }
2539
2540 /**
2541 * do_freezer_trap - handle the freezer jobctl trap
2542 *
2543 * Puts the task into the frozen state, unless the task is about to quit.
2544 * In that case it drops JOBCTL_TRAP_FREEZE instead.
2545 *
2546 * CONTEXT:
2547 * Must be called with @current->sighand->siglock held,
2548 * which is always released before returning.
2549 */
2550 static void do_freezer_trap(void)
2551 __releases(&current->sighand->siglock)
2552 {
2553 /*
2554 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2555 * let's make another loop to give it a chance to be handled.
2556 * In any case, we'll return.
2557 */
2558 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2559 JOBCTL_TRAP_FREEZE) {
2560 spin_unlock_irq(&current->sighand->siglock);
2561 return;
2562 }
2563
2564 /*
2565 * Now we're sure that there is no pending fatal signal and no
2566 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2567 * immediately (if there is a non-fatal signal pending), and
2568 * put the task to sleep.
2569 */
2570 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2571 clear_thread_flag(TIF_SIGPENDING);
2572 spin_unlock_irq(&current->sighand->siglock);
2573 cgroup_enter_frozen();
2574 schedule();
2575 }
2576
2577 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2578 {
2579 /*
2580 * We do not check sig_kernel_stop(signr) but set this marker
2581 * unconditionally because we do not know whether debugger will
2582 * change signr. This flag has no meaning unless we are going
2583 * to stop after return from ptrace_stop(). In this case it will
2584 * be checked in do_signal_stop(), we should only stop if it was
2585 * not cleared by SIGCONT while we were sleeping. See also the
2586 * comment in dequeue_signal().
2587 */
2588 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2589 signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2590
2591 /* We're back. Did the debugger cancel the sig? */
2592 if (signr == 0)
2593 return signr;
2594
2595 /*
2596 * Update the siginfo structure if the signal has
2597 * changed. If the debugger wanted something
2598 * specific in the siginfo structure then it should
2599 * have updated *info via PTRACE_SETSIGINFO.
2600 */
2601 if (signr != info->si_signo) {
2602 clear_siginfo(info);
2603 info->si_signo = signr;
2604 info->si_errno = 0;
2605 info->si_code = SI_USER;
2606 rcu_read_lock();
2607 info->si_pid = task_pid_vnr(current->parent);
2608 info->si_uid = from_kuid_munged(current_user_ns(),
2609 task_uid(current->parent));
2610 rcu_read_unlock();
2611 }
2612
2613 /* If the (new) signal is now blocked, requeue it.
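* (Either the debugger replaced it with a signal we currently block, or a
* fatal signal is already pending; re-posting it with send_signal_locked()
* keeps it queued instead of silently dropping the dequeued signal.)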
*/ 2614 if (sigismember(¤t->blocked, signr) || 2615 fatal_signal_pending(current)) { 2616 send_signal_locked(signr, info, current, type); 2617 signr = 0; 2618 } 2619 2620 return signr; 2621 } 2622 2623 static void hide_si_addr_tag_bits(struct ksignal *ksig) 2624 { 2625 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) { 2626 case SIL_FAULT: 2627 case SIL_FAULT_TRAPNO: 2628 case SIL_FAULT_MCEERR: 2629 case SIL_FAULT_BNDERR: 2630 case SIL_FAULT_PKUERR: 2631 case SIL_FAULT_PERF_EVENT: 2632 ksig->info.si_addr = arch_untagged_si_addr( 2633 ksig->info.si_addr, ksig->sig, ksig->info.si_code); 2634 break; 2635 case SIL_KILL: 2636 case SIL_TIMER: 2637 case SIL_POLL: 2638 case SIL_CHLD: 2639 case SIL_RT: 2640 case SIL_SYS: 2641 break; 2642 } 2643 } 2644 2645 bool get_signal(struct ksignal *ksig) 2646 { 2647 struct sighand_struct *sighand = current->sighand; 2648 struct signal_struct *signal = current->signal; 2649 int signr; 2650 2651 clear_notify_signal(); 2652 if (unlikely(task_work_pending(current))) 2653 task_work_run(); 2654 2655 if (!task_sigpending(current)) 2656 return false; 2657 2658 if (unlikely(uprobe_deny_signal())) 2659 return false; 2660 2661 /* 2662 * Do this once, we can't return to user-mode if freezing() == T. 2663 * do_signal_stop() and ptrace_stop() do freezable_schedule() and 2664 * thus do not need another check after return. 2665 */ 2666 try_to_freeze(); 2667 2668 relock: 2669 spin_lock_irq(&sighand->siglock); 2670 2671 /* 2672 * Every stopped thread goes here after wakeup. Check to see if 2673 * we should notify the parent, prepare_signal(SIGCONT) encodes 2674 * the CLD_ si_code into SIGNAL_CLD_MASK bits. 2675 */ 2676 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { 2677 int why; 2678 2679 if (signal->flags & SIGNAL_CLD_CONTINUED) 2680 why = CLD_CONTINUED; 2681 else 2682 why = CLD_STOPPED; 2683 2684 signal->flags &= ~SIGNAL_CLD_MASK; 2685 2686 spin_unlock_irq(&sighand->siglock); 2687 2688 /* 2689 * Notify the parent that we're continuing. This event is 2690 * always per-process and doesn't make whole lot of sense 2691 * for ptracers, who shouldn't consume the state via 2692 * wait(2) either, but, for backward compatibility, notify 2693 * the ptracer of the group leader too unless it's gonna be 2694 * a duplicate. 2695 */ 2696 read_lock(&tasklist_lock); 2697 do_notify_parent_cldstop(current, false, why); 2698 2699 if (ptrace_reparented(current->group_leader)) 2700 do_notify_parent_cldstop(current->group_leader, 2701 true, why); 2702 read_unlock(&tasklist_lock); 2703 2704 goto relock; 2705 } 2706 2707 for (;;) { 2708 struct k_sigaction *ka; 2709 enum pid_type type; 2710 2711 /* Has this task already been marked for death? 
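* (SIGNAL_GROUP_EXIT is set once a group-wide exit or a fatal signal has
* been decided on; group_exec_task is set by the thread doing de_thread()
* while it kills the rest of the group during exec.)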
*/ 2712 if ((signal->flags & SIGNAL_GROUP_EXIT) || 2713 signal->group_exec_task) { 2714 clear_siginfo(&ksig->info); 2715 ksig->info.si_signo = signr = SIGKILL; 2716 sigdelset(¤t->pending.signal, SIGKILL); 2717 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO, 2718 &sighand->action[SIGKILL - 1]); 2719 recalc_sigpending(); 2720 goto fatal; 2721 } 2722 2723 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && 2724 do_signal_stop(0)) 2725 goto relock; 2726 2727 if (unlikely(current->jobctl & 2728 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) { 2729 if (current->jobctl & JOBCTL_TRAP_MASK) { 2730 do_jobctl_trap(); 2731 spin_unlock_irq(&sighand->siglock); 2732 } else if (current->jobctl & JOBCTL_TRAP_FREEZE) 2733 do_freezer_trap(); 2734 2735 goto relock; 2736 } 2737 2738 /* 2739 * If the task is leaving the frozen state, let's update 2740 * cgroup counters and reset the frozen bit. 2741 */ 2742 if (unlikely(cgroup_task_frozen(current))) { 2743 spin_unlock_irq(&sighand->siglock); 2744 cgroup_leave_frozen(false); 2745 goto relock; 2746 } 2747 2748 /* 2749 * Signals generated by the execution of an instruction 2750 * need to be delivered before any other pending signals 2751 * so that the instruction pointer in the signal stack 2752 * frame points to the faulting instruction. 2753 */ 2754 type = PIDTYPE_PID; 2755 signr = dequeue_synchronous_signal(&ksig->info); 2756 if (!signr) 2757 signr = dequeue_signal(current, ¤t->blocked, 2758 &ksig->info, &type); 2759 2760 if (!signr) 2761 break; /* will return 0 */ 2762 2763 if (unlikely(current->ptrace) && (signr != SIGKILL) && 2764 !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) { 2765 signr = ptrace_signal(signr, &ksig->info, type); 2766 if (!signr) 2767 continue; 2768 } 2769 2770 ka = &sighand->action[signr-1]; 2771 2772 /* Trace actually delivered signals. */ 2773 trace_signal_deliver(signr, &ksig->info, ka); 2774 2775 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ 2776 continue; 2777 if (ka->sa.sa_handler != SIG_DFL) { 2778 /* Run the handler. */ 2779 ksig->ka = *ka; 2780 2781 if (ka->sa.sa_flags & SA_ONESHOT) 2782 ka->sa.sa_handler = SIG_DFL; 2783 2784 break; /* will return non-zero "signr" value */ 2785 } 2786 2787 /* 2788 * Now we are doing the default action for this signal. 2789 */ 2790 if (sig_kernel_ignore(signr)) /* Default is nothing. */ 2791 continue; 2792 2793 /* 2794 * Global init gets no signals it doesn't want. 2795 * Container-init gets no signals it doesn't want from same 2796 * container. 2797 * 2798 * Note that if global/container-init sees a sig_kernel_only() 2799 * signal here, the signal must have been generated internally 2800 * or must have come from an ancestor namespace. In either 2801 * case, the signal cannot be dropped. 2802 */ 2803 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && 2804 !sig_kernel_only(signr)) 2805 continue; 2806 2807 if (sig_kernel_stop(signr)) { 2808 /* 2809 * The default action is to stop all threads in 2810 * the thread group. The job control signals 2811 * do nothing in an orphaned pgrp, but SIGSTOP 2812 * always works. Note that siglock needs to be 2813 * dropped during the call to is_orphaned_pgrp() 2814 * because of lock ordering with tasklist_lock. 2815 * This allows an intervening SIGCONT to be posted. 2816 * We need to check for that and bail out if necessary. 
2817 */ 2818 if (signr != SIGSTOP) { 2819 spin_unlock_irq(&sighand->siglock); 2820 2821 /* signals can be posted during this window */ 2822 2823 if (is_current_pgrp_orphaned()) 2824 goto relock; 2825 2826 spin_lock_irq(&sighand->siglock); 2827 } 2828 2829 if (likely(do_signal_stop(ksig->info.si_signo))) { 2830 /* It released the siglock. */ 2831 goto relock; 2832 } 2833 2834 /* 2835 * We didn't actually stop, due to a race 2836 * with SIGCONT or something like that. 2837 */ 2838 continue; 2839 } 2840 2841 fatal: 2842 spin_unlock_irq(&sighand->siglock); 2843 if (unlikely(cgroup_task_frozen(current))) 2844 cgroup_leave_frozen(true); 2845 2846 /* 2847 * Anything else is fatal, maybe with a core dump. 2848 */ 2849 current->flags |= PF_SIGNALED; 2850 2851 if (sig_kernel_coredump(signr)) { 2852 if (print_fatal_signals) 2853 print_fatal_signal(ksig->info.si_signo); 2854 proc_coredump_connector(current); 2855 /* 2856 * If it was able to dump core, this kills all 2857 * other threads in the group and synchronizes with 2858 * their demise. If we lost the race with another 2859 * thread getting here, it set group_exit_code 2860 * first and our do_group_exit call below will use 2861 * that value and ignore the one we pass it. 2862 */ 2863 do_coredump(&ksig->info); 2864 } 2865 2866 /* 2867 * PF_USER_WORKER threads will catch and exit on fatal signals 2868 * themselves. They have cleanup that must be performed, so 2869 * we cannot call do_exit() on their behalf. 2870 */ 2871 if (current->flags & PF_USER_WORKER) 2872 goto out; 2873 2874 /* 2875 * Death signals, no core dump. 2876 */ 2877 do_group_exit(ksig->info.si_signo); 2878 /* NOTREACHED */ 2879 } 2880 spin_unlock_irq(&sighand->siglock); 2881 out: 2882 ksig->sig = signr; 2883 2884 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS)) 2885 hide_si_addr_tag_bits(ksig); 2886 2887 return ksig->sig > 0; 2888 } 2889 2890 /** 2891 * signal_delivered - called after signal delivery to update blocked signals 2892 * @ksig: kernel signal struct 2893 * @stepping: nonzero if debugger single-step or block-step in use 2894 * 2895 * This function should be called when a signal has successfully been 2896 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask 2897 * is always blocked), and the signal itself is blocked unless %SA_NODEFER 2898 * is set in @ksig->ka.sa.sa_flags. Tracing is notified. 2899 */ 2900 static void signal_delivered(struct ksignal *ksig, int stepping) 2901 { 2902 sigset_t blocked; 2903 2904 /* A signal was successfully delivered, and the 2905 saved sigmask was stored on the signal frame, 2906 and will be restored by sigreturn. So we can 2907 simply clear the restore sigmask flag. */ 2908 clear_restore_sigmask(); 2909 2910 sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask); 2911 if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) 2912 sigaddset(&blocked, ksig->sig); 2913 set_current_blocked(&blocked); 2914 if (current->sas_ss_flags & SS_AUTODISARM) 2915 sas_ss_reset(current); 2916 if (stepping) 2917 ptrace_notify(SIGTRAP, 0); 2918 } 2919 2920 void signal_setup_done(int failed, struct ksignal *ksig, int stepping) 2921 { 2922 if (failed) 2923 force_sigsegv(ksig->sig); 2924 else 2925 signal_delivered(ksig, stepping); 2926 } 2927 2928 /* 2929 * It could be that complete_signal() picked us to notify about the 2930 * group-wide signal. Other threads should be notified now to take 2931 * the shared signals in @which since we will not. 
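* (@which is the set of signals this thread can no longer take: the signals
* it is newly blocking in __set_task_blocked(), or everything it had left
* unblocked when exiting in exit_signals().)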
2932 */ 2933 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) 2934 { 2935 sigset_t retarget; 2936 struct task_struct *t; 2937 2938 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); 2939 if (sigisemptyset(&retarget)) 2940 return; 2941 2942 t = tsk; 2943 while_each_thread(tsk, t) { 2944 if (t->flags & PF_EXITING) 2945 continue; 2946 2947 if (!has_pending_signals(&retarget, &t->blocked)) 2948 continue; 2949 /* Remove the signals this thread can handle. */ 2950 sigandsets(&retarget, &retarget, &t->blocked); 2951 2952 if (!task_sigpending(t)) 2953 signal_wake_up(t, 0); 2954 2955 if (sigisemptyset(&retarget)) 2956 break; 2957 } 2958 } 2959 2960 void exit_signals(struct task_struct *tsk) 2961 { 2962 int group_stop = 0; 2963 sigset_t unblocked; 2964 2965 /* 2966 * @tsk is about to have PF_EXITING set - lock out users which 2967 * expect stable threadgroup. 2968 */ 2969 cgroup_threadgroup_change_begin(tsk); 2970 2971 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) { 2972 sched_mm_cid_exit_signals(tsk); 2973 tsk->flags |= PF_EXITING; 2974 cgroup_threadgroup_change_end(tsk); 2975 return; 2976 } 2977 2978 spin_lock_irq(&tsk->sighand->siglock); 2979 /* 2980 * From now this task is not visible for group-wide signals, 2981 * see wants_signal(), do_signal_stop(). 2982 */ 2983 sched_mm_cid_exit_signals(tsk); 2984 tsk->flags |= PF_EXITING; 2985 2986 cgroup_threadgroup_change_end(tsk); 2987 2988 if (!task_sigpending(tsk)) 2989 goto out; 2990 2991 unblocked = tsk->blocked; 2992 signotset(&unblocked); 2993 retarget_shared_pending(tsk, &unblocked); 2994 2995 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && 2996 task_participate_group_stop(tsk)) 2997 group_stop = CLD_STOPPED; 2998 out: 2999 spin_unlock_irq(&tsk->sighand->siglock); 3000 3001 /* 3002 * If group stop has completed, deliver the notification. This 3003 * should always go to the real parent of the group leader. 3004 */ 3005 if (unlikely(group_stop)) { 3006 read_lock(&tasklist_lock); 3007 do_notify_parent_cldstop(tsk, false, group_stop); 3008 read_unlock(&tasklist_lock); 3009 } 3010 } 3011 3012 /* 3013 * System call entry points. 3014 */ 3015 3016 /** 3017 * sys_restart_syscall - restart a system call 3018 */ 3019 SYSCALL_DEFINE0(restart_syscall) 3020 { 3021 struct restart_block *restart = ¤t->restart_block; 3022 return restart->fn(restart); 3023 } 3024 3025 long do_no_restart_syscall(struct restart_block *param) 3026 { 3027 return -EINTR; 3028 } 3029 3030 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) 3031 { 3032 if (task_sigpending(tsk) && !thread_group_empty(tsk)) { 3033 sigset_t newblocked; 3034 /* A set of now blocked but previously unblocked signals. */ 3035 sigandnsets(&newblocked, newset, ¤t->blocked); 3036 retarget_shared_pending(tsk, &newblocked); 3037 } 3038 tsk->blocked = *newset; 3039 recalc_sigpending(); 3040 } 3041 3042 /** 3043 * set_current_blocked - change current->blocked mask 3044 * @newset: new mask 3045 * 3046 * It is wrong to change ->blocked directly, this helper should be used 3047 * to ensure the process can't miss a shared signal we are going to block. 
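*
* Note that SIGKILL and SIGSTOP are silently dropped from @newset here;
* in-kernel users that really need to block them (e.g. kernel threads, see
* the comment above sigprocmask() below) go through sigprocmask() and
* __set_current_blocked() instead.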
3048 */ 3049 void set_current_blocked(sigset_t *newset) 3050 { 3051 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP)); 3052 __set_current_blocked(newset); 3053 } 3054 3055 void __set_current_blocked(const sigset_t *newset) 3056 { 3057 struct task_struct *tsk = current; 3058 3059 /* 3060 * In case the signal mask hasn't changed, there is nothing we need 3061 * to do. The current->blocked shouldn't be modified by other task. 3062 */ 3063 if (sigequalsets(&tsk->blocked, newset)) 3064 return; 3065 3066 spin_lock_irq(&tsk->sighand->siglock); 3067 __set_task_blocked(tsk, newset); 3068 spin_unlock_irq(&tsk->sighand->siglock); 3069 } 3070 3071 /* 3072 * This is also useful for kernel threads that want to temporarily 3073 * (or permanently) block certain signals. 3074 * 3075 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel 3076 * interface happily blocks "unblockable" signals like SIGKILL 3077 * and friends. 3078 */ 3079 int sigprocmask(int how, sigset_t *set, sigset_t *oldset) 3080 { 3081 struct task_struct *tsk = current; 3082 sigset_t newset; 3083 3084 /* Lockless, only current can change ->blocked, never from irq */ 3085 if (oldset) 3086 *oldset = tsk->blocked; 3087 3088 switch (how) { 3089 case SIG_BLOCK: 3090 sigorsets(&newset, &tsk->blocked, set); 3091 break; 3092 case SIG_UNBLOCK: 3093 sigandnsets(&newset, &tsk->blocked, set); 3094 break; 3095 case SIG_SETMASK: 3096 newset = *set; 3097 break; 3098 default: 3099 return -EINVAL; 3100 } 3101 3102 __set_current_blocked(&newset); 3103 return 0; 3104 } 3105 EXPORT_SYMBOL(sigprocmask); 3106 3107 /* 3108 * The api helps set app-provided sigmasks. 3109 * 3110 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and 3111 * epoll_pwait where a new sigmask is passed from userland for the syscalls. 3112 * 3113 * Note that it does set_restore_sigmask() in advance, so it must be always 3114 * paired with restore_saved_sigmask_unless() before return from syscall. 3115 */ 3116 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize) 3117 { 3118 sigset_t kmask; 3119 3120 if (!umask) 3121 return 0; 3122 if (sigsetsize != sizeof(sigset_t)) 3123 return -EINVAL; 3124 if (copy_from_user(&kmask, umask, sizeof(sigset_t))) 3125 return -EFAULT; 3126 3127 set_restore_sigmask(); 3128 current->saved_sigmask = current->blocked; 3129 set_current_blocked(&kmask); 3130 3131 return 0; 3132 } 3133 3134 #ifdef CONFIG_COMPAT 3135 int set_compat_user_sigmask(const compat_sigset_t __user *umask, 3136 size_t sigsetsize) 3137 { 3138 sigset_t kmask; 3139 3140 if (!umask) 3141 return 0; 3142 if (sigsetsize != sizeof(compat_sigset_t)) 3143 return -EINVAL; 3144 if (get_compat_sigset(&kmask, umask)) 3145 return -EFAULT; 3146 3147 set_restore_sigmask(); 3148 current->saved_sigmask = current->blocked; 3149 set_current_blocked(&kmask); 3150 3151 return 0; 3152 } 3153 #endif 3154 3155 /** 3156 * sys_rt_sigprocmask - change the list of currently blocked signals 3157 * @how: whether to add, remove, or set signals 3158 * @nset: stores pending signals 3159 * @oset: previous value of signal mask if non-null 3160 * @sigsetsize: size of sigset_t type 3161 */ 3162 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, 3163 sigset_t __user *, oset, size_t, sigsetsize) 3164 { 3165 sigset_t old_set, new_set; 3166 int error; 3167 3168 /* XXX: Don't preclude handling different sized sigset_t's. 
*/ 3169 if (sigsetsize != sizeof(sigset_t)) 3170 return -EINVAL; 3171 3172 old_set = current->blocked; 3173 3174 if (nset) { 3175 if (copy_from_user(&new_set, nset, sizeof(sigset_t))) 3176 return -EFAULT; 3177 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 3178 3179 error = sigprocmask(how, &new_set, NULL); 3180 if (error) 3181 return error; 3182 } 3183 3184 if (oset) { 3185 if (copy_to_user(oset, &old_set, sizeof(sigset_t))) 3186 return -EFAULT; 3187 } 3188 3189 return 0; 3190 } 3191 3192 #ifdef CONFIG_COMPAT 3193 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset, 3194 compat_sigset_t __user *, oset, compat_size_t, sigsetsize) 3195 { 3196 sigset_t old_set = current->blocked; 3197 3198 /* XXX: Don't preclude handling different sized sigset_t's. */ 3199 if (sigsetsize != sizeof(sigset_t)) 3200 return -EINVAL; 3201 3202 if (nset) { 3203 sigset_t new_set; 3204 int error; 3205 if (get_compat_sigset(&new_set, nset)) 3206 return -EFAULT; 3207 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 3208 3209 error = sigprocmask(how, &new_set, NULL); 3210 if (error) 3211 return error; 3212 } 3213 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0; 3214 } 3215 #endif 3216 3217 static void do_sigpending(sigset_t *set) 3218 { 3219 spin_lock_irq(¤t->sighand->siglock); 3220 sigorsets(set, ¤t->pending.signal, 3221 ¤t->signal->shared_pending.signal); 3222 spin_unlock_irq(¤t->sighand->siglock); 3223 3224 /* Outside the lock because only this thread touches it. */ 3225 sigandsets(set, ¤t->blocked, set); 3226 } 3227 3228 /** 3229 * sys_rt_sigpending - examine a pending signal that has been raised 3230 * while blocked 3231 * @uset: stores pending signals 3232 * @sigsetsize: size of sigset_t type or larger 3233 */ 3234 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) 3235 { 3236 sigset_t set; 3237 3238 if (sigsetsize > sizeof(*uset)) 3239 return -EINVAL; 3240 3241 do_sigpending(&set); 3242 3243 if (copy_to_user(uset, &set, sigsetsize)) 3244 return -EFAULT; 3245 3246 return 0; 3247 } 3248 3249 #ifdef CONFIG_COMPAT 3250 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset, 3251 compat_size_t, sigsetsize) 3252 { 3253 sigset_t set; 3254 3255 if (sigsetsize > sizeof(*uset)) 3256 return -EINVAL; 3257 3258 do_sigpending(&set); 3259 3260 return put_compat_sigset(uset, &set, sigsetsize); 3261 } 3262 #endif 3263 3264 static const struct { 3265 unsigned char limit, layout; 3266 } sig_sicodes[] = { 3267 [SIGILL] = { NSIGILL, SIL_FAULT }, 3268 [SIGFPE] = { NSIGFPE, SIL_FAULT }, 3269 [SIGSEGV] = { NSIGSEGV, SIL_FAULT }, 3270 [SIGBUS] = { NSIGBUS, SIL_FAULT }, 3271 [SIGTRAP] = { NSIGTRAP, SIL_FAULT }, 3272 #if defined(SIGEMT) 3273 [SIGEMT] = { NSIGEMT, SIL_FAULT }, 3274 #endif 3275 [SIGCHLD] = { NSIGCHLD, SIL_CHLD }, 3276 [SIGPOLL] = { NSIGPOLL, SIL_POLL }, 3277 [SIGSYS] = { NSIGSYS, SIL_SYS }, 3278 }; 3279 3280 static bool known_siginfo_layout(unsigned sig, int si_code) 3281 { 3282 if (si_code == SI_KERNEL) 3283 return true; 3284 else if ((si_code > SI_USER)) { 3285 if (sig_specific_sicodes(sig)) { 3286 if (si_code <= sig_sicodes[sig].limit) 3287 return true; 3288 } 3289 else if (si_code <= NSIGPOLL) 3290 return true; 3291 } 3292 else if (si_code >= SI_DETHREAD) 3293 return true; 3294 else if (si_code == SI_ASYNCNL) 3295 return true; 3296 return false; 3297 } 3298 3299 enum siginfo_layout siginfo_layout(unsigned sig, int si_code) 3300 { 3301 enum siginfo_layout layout = SIL_KILL; 3302 if ((si_code > SI_USER) && 
(si_code < SI_KERNEL)) { 3303 if ((sig < ARRAY_SIZE(sig_sicodes)) && 3304 (si_code <= sig_sicodes[sig].limit)) { 3305 layout = sig_sicodes[sig].layout; 3306 /* Handle the exceptions */ 3307 if ((sig == SIGBUS) && 3308 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO)) 3309 layout = SIL_FAULT_MCEERR; 3310 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR)) 3311 layout = SIL_FAULT_BNDERR; 3312 #ifdef SEGV_PKUERR 3313 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR)) 3314 layout = SIL_FAULT_PKUERR; 3315 #endif 3316 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF)) 3317 layout = SIL_FAULT_PERF_EVENT; 3318 else if (IS_ENABLED(CONFIG_SPARC) && 3319 (sig == SIGILL) && (si_code == ILL_ILLTRP)) 3320 layout = SIL_FAULT_TRAPNO; 3321 else if (IS_ENABLED(CONFIG_ALPHA) && 3322 ((sig == SIGFPE) || 3323 ((sig == SIGTRAP) && (si_code == TRAP_UNK)))) 3324 layout = SIL_FAULT_TRAPNO; 3325 } 3326 else if (si_code <= NSIGPOLL) 3327 layout = SIL_POLL; 3328 } else { 3329 if (si_code == SI_TIMER) 3330 layout = SIL_TIMER; 3331 else if (si_code == SI_SIGIO) 3332 layout = SIL_POLL; 3333 else if (si_code < 0) 3334 layout = SIL_RT; 3335 } 3336 return layout; 3337 } 3338 3339 static inline char __user *si_expansion(const siginfo_t __user *info) 3340 { 3341 return ((char __user *)info) + sizeof(struct kernel_siginfo); 3342 } 3343 3344 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from) 3345 { 3346 char __user *expansion = si_expansion(to); 3347 if (copy_to_user(to, from , sizeof(struct kernel_siginfo))) 3348 return -EFAULT; 3349 if (clear_user(expansion, SI_EXPANSION_SIZE)) 3350 return -EFAULT; 3351 return 0; 3352 } 3353 3354 static int post_copy_siginfo_from_user(kernel_siginfo_t *info, 3355 const siginfo_t __user *from) 3356 { 3357 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) { 3358 char __user *expansion = si_expansion(from); 3359 char buf[SI_EXPANSION_SIZE]; 3360 int i; 3361 /* 3362 * An unknown si_code might need more than 3363 * sizeof(struct kernel_siginfo) bytes. Verify all of the 3364 * extra bytes are 0. This guarantees copy_siginfo_to_user 3365 * will return this data to userspace exactly. 3366 */ 3367 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE)) 3368 return -EFAULT; 3369 for (i = 0; i < SI_EXPANSION_SIZE; i++) { 3370 if (buf[i] != 0) 3371 return -E2BIG; 3372 } 3373 } 3374 return 0; 3375 } 3376 3377 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to, 3378 const siginfo_t __user *from) 3379 { 3380 if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) 3381 return -EFAULT; 3382 to->si_signo = signo; 3383 return post_copy_siginfo_from_user(to, from); 3384 } 3385 3386 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from) 3387 { 3388 if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) 3389 return -EFAULT; 3390 return post_copy_siginfo_from_user(to, from); 3391 } 3392 3393 #ifdef CONFIG_COMPAT 3394 /** 3395 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo 3396 * @to: compat siginfo destination 3397 * @from: kernel siginfo source 3398 * 3399 * Note: This function does not work properly for the SIGCHLD on x32, but 3400 * fortunately it doesn't have to. The only valid callers for this function are 3401 * copy_siginfo_to_user32, which is overriden for x32 and the coredump code. 3402 * The latter does not care because SIGCHLD will never cause a coredump. 
3403 */ 3404 void copy_siginfo_to_external32(struct compat_siginfo *to, 3405 const struct kernel_siginfo *from) 3406 { 3407 memset(to, 0, sizeof(*to)); 3408 3409 to->si_signo = from->si_signo; 3410 to->si_errno = from->si_errno; 3411 to->si_code = from->si_code; 3412 switch(siginfo_layout(from->si_signo, from->si_code)) { 3413 case SIL_KILL: 3414 to->si_pid = from->si_pid; 3415 to->si_uid = from->si_uid; 3416 break; 3417 case SIL_TIMER: 3418 to->si_tid = from->si_tid; 3419 to->si_overrun = from->si_overrun; 3420 to->si_int = from->si_int; 3421 break; 3422 case SIL_POLL: 3423 to->si_band = from->si_band; 3424 to->si_fd = from->si_fd; 3425 break; 3426 case SIL_FAULT: 3427 to->si_addr = ptr_to_compat(from->si_addr); 3428 break; 3429 case SIL_FAULT_TRAPNO: 3430 to->si_addr = ptr_to_compat(from->si_addr); 3431 to->si_trapno = from->si_trapno; 3432 break; 3433 case SIL_FAULT_MCEERR: 3434 to->si_addr = ptr_to_compat(from->si_addr); 3435 to->si_addr_lsb = from->si_addr_lsb; 3436 break; 3437 case SIL_FAULT_BNDERR: 3438 to->si_addr = ptr_to_compat(from->si_addr); 3439 to->si_lower = ptr_to_compat(from->si_lower); 3440 to->si_upper = ptr_to_compat(from->si_upper); 3441 break; 3442 case SIL_FAULT_PKUERR: 3443 to->si_addr = ptr_to_compat(from->si_addr); 3444 to->si_pkey = from->si_pkey; 3445 break; 3446 case SIL_FAULT_PERF_EVENT: 3447 to->si_addr = ptr_to_compat(from->si_addr); 3448 to->si_perf_data = from->si_perf_data; 3449 to->si_perf_type = from->si_perf_type; 3450 to->si_perf_flags = from->si_perf_flags; 3451 break; 3452 case SIL_CHLD: 3453 to->si_pid = from->si_pid; 3454 to->si_uid = from->si_uid; 3455 to->si_status = from->si_status; 3456 to->si_utime = from->si_utime; 3457 to->si_stime = from->si_stime; 3458 break; 3459 case SIL_RT: 3460 to->si_pid = from->si_pid; 3461 to->si_uid = from->si_uid; 3462 to->si_int = from->si_int; 3463 break; 3464 case SIL_SYS: 3465 to->si_call_addr = ptr_to_compat(from->si_call_addr); 3466 to->si_syscall = from->si_syscall; 3467 to->si_arch = from->si_arch; 3468 break; 3469 } 3470 } 3471 3472 int __copy_siginfo_to_user32(struct compat_siginfo __user *to, 3473 const struct kernel_siginfo *from) 3474 { 3475 struct compat_siginfo new; 3476 3477 copy_siginfo_to_external32(&new, from); 3478 if (copy_to_user(to, &new, sizeof(struct compat_siginfo))) 3479 return -EFAULT; 3480 return 0; 3481 } 3482 3483 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to, 3484 const struct compat_siginfo *from) 3485 { 3486 clear_siginfo(to); 3487 to->si_signo = from->si_signo; 3488 to->si_errno = from->si_errno; 3489 to->si_code = from->si_code; 3490 switch(siginfo_layout(from->si_signo, from->si_code)) { 3491 case SIL_KILL: 3492 to->si_pid = from->si_pid; 3493 to->si_uid = from->si_uid; 3494 break; 3495 case SIL_TIMER: 3496 to->si_tid = from->si_tid; 3497 to->si_overrun = from->si_overrun; 3498 to->si_int = from->si_int; 3499 break; 3500 case SIL_POLL: 3501 to->si_band = from->si_band; 3502 to->si_fd = from->si_fd; 3503 break; 3504 case SIL_FAULT: 3505 to->si_addr = compat_ptr(from->si_addr); 3506 break; 3507 case SIL_FAULT_TRAPNO: 3508 to->si_addr = compat_ptr(from->si_addr); 3509 to->si_trapno = from->si_trapno; 3510 break; 3511 case SIL_FAULT_MCEERR: 3512 to->si_addr = compat_ptr(from->si_addr); 3513 to->si_addr_lsb = from->si_addr_lsb; 3514 break; 3515 case SIL_FAULT_BNDERR: 3516 to->si_addr = compat_ptr(from->si_addr); 3517 to->si_lower = compat_ptr(from->si_lower); 3518 to->si_upper = compat_ptr(from->si_upper); 3519 break; 3520 case SIL_FAULT_PKUERR: 3521 to->si_addr = 
compat_ptr(from->si_addr); 3522 to->si_pkey = from->si_pkey; 3523 break; 3524 case SIL_FAULT_PERF_EVENT: 3525 to->si_addr = compat_ptr(from->si_addr); 3526 to->si_perf_data = from->si_perf_data; 3527 to->si_perf_type = from->si_perf_type; 3528 to->si_perf_flags = from->si_perf_flags; 3529 break; 3530 case SIL_CHLD: 3531 to->si_pid = from->si_pid; 3532 to->si_uid = from->si_uid; 3533 to->si_status = from->si_status; 3534 #ifdef CONFIG_X86_X32_ABI 3535 if (in_x32_syscall()) { 3536 to->si_utime = from->_sifields._sigchld_x32._utime; 3537 to->si_stime = from->_sifields._sigchld_x32._stime; 3538 } else 3539 #endif 3540 { 3541 to->si_utime = from->si_utime; 3542 to->si_stime = from->si_stime; 3543 } 3544 break; 3545 case SIL_RT: 3546 to->si_pid = from->si_pid; 3547 to->si_uid = from->si_uid; 3548 to->si_int = from->si_int; 3549 break; 3550 case SIL_SYS: 3551 to->si_call_addr = compat_ptr(from->si_call_addr); 3552 to->si_syscall = from->si_syscall; 3553 to->si_arch = from->si_arch; 3554 break; 3555 } 3556 return 0; 3557 } 3558 3559 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to, 3560 const struct compat_siginfo __user *ufrom) 3561 { 3562 struct compat_siginfo from; 3563 3564 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) 3565 return -EFAULT; 3566 3567 from.si_signo = signo; 3568 return post_copy_siginfo_from_user32(to, &from); 3569 } 3570 3571 int copy_siginfo_from_user32(struct kernel_siginfo *to, 3572 const struct compat_siginfo __user *ufrom) 3573 { 3574 struct compat_siginfo from; 3575 3576 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) 3577 return -EFAULT; 3578 3579 return post_copy_siginfo_from_user32(to, &from); 3580 } 3581 #endif /* CONFIG_COMPAT */ 3582 3583 /** 3584 * do_sigtimedwait - wait for queued signals specified in @which 3585 * @which: queued signals to wait for 3586 * @info: if non-null, the signal's siginfo is returned here 3587 * @ts: upper bound on process time suspension 3588 */ 3589 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info, 3590 const struct timespec64 *ts) 3591 { 3592 ktime_t *to = NULL, timeout = KTIME_MAX; 3593 struct task_struct *tsk = current; 3594 sigset_t mask = *which; 3595 enum pid_type type; 3596 int sig, ret = 0; 3597 3598 if (ts) { 3599 if (!timespec64_valid(ts)) 3600 return -EINVAL; 3601 timeout = timespec64_to_ktime(*ts); 3602 to = &timeout; 3603 } 3604 3605 /* 3606 * Invert the set of allowed signals to get those we want to block. 3607 */ 3608 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); 3609 signotset(&mask); 3610 3611 spin_lock_irq(&tsk->sighand->siglock); 3612 sig = dequeue_signal(tsk, &mask, info, &type); 3613 if (!sig && timeout) { 3614 /* 3615 * None ready, temporarily unblock those we're interested 3616 * while we are sleeping in so that we'll be awakened when 3617 * they arrive. Unblocking is always fine, we can avoid 3618 * set_current_blocked(). 
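* (Since @mask already holds the complement of the signals being waited
* for, the sigandsets() below is effectively tsk->blocked &= ~wanted.)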
3619 */ 3620 tsk->real_blocked = tsk->blocked; 3621 sigandsets(&tsk->blocked, &tsk->blocked, &mask); 3622 recalc_sigpending(); 3623 spin_unlock_irq(&tsk->sighand->siglock); 3624 3625 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 3626 ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns, 3627 HRTIMER_MODE_REL); 3628 spin_lock_irq(&tsk->sighand->siglock); 3629 __set_task_blocked(tsk, &tsk->real_blocked); 3630 sigemptyset(&tsk->real_blocked); 3631 sig = dequeue_signal(tsk, &mask, info, &type); 3632 } 3633 spin_unlock_irq(&tsk->sighand->siglock); 3634 3635 if (sig) 3636 return sig; 3637 return ret ? -EINTR : -EAGAIN; 3638 } 3639 3640 /** 3641 * sys_rt_sigtimedwait - synchronously wait for queued signals specified 3642 * in @uthese 3643 * @uthese: queued signals to wait for 3644 * @uinfo: if non-null, the signal's siginfo is returned here 3645 * @uts: upper bound on process time suspension 3646 * @sigsetsize: size of sigset_t type 3647 */ 3648 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, 3649 siginfo_t __user *, uinfo, 3650 const struct __kernel_timespec __user *, uts, 3651 size_t, sigsetsize) 3652 { 3653 sigset_t these; 3654 struct timespec64 ts; 3655 kernel_siginfo_t info; 3656 int ret; 3657 3658 /* XXX: Don't preclude handling different sized sigset_t's. */ 3659 if (sigsetsize != sizeof(sigset_t)) 3660 return -EINVAL; 3661 3662 if (copy_from_user(&these, uthese, sizeof(these))) 3663 return -EFAULT; 3664 3665 if (uts) { 3666 if (get_timespec64(&ts, uts)) 3667 return -EFAULT; 3668 } 3669 3670 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); 3671 3672 if (ret > 0 && uinfo) { 3673 if (copy_siginfo_to_user(uinfo, &info)) 3674 ret = -EFAULT; 3675 } 3676 3677 return ret; 3678 } 3679 3680 #ifdef CONFIG_COMPAT_32BIT_TIME 3681 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese, 3682 siginfo_t __user *, uinfo, 3683 const struct old_timespec32 __user *, uts, 3684 size_t, sigsetsize) 3685 { 3686 sigset_t these; 3687 struct timespec64 ts; 3688 kernel_siginfo_t info; 3689 int ret; 3690 3691 if (sigsetsize != sizeof(sigset_t)) 3692 return -EINVAL; 3693 3694 if (copy_from_user(&these, uthese, sizeof(these))) 3695 return -EFAULT; 3696 3697 if (uts) { 3698 if (get_old_timespec32(&ts, uts)) 3699 return -EFAULT; 3700 } 3701 3702 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); 3703 3704 if (ret > 0 && uinfo) { 3705 if (copy_siginfo_to_user(uinfo, &info)) 3706 ret = -EFAULT; 3707 } 3708 3709 return ret; 3710 } 3711 #endif 3712 3713 #ifdef CONFIG_COMPAT 3714 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese, 3715 struct compat_siginfo __user *, uinfo, 3716 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize) 3717 { 3718 sigset_t s; 3719 struct timespec64 t; 3720 kernel_siginfo_t info; 3721 long ret; 3722 3723 if (sigsetsize != sizeof(sigset_t)) 3724 return -EINVAL; 3725 3726 if (get_compat_sigset(&s, uthese)) 3727 return -EFAULT; 3728 3729 if (uts) { 3730 if (get_timespec64(&t, uts)) 3731 return -EFAULT; 3732 } 3733 3734 ret = do_sigtimedwait(&s, &info, uts ? 
&t : NULL); 3735 3736 if (ret > 0 && uinfo) { 3737 if (copy_siginfo_to_user32(uinfo, &info)) 3738 ret = -EFAULT; 3739 } 3740 3741 return ret; 3742 } 3743 3744 #ifdef CONFIG_COMPAT_32BIT_TIME 3745 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese, 3746 struct compat_siginfo __user *, uinfo, 3747 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize) 3748 { 3749 sigset_t s; 3750 struct timespec64 t; 3751 kernel_siginfo_t info; 3752 long ret; 3753 3754 if (sigsetsize != sizeof(sigset_t)) 3755 return -EINVAL; 3756 3757 if (get_compat_sigset(&s, uthese)) 3758 return -EFAULT; 3759 3760 if (uts) { 3761 if (get_old_timespec32(&t, uts)) 3762 return -EFAULT; 3763 } 3764 3765 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); 3766 3767 if (ret > 0 && uinfo) { 3768 if (copy_siginfo_to_user32(uinfo, &info)) 3769 ret = -EFAULT; 3770 } 3771 3772 return ret; 3773 } 3774 #endif 3775 #endif 3776 3777 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info) 3778 { 3779 clear_siginfo(info); 3780 info->si_signo = sig; 3781 info->si_errno = 0; 3782 info->si_code = SI_USER; 3783 info->si_pid = task_tgid_vnr(current); 3784 info->si_uid = from_kuid_munged(current_user_ns(), current_uid()); 3785 } 3786 3787 /** 3788 * sys_kill - send a signal to a process 3789 * @pid: the PID of the process 3790 * @sig: signal to be sent 3791 */ 3792 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) 3793 { 3794 struct kernel_siginfo info; 3795 3796 prepare_kill_siginfo(sig, &info); 3797 3798 return kill_something_info(sig, &info, pid); 3799 } 3800 3801 /* 3802 * Verify that the signaler and signalee either are in the same pid namespace 3803 * or that the signaler's pid namespace is an ancestor of the signalee's pid 3804 * namespace. 3805 */ 3806 static bool access_pidfd_pidns(struct pid *pid) 3807 { 3808 struct pid_namespace *active = task_active_pid_ns(current); 3809 struct pid_namespace *p = ns_of_pid(pid); 3810 3811 for (;;) { 3812 if (!p) 3813 return false; 3814 if (p == active) 3815 break; 3816 p = p->parent; 3817 } 3818 3819 return true; 3820 } 3821 3822 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, 3823 siginfo_t __user *info) 3824 { 3825 #ifdef CONFIG_COMPAT 3826 /* 3827 * Avoid hooking up compat syscalls and instead handle necessary 3828 * conversions here. Note, this is a stop-gap measure and should not be 3829 * considered a generic solution. 3830 */ 3831 if (in_compat_syscall()) 3832 return copy_siginfo_from_user32( 3833 kinfo, (struct compat_siginfo __user *)info); 3834 #endif 3835 return copy_siginfo_from_user(kinfo, info); 3836 } 3837 3838 static struct pid *pidfd_to_pid(const struct file *file) 3839 { 3840 struct pid *pid; 3841 3842 pid = pidfd_pid(file); 3843 if (!IS_ERR(pid)) 3844 return pid; 3845 3846 return tgid_pidfd_to_pid(file); 3847 } 3848 3849 /** 3850 * sys_pidfd_send_signal - Signal a process through a pidfd 3851 * @pidfd: file descriptor of the process 3852 * @sig: signal to send 3853 * @info: signal info 3854 * @flags: future flags 3855 * 3856 * The syscall currently only signals via PIDTYPE_PID which covers 3857 * kill(<positive-pid>, <signal>. It does not signal threads or process 3858 * groups. 3859 * In order to extend the syscall to threads and process groups the @flags 3860 * argument should be used. In essence, the @flags argument will determine 3861 * what is signaled and not the file descriptor itself. Put in other words, 3862 * grouping is a property of the flags argument not a property of the file 3863 * descriptor. 
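*
* A minimal, hypothetical user-space call, assuming @pidfd came from
* pidfd_open(2) or clone(2) with CLONE_PIDFD, looks roughly like:
*
*	syscall(__NR_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
*
* A NULL @info makes the kernel fill in a SI_USER siginfo, just as a
* regular kill(2) would.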
3864 * 3865 * Return: 0 on success, negative errno on failure 3866 */ 3867 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, 3868 siginfo_t __user *, info, unsigned int, flags) 3869 { 3870 int ret; 3871 struct fd f; 3872 struct pid *pid; 3873 kernel_siginfo_t kinfo; 3874 3875 /* Enforce flags be set to 0 until we add an extension. */ 3876 if (flags) 3877 return -EINVAL; 3878 3879 f = fdget(pidfd); 3880 if (!f.file) 3881 return -EBADF; 3882 3883 /* Is this a pidfd? */ 3884 pid = pidfd_to_pid(f.file); 3885 if (IS_ERR(pid)) { 3886 ret = PTR_ERR(pid); 3887 goto err; 3888 } 3889 3890 ret = -EINVAL; 3891 if (!access_pidfd_pidns(pid)) 3892 goto err; 3893 3894 if (info) { 3895 ret = copy_siginfo_from_user_any(&kinfo, info); 3896 if (unlikely(ret)) 3897 goto err; 3898 3899 ret = -EINVAL; 3900 if (unlikely(sig != kinfo.si_signo)) 3901 goto err; 3902 3903 /* Only allow sending arbitrary signals to yourself. */ 3904 ret = -EPERM; 3905 if ((task_pid(current) != pid) && 3906 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) 3907 goto err; 3908 } else { 3909 prepare_kill_siginfo(sig, &kinfo); 3910 } 3911 3912 ret = kill_pid_info(sig, &kinfo, pid); 3913 3914 err: 3915 fdput(f); 3916 return ret; 3917 } 3918 3919 static int 3920 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info) 3921 { 3922 struct task_struct *p; 3923 int error = -ESRCH; 3924 3925 rcu_read_lock(); 3926 p = find_task_by_vpid(pid); 3927 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { 3928 error = check_kill_permission(sig, info, p); 3929 /* 3930 * The null signal is a permissions and process existence 3931 * probe. No signal is actually delivered. 3932 */ 3933 if (!error && sig) { 3934 error = do_send_sig_info(sig, info, p, PIDTYPE_PID); 3935 /* 3936 * If lock_task_sighand() failed we pretend the task 3937 * dies after receiving the signal. The window is tiny, 3938 * and the signal is private anyway. 3939 */ 3940 if (unlikely(error == -ESRCH)) 3941 error = 0; 3942 } 3943 } 3944 rcu_read_unlock(); 3945 3946 return error; 3947 } 3948 3949 static int do_tkill(pid_t tgid, pid_t pid, int sig) 3950 { 3951 struct kernel_siginfo info; 3952 3953 clear_siginfo(&info); 3954 info.si_signo = sig; 3955 info.si_errno = 0; 3956 info.si_code = SI_TKILL; 3957 info.si_pid = task_tgid_vnr(current); 3958 info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); 3959 3960 return do_send_specific(tgid, pid, sig, &info); 3961 } 3962 3963 /** 3964 * sys_tgkill - send signal to one specific thread 3965 * @tgid: the thread group ID of the thread 3966 * @pid: the PID of the thread 3967 * @sig: signal to be sent 3968 * 3969 * This syscall also checks the @tgid and returns -ESRCH even if the PID 3970 * exists but it's not belonging to the target process anymore. This 3971 * method solves the problem of threads exiting and PIDs getting reused. 3972 */ 3973 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) 3974 { 3975 /* This is only valid for single tasks */ 3976 if (pid <= 0 || tgid <= 0) 3977 return -EINVAL; 3978 3979 return do_tkill(tgid, pid, sig); 3980 } 3981 3982 /** 3983 * sys_tkill - send signal to one specific task 3984 * @pid: the PID of the task 3985 * @sig: signal to be sent 3986 * 3987 * Send a signal to only one task, even if it's a CLONE_THREAD task. 
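*
* Note: user space is generally expected to use tgkill() instead; a bare
* thread ID can be recycled after the thread exits, so plain tkill() may
* signal the wrong task.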
3988 */ 3989 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) 3990 { 3991 /* This is only valid for single tasks */ 3992 if (pid <= 0) 3993 return -EINVAL; 3994 3995 return do_tkill(0, pid, sig); 3996 } 3997 3998 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info) 3999 { 4000 /* Not even root can pretend to send signals from the kernel. 4001 * Nor can they impersonate a kill()/tgkill(), which adds source info. 4002 */ 4003 if ((info->si_code >= 0 || info->si_code == SI_TKILL) && 4004 (task_pid_vnr(current) != pid)) 4005 return -EPERM; 4006 4007 /* POSIX.1b doesn't mention process groups. */ 4008 return kill_proc_info(sig, info, pid); 4009 } 4010 4011 /** 4012 * sys_rt_sigqueueinfo - send signal information to a signal 4013 * @pid: the PID of the thread 4014 * @sig: signal to be sent 4015 * @uinfo: signal info to be sent 4016 */ 4017 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, 4018 siginfo_t __user *, uinfo) 4019 { 4020 kernel_siginfo_t info; 4021 int ret = __copy_siginfo_from_user(sig, &info, uinfo); 4022 if (unlikely(ret)) 4023 return ret; 4024 return do_rt_sigqueueinfo(pid, sig, &info); 4025 } 4026 4027 #ifdef CONFIG_COMPAT 4028 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, 4029 compat_pid_t, pid, 4030 int, sig, 4031 struct compat_siginfo __user *, uinfo) 4032 { 4033 kernel_siginfo_t info; 4034 int ret = __copy_siginfo_from_user32(sig, &info, uinfo); 4035 if (unlikely(ret)) 4036 return ret; 4037 return do_rt_sigqueueinfo(pid, sig, &info); 4038 } 4039 #endif 4040 4041 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info) 4042 { 4043 /* This is only valid for single tasks */ 4044 if (pid <= 0 || tgid <= 0) 4045 return -EINVAL; 4046 4047 /* Not even root can pretend to send signals from the kernel. 4048 * Nor can they impersonate a kill()/tgkill(), which adds source info. 
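 *
 * The usual userspace route into this path is pthread_sigqueue(3), which
 * queues with si_code = SI_QUEUE (a negative value) and is therefore not
 * rejected by the check below. A rough sketch, assuming a glibc that
 * provides the wrapper (thread and the payload are placeholders):
 *
 *	union sigval val = { .sival_int = 42 };
 *	pthread_sigqueue(thread, SIGRTMIN, val);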
4049 */ 4050 if ((info->si_code >= 0 || info->si_code == SI_TKILL) && 4051 (task_pid_vnr(current) != pid)) 4052 return -EPERM; 4053 4054 return do_send_specific(tgid, pid, sig, info); 4055 } 4056 4057 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, 4058 siginfo_t __user *, uinfo) 4059 { 4060 kernel_siginfo_t info; 4061 int ret = __copy_siginfo_from_user(sig, &info, uinfo); 4062 if (unlikely(ret)) 4063 return ret; 4064 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); 4065 } 4066 4067 #ifdef CONFIG_COMPAT 4068 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, 4069 compat_pid_t, tgid, 4070 compat_pid_t, pid, 4071 int, sig, 4072 struct compat_siginfo __user *, uinfo) 4073 { 4074 kernel_siginfo_t info; 4075 int ret = __copy_siginfo_from_user32(sig, &info, uinfo); 4076 if (unlikely(ret)) 4077 return ret; 4078 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); 4079 } 4080 #endif 4081 4082 /* 4083 * For kthreads only, must not be used if cloned with CLONE_SIGHAND 4084 */ 4085 void kernel_sigaction(int sig, __sighandler_t action) 4086 { 4087 spin_lock_irq(&current->sighand->siglock); 4088 current->sighand->action[sig - 1].sa.sa_handler = action; 4089 if (action == SIG_IGN) { 4090 sigset_t mask; 4091 4092 sigemptyset(&mask); 4093 sigaddset(&mask, sig); 4094 4095 flush_sigqueue_mask(&mask, &current->signal->shared_pending); 4096 flush_sigqueue_mask(&mask, &current->pending); 4097 recalc_sigpending(); 4098 } 4099 spin_unlock_irq(&current->sighand->siglock); 4100 } 4101 EXPORT_SYMBOL(kernel_sigaction); 4102 4103 void __weak sigaction_compat_abi(struct k_sigaction *act, 4104 struct k_sigaction *oact) 4105 { 4106 } 4107 4108 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) 4109 { 4110 struct task_struct *p = current, *t; 4111 struct k_sigaction *k; 4112 sigset_t mask; 4113 4114 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) 4115 return -EINVAL; 4116 4117 k = &p->sighand->action[sig-1]; 4118 4119 spin_lock_irq(&p->sighand->siglock); 4120 if (k->sa.sa_flags & SA_IMMUTABLE) { 4121 spin_unlock_irq(&p->sighand->siglock); 4122 return -EINVAL; 4123 } 4124 if (oact) 4125 *oact = *k; 4126 4127 /* 4128 * Make sure that we never accidentally claim to support SA_UNSUPPORTED, 4129 * e.g. by having an architecture use the bit in their uapi. 4130 */ 4131 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED); 4132 4133 /* 4134 * Clear unknown flag bits in order to allow userspace to detect missing 4135 * support for flag bits and to allow the kernel to use non-uapi bits 4136 * internally. 4137 */ 4138 if (act) 4139 act->sa.sa_flags &= UAPI_SA_FLAGS; 4140 if (oact) 4141 oact->sa.sa_flags &= UAPI_SA_FLAGS; 4142 4143 sigaction_compat_abi(act, oact); 4144 4145 if (act) { 4146 sigdelsetmask(&act->sa.sa_mask, 4147 sigmask(SIGKILL) | sigmask(SIGSTOP)); 4148 *k = *act; 4149 /* 4150 * POSIX 3.3.1.3: 4151 * "Setting a signal action to SIG_IGN for a signal that is 4152 * pending shall cause the pending signal to be discarded, 4153 * whether or not it is blocked."
4154 * 4155 * "Setting a signal action to SIG_DFL for a signal that is 4156 * pending and whose default action is to ignore the signal 4157 * (for example, SIGCHLD), shall cause the pending signal to 4158 * be discarded, whether or not it is blocked" 4159 */ 4160 if (sig_handler_ignored(sig_handler(p, sig), sig)) { 4161 sigemptyset(&mask); 4162 sigaddset(&mask, sig); 4163 flush_sigqueue_mask(&mask, &p->signal->shared_pending); 4164 for_each_thread(p, t) 4165 flush_sigqueue_mask(&mask, &t->pending); 4166 } 4167 } 4168 4169 spin_unlock_irq(&p->sighand->siglock); 4170 return 0; 4171 } 4172 4173 #ifdef CONFIG_DYNAMIC_SIGFRAME 4174 static inline void sigaltstack_lock(void) 4175 __acquires(&current->sighand->siglock) 4176 { 4177 spin_lock_irq(&current->sighand->siglock); 4178 } 4179 4180 static inline void sigaltstack_unlock(void) 4181 __releases(&current->sighand->siglock) 4182 { 4183 spin_unlock_irq(&current->sighand->siglock); 4184 } 4185 #else 4186 static inline void sigaltstack_lock(void) { } 4187 static inline void sigaltstack_unlock(void) { } 4188 #endif 4189 4190 static int 4191 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp, 4192 size_t min_ss_size) 4193 { 4194 struct task_struct *t = current; 4195 int ret = 0; 4196 4197 if (oss) { 4198 memset(oss, 0, sizeof(stack_t)); 4199 oss->ss_sp = (void __user *) t->sas_ss_sp; 4200 oss->ss_size = t->sas_ss_size; 4201 oss->ss_flags = sas_ss_flags(sp) | 4202 (current->sas_ss_flags & SS_FLAG_BITS); 4203 } 4204 4205 if (ss) { 4206 void __user *ss_sp = ss->ss_sp; 4207 size_t ss_size = ss->ss_size; 4208 unsigned ss_flags = ss->ss_flags; 4209 int ss_mode; 4210 4211 if (unlikely(on_sig_stack(sp))) 4212 return -EPERM; 4213 4214 ss_mode = ss_flags & ~SS_FLAG_BITS; 4215 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK && 4216 ss_mode != 0)) 4217 return -EINVAL; 4218 4219 /* 4220 * Return before taking any locks if no actual 4221 * sigaltstack changes were requested. 4222 */ 4223 if (t->sas_ss_sp == (unsigned long)ss_sp && 4224 t->sas_ss_size == ss_size && 4225 t->sas_ss_flags == ss_flags) 4226 return 0; 4227 4228 sigaltstack_lock(); 4229 if (ss_mode == SS_DISABLE) { 4230 ss_size = 0; 4231 ss_sp = NULL; 4232 } else { 4233 if (unlikely(ss_size < min_ss_size)) 4234 ret = -ENOMEM; 4235 if (!sigaltstack_size_valid(ss_size)) 4236 ret = -ENOMEM; 4237 } 4238 if (!ret) { 4239 t->sas_ss_sp = (unsigned long) ss_sp; 4240 t->sas_ss_size = ss_size; 4241 t->sas_ss_flags = ss_flags; 4242 } 4243 sigaltstack_unlock(); 4244 } 4245 return ret; 4246 } 4247 4248 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) 4249 { 4250 stack_t new, old; 4251 int err; 4252 if (uss && copy_from_user(&new, uss, sizeof(stack_t))) 4253 return -EFAULT; 4254 err = do_sigaltstack(uss ? &new : NULL, uoss ?
&old : NULL, 4255 current_user_stack_pointer(), 4256 MINSIGSTKSZ); 4257 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t))) 4258 err = -EFAULT; 4259 return err; 4260 } 4261 4262 int restore_altstack(const stack_t __user *uss) 4263 { 4264 stack_t new; 4265 if (copy_from_user(&new, uss, sizeof(stack_t))) 4266 return -EFAULT; 4267 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(), 4268 MINSIGSTKSZ); 4269 /* squash all but EFAULT for now */ 4270 return 0; 4271 } 4272 4273 int __save_altstack(stack_t __user *uss, unsigned long sp) 4274 { 4275 struct task_struct *t = current; 4276 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | 4277 __put_user(t->sas_ss_flags, &uss->ss_flags) | 4278 __put_user(t->sas_ss_size, &uss->ss_size); 4279 return err; 4280 } 4281 4282 #ifdef CONFIG_COMPAT 4283 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr, 4284 compat_stack_t __user *uoss_ptr) 4285 { 4286 stack_t uss, uoss; 4287 int ret; 4288 4289 if (uss_ptr) { 4290 compat_stack_t uss32; 4291 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t))) 4292 return -EFAULT; 4293 uss.ss_sp = compat_ptr(uss32.ss_sp); 4294 uss.ss_flags = uss32.ss_flags; 4295 uss.ss_size = uss32.ss_size; 4296 } 4297 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, 4298 compat_user_stack_pointer(), 4299 COMPAT_MINSIGSTKSZ); 4300 if (ret >= 0 && uoss_ptr) { 4301 compat_stack_t old; 4302 memset(&old, 0, sizeof(old)); 4303 old.ss_sp = ptr_to_compat(uoss.ss_sp); 4304 old.ss_flags = uoss.ss_flags; 4305 old.ss_size = uoss.ss_size; 4306 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t))) 4307 ret = -EFAULT; 4308 } 4309 return ret; 4310 } 4311 4312 COMPAT_SYSCALL_DEFINE2(sigaltstack, 4313 const compat_stack_t __user *, uss_ptr, 4314 compat_stack_t __user *, uoss_ptr) 4315 { 4316 return do_compat_sigaltstack(uss_ptr, uoss_ptr); 4317 } 4318 4319 int compat_restore_altstack(const compat_stack_t __user *uss) 4320 { 4321 int err = do_compat_sigaltstack(uss, NULL); 4322 /* squash all but -EFAULT for now */ 4323 return err == -EFAULT ? 
err : 0; 4324 } 4325 4326 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) 4327 { 4328 int err; 4329 struct task_struct *t = current; 4330 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), 4331 &uss->ss_sp) | 4332 __put_user(t->sas_ss_flags, &uss->ss_flags) | 4333 __put_user(t->sas_ss_size, &uss->ss_size); 4334 return err; 4335 } 4336 #endif 4337 4338 #ifdef __ARCH_WANT_SYS_SIGPENDING 4339 4340 /** 4341 * sys_sigpending - examine pending signals 4342 * @uset: where mask of pending signal is returned 4343 */ 4344 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset) 4345 { 4346 sigset_t set; 4347 4348 if (sizeof(old_sigset_t) > sizeof(*uset)) 4349 return -EINVAL; 4350 4351 do_sigpending(&set); 4352 4353 if (copy_to_user(uset, &set, sizeof(old_sigset_t))) 4354 return -EFAULT; 4355 4356 return 0; 4357 } 4358 4359 #ifdef CONFIG_COMPAT 4360 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32) 4361 { 4362 sigset_t set; 4363 4364 do_sigpending(&set); 4365 4366 return put_user(set.sig[0], set32); 4367 } 4368 #endif 4369 4370 #endif 4371 4372 #ifdef __ARCH_WANT_SYS_SIGPROCMASK 4373 /** 4374 * sys_sigprocmask - examine and change blocked signals 4375 * @how: whether to add, remove, or set signals 4376 * @nset: signals to add or remove (if non-null) 4377 * @oset: previous value of signal mask if non-null 4378 * 4379 * Some platforms have their own version with special arguments; 4380 * others support only sys_rt_sigprocmask. 4381 */ 4382 4383 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, 4384 old_sigset_t __user *, oset) 4385 { 4386 old_sigset_t old_set, new_set; 4387 sigset_t new_blocked; 4388 4389 old_set = current->blocked.sig[0]; 4390 4391 if (nset) { 4392 if (copy_from_user(&new_set, nset, sizeof(*nset))) 4393 return -EFAULT; 4394 4395 new_blocked = current->blocked; 4396 4397 switch (how) { 4398 case SIG_BLOCK: 4399 sigaddsetmask(&new_blocked, new_set); 4400 break; 4401 case SIG_UNBLOCK: 4402 sigdelsetmask(&new_blocked, new_set); 4403 break; 4404 case SIG_SETMASK: 4405 new_blocked.sig[0] = new_set; 4406 break; 4407 default: 4408 return -EINVAL; 4409 } 4410 4411 set_current_blocked(&new_blocked); 4412 } 4413 4414 if (oset) { 4415 if (copy_to_user(oset, &old_set, sizeof(*oset))) 4416 return -EFAULT; 4417 } 4418 4419 return 0; 4420 } 4421 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ 4422 4423 #ifndef CONFIG_ODD_RT_SIGACTION 4424 /** 4425 * sys_rt_sigaction - alter an action taken by a process 4426 * @sig: signal to be sent 4427 * @act: new sigaction 4428 * @oact: used to save the previous sigaction 4429 * @sigsetsize: size of sigset_t type 4430 */ 4431 SYSCALL_DEFINE4(rt_sigaction, int, sig, 4432 const struct sigaction __user *, act, 4433 struct sigaction __user *, oact, 4434 size_t, sigsetsize) 4435 { 4436 struct k_sigaction new_sa, old_sa; 4437 int ret; 4438 4439 /* XXX: Don't preclude handling different sized sigset_t's. */ 4440 if (sigsetsize != sizeof(sigset_t)) 4441 return -EINVAL; 4442 4443 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) 4444 return -EFAULT; 4445 4446 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? 
&old_sa : NULL); 4447 if (ret) 4448 return ret; 4449 4450 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) 4451 return -EFAULT; 4452 4453 return 0; 4454 } 4455 #ifdef CONFIG_COMPAT 4456 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig, 4457 const struct compat_sigaction __user *, act, 4458 struct compat_sigaction __user *, oact, 4459 compat_size_t, sigsetsize) 4460 { 4461 struct k_sigaction new_ka, old_ka; 4462 #ifdef __ARCH_HAS_SA_RESTORER 4463 compat_uptr_t restorer; 4464 #endif 4465 int ret; 4466 4467 /* XXX: Don't preclude handling different sized sigset_t's. */ 4468 if (sigsetsize != sizeof(compat_sigset_t)) 4469 return -EINVAL; 4470 4471 if (act) { 4472 compat_uptr_t handler; 4473 ret = get_user(handler, &act->sa_handler); 4474 new_ka.sa.sa_handler = compat_ptr(handler); 4475 #ifdef __ARCH_HAS_SA_RESTORER 4476 ret |= get_user(restorer, &act->sa_restorer); 4477 new_ka.sa.sa_restorer = compat_ptr(restorer); 4478 #endif 4479 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask); 4480 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); 4481 if (ret) 4482 return -EFAULT; 4483 } 4484 4485 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 4486 if (!ret && oact) { 4487 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 4488 &oact->sa_handler); 4489 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask, 4490 sizeof(oact->sa_mask)); 4491 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); 4492 #ifdef __ARCH_HAS_SA_RESTORER 4493 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), 4494 &oact->sa_restorer); 4495 #endif 4496 } 4497 return ret; 4498 } 4499 #endif 4500 #endif /* !CONFIG_ODD_RT_SIGACTION */ 4501 4502 #ifdef CONFIG_OLD_SIGACTION 4503 SYSCALL_DEFINE3(sigaction, int, sig, 4504 const struct old_sigaction __user *, act, 4505 struct old_sigaction __user *, oact) 4506 { 4507 struct k_sigaction new_ka, old_ka; 4508 int ret; 4509 4510 if (act) { 4511 old_sigset_t mask; 4512 if (!access_ok(act, sizeof(*act)) || 4513 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 4514 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || 4515 __get_user(new_ka.sa.sa_flags, &act->sa_flags) || 4516 __get_user(mask, &act->sa_mask)) 4517 return -EFAULT; 4518 #ifdef __ARCH_HAS_KA_RESTORER 4519 new_ka.ka_restorer = NULL; 4520 #endif 4521 siginitset(&new_ka.sa.sa_mask, mask); 4522 } 4523 4524 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); 4525 4526 if (!ret && oact) { 4527 if (!access_ok(oact, sizeof(*oact)) || 4528 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 4529 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || 4530 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || 4531 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) 4532 return -EFAULT; 4533 } 4534 4535 return ret; 4536 } 4537 #endif 4538 #ifdef CONFIG_COMPAT_OLD_SIGACTION 4539 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig, 4540 const struct compat_old_sigaction __user *, act, 4541 struct compat_old_sigaction __user *, oact) 4542 { 4543 struct k_sigaction new_ka, old_ka; 4544 int ret; 4545 compat_old_sigset_t mask; 4546 compat_uptr_t handler, restorer; 4547 4548 if (act) { 4549 if (!access_ok(act, sizeof(*act)) || 4550 __get_user(handler, &act->sa_handler) || 4551 __get_user(restorer, &act->sa_restorer) || 4552 __get_user(new_ka.sa.sa_flags, &act->sa_flags) || 4553 __get_user(mask, &act->sa_mask)) 4554 return -EFAULT; 4555 4556 #ifdef __ARCH_HAS_KA_RESTORER 4557 new_ka.ka_restorer = NULL; 4558 #endif 4559 new_ka.sa.sa_handler = compat_ptr(handler); 4560 new_ka.sa.sa_restorer = compat_ptr(restorer); 4561 siginitset(&new_ka.sa.sa_mask, mask); 4562 } 4563 4564 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 4565 4566 if (!ret && oact) { 4567 if (!access_ok(oact, sizeof(*oact)) || 4568 __put_user(ptr_to_compat(old_ka.sa.sa_handler), 4569 &oact->sa_handler) || 4570 __put_user(ptr_to_compat(old_ka.sa.sa_restorer), 4571 &oact->sa_restorer) || 4572 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || 4573 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) 4574 return -EFAULT; 4575 } 4576 return ret; 4577 } 4578 #endif 4579 4580 #ifdef CONFIG_SGETMASK_SYSCALL 4581 4582 /* 4583 * For backwards compatibility. Functionality superseded by sigprocmask. 4584 */ 4585 SYSCALL_DEFINE0(sgetmask) 4586 { 4587 /* SMP safe */ 4588 return current->blocked.sig[0]; 4589 } 4590 4591 SYSCALL_DEFINE1(ssetmask, int, newmask) 4592 { 4593 int old = current->blocked.sig[0]; 4594 sigset_t newset; 4595 4596 siginitset(&newset, newmask); 4597 set_current_blocked(&newset); 4598 4599 return old; 4600 } 4601 #endif /* CONFIG_SGETMASK_SYSCALL */ 4602 4603 #ifdef __ARCH_WANT_SYS_SIGNAL 4604 /* 4605 * For backwards compatibility. Functionality superseded by sigaction. 4606 */ 4607 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) 4608 { 4609 struct k_sigaction new_sa, old_sa; 4610 int ret; 4611 4612 new_sa.sa.sa_handler = handler; 4613 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; 4614 sigemptyset(&new_sa.sa.sa_mask); 4615 4616 ret = do_sigaction(sig, &new_sa, &old_sa); 4617 4618 return ret ? 
ret : (unsigned long)old_sa.sa.sa_handler; 4619 } 4620 #endif /* __ARCH_WANT_SYS_SIGNAL */ 4621 4622 #ifdef __ARCH_WANT_SYS_PAUSE 4623 4624 SYSCALL_DEFINE0(pause) 4625 { 4626 while (!signal_pending(current)) { 4627 __set_current_state(TASK_INTERRUPTIBLE); 4628 schedule(); 4629 } 4630 return -ERESTARTNOHAND; 4631 } 4632 4633 #endif 4634 4635 static int sigsuspend(sigset_t *set) 4636 { 4637 current->saved_sigmask = current->blocked; 4638 set_current_blocked(set); 4639 4640 while (!signal_pending(current)) { 4641 __set_current_state(TASK_INTERRUPTIBLE); 4642 schedule(); 4643 } 4644 set_restore_sigmask(); 4645 return -ERESTARTNOHAND; 4646 } 4647 4648 /** 4649 * sys_rt_sigsuspend - replace the signal mask for a value with the 4650 * @unewset value until a signal is received 4651 * @unewset: new signal mask value 4652 * @sigsetsize: size of sigset_t type 4653 */ 4654 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) 4655 { 4656 sigset_t newset; 4657 4658 /* XXX: Don't preclude handling different sized sigset_t's. */ 4659 if (sigsetsize != sizeof(sigset_t)) 4660 return -EINVAL; 4661 4662 if (copy_from_user(&newset, unewset, sizeof(newset))) 4663 return -EFAULT; 4664 return sigsuspend(&newset); 4665 } 4666 4667 #ifdef CONFIG_COMPAT 4668 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize) 4669 { 4670 sigset_t newset; 4671 4672 /* XXX: Don't preclude handling different sized sigset_t's. */ 4673 if (sigsetsize != sizeof(sigset_t)) 4674 return -EINVAL; 4675 4676 if (get_compat_sigset(&newset, unewset)) 4677 return -EFAULT; 4678 return sigsuspend(&newset); 4679 } 4680 #endif 4681 4682 #ifdef CONFIG_OLD_SIGSUSPEND 4683 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask) 4684 { 4685 sigset_t blocked; 4686 siginitset(&blocked, mask); 4687 return sigsuspend(&blocked); 4688 } 4689 #endif 4690 #ifdef CONFIG_OLD_SIGSUSPEND3 4691 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask) 4692 { 4693 sigset_t blocked; 4694 siginitset(&blocked, mask); 4695 return sigsuspend(&blocked); 4696 } 4697 #endif 4698 4699 __weak const char *arch_vma_name(struct vm_area_struct *vma) 4700 { 4701 return NULL; 4702 } 4703 4704 static inline void siginfo_buildtime_checks(void) 4705 { 4706 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE); 4707 4708 /* Verify the offsets in the two siginfos match */ 4709 #define CHECK_OFFSET(field) \ 4710 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field)) 4711 4712 /* kill */ 4713 CHECK_OFFSET(si_pid); 4714 CHECK_OFFSET(si_uid); 4715 4716 /* timer */ 4717 CHECK_OFFSET(si_tid); 4718 CHECK_OFFSET(si_overrun); 4719 CHECK_OFFSET(si_value); 4720 4721 /* rt */ 4722 CHECK_OFFSET(si_pid); 4723 CHECK_OFFSET(si_uid); 4724 CHECK_OFFSET(si_value); 4725 4726 /* sigchld */ 4727 CHECK_OFFSET(si_pid); 4728 CHECK_OFFSET(si_uid); 4729 CHECK_OFFSET(si_status); 4730 CHECK_OFFSET(si_utime); 4731 CHECK_OFFSET(si_stime); 4732 4733 /* sigfault */ 4734 CHECK_OFFSET(si_addr); 4735 CHECK_OFFSET(si_trapno); 4736 CHECK_OFFSET(si_addr_lsb); 4737 CHECK_OFFSET(si_lower); 4738 CHECK_OFFSET(si_upper); 4739 CHECK_OFFSET(si_pkey); 4740 CHECK_OFFSET(si_perf_data); 4741 CHECK_OFFSET(si_perf_type); 4742 CHECK_OFFSET(si_perf_flags); 4743 4744 /* sigpoll */ 4745 CHECK_OFFSET(si_band); 4746 CHECK_OFFSET(si_fd); 4747 4748 /* sigsys */ 4749 CHECK_OFFSET(si_call_addr); 4750 CHECK_OFFSET(si_syscall); 4751 CHECK_OFFSET(si_arch); 4752 #undef CHECK_OFFSET 4753 4754 /* usb asyncio */ 4755 BUILD_BUG_ON(offsetof(struct siginfo, 
si_pid) != 4756 offsetof(struct siginfo, si_addr)); 4757 if (sizeof(int) == sizeof(void __user *)) { 4758 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) != 4759 sizeof(void __user *)); 4760 } else { 4761 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) + 4762 sizeof_field(struct siginfo, si_uid)) != 4763 sizeof(void __user *)); 4764 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) != 4765 offsetof(struct siginfo, si_uid)); 4766 } 4767 #ifdef CONFIG_COMPAT 4768 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) != 4769 offsetof(struct compat_siginfo, si_addr)); 4770 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) != 4771 sizeof(compat_uptr_t)); 4772 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) != 4773 sizeof_field(struct siginfo, si_pid)); 4774 #endif 4775 } 4776 4777 #if defined(CONFIG_SYSCTL) 4778 static struct ctl_table signal_debug_table[] = { 4779 #ifdef CONFIG_SYSCTL_EXCEPTION_TRACE 4780 { 4781 .procname = "exception-trace", 4782 .data = &show_unhandled_signals, 4783 .maxlen = sizeof(int), 4784 .mode = 0644, 4785 .proc_handler = proc_dointvec 4786 }, 4787 #endif 4788 { } 4789 }; 4790 4791 static int __init init_signal_sysctls(void) 4792 { 4793 register_sysctl_init("debug", signal_debug_table); 4794 return 0; 4795 } 4796 early_initcall(init_signal_sysctls); 4797 #endif /* CONFIG_SYSCTL */ 4798 4799 void __init signals_init(void) 4800 { 4801 siginfo_buildtime_checks(); 4802 4803 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT); 4804 } 4805 4806 #ifdef CONFIG_KGDB_KDB 4807 #include <linux/kdb.h> 4808 /* 4809 * kdb_send_sig - Allows kdb to send signals without exposing 4810 * signal internals. This function checks if the required locks are 4811 * available before calling the main signal code, to avoid kdb 4812 * deadlocks. 4813 */ 4814 void kdb_send_sig(struct task_struct *t, int sig) 4815 { 4816 static struct task_struct *kdb_prev_t; 4817 int new_t, ret; 4818 if (!spin_trylock(&t->sighand->siglock)) { 4819 kdb_printf("Can't do kill command now.\n" 4820 "The sigmask lock is held somewhere else in " 4821 "kernel, try again later\n"); 4822 return; 4823 } 4824 new_t = kdb_prev_t != t; 4825 kdb_prev_t = t; 4826 if (!task_is_running(t) && new_t) { 4827 spin_unlock(&t->sighand->siglock); 4828 kdb_printf("Process is not RUNNING, sending a signal from " 4829 "kdb risks deadlock\n" 4830 "on the run queue locks. " 4831 "The signal has _not_ been sent.\n" 4832 "Reissue the kill command if you want to risk " 4833 "the deadlock.\n"); 4834 return; 4835 } 4836 ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID); 4837 spin_unlock(&t->sighand->siglock); 4838 if (ret) 4839 kdb_printf("Fail to deliver Signal %d to process %d.\n", 4840 sig, t->pid); 4841 else 4842 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid); 4843 } 4844 #endif /* CONFIG_KGDB_KDB */ 4845
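/*
 * Illustrative userspace sketch (not part of the kernel build): the usual
 * way the sigaltstack()/sigaction() paths above are exercised - install an
 * alternate stack and a SIGSEGV handler that runs on it via SA_ONSTACK.
 * Only standard libc wrappers are used; on_segv and the local variable
 * names are placeholders.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static void on_segv(int sig)
 *	{
 *		_exit(1);			// only async-signal-safe calls here
 *	}
 *
 *	int main(void)
 *	{
 *		stack_t ss = {
 *			.ss_sp = malloc(SIGSTKSZ),
 *			.ss_size = SIGSTKSZ,	// do_sigaltstack() wants >= MINSIGSTKSZ
 *			.ss_flags = 0,
 *		};
 *		struct sigaction sa = {
 *			.sa_handler = on_segv,
 *			.sa_flags = SA_ONSTACK,	// deliver on the alternate stack
 *		};
 *
 *		if (!ss.ss_sp || sigaltstack(&ss, NULL) == -1 ||
 *		    sigaction(SIGSEGV, &sa, NULL) == -1)
 *			return 1;
 *
 *		raise(SIGSEGV);			// handler runs on ss.ss_sp
 *		return 0;
 *	}
 */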