// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know even about ignored signals, unless the
	 * signal is SIGKILL, which can't be reported anyway but can be
	 * ignored by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
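/*
 * "Implicitly ignored" in sig_handler_ignored() above means SIG_DFL
 * for a signal whose default action is to ignore it: SIGCONT, SIGCHLD,
 * SIGWINCH and SIGURG. Any other SIG_DFL disposition has a real
 * default action and is never treated as ignored here.
 */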
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
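/*
 * Example of the priority this mask gives: if both SIGSEGV
 * (synchronous, raised by a fault) and SIGUSR1 (asynchronous) are
 * pending and unblocked in the first word, next_signal() below
 * reports SIGSEGV first, so the fault is handled at the instruction
 * that raised it.
 */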
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING. If a stop signo is being set, the existing signo is
 * cleared. If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if it became a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer. Note that we don't need any further
 * locking. @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
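/*
 * A note on the jobctl bits handled here: JOBCTL_STOP_PENDING means a
 * group stop has been decided on for this task, JOBCTL_STOP_CONSUME
 * means it still has to decrement group_stop_count, and
 * JOBCTL_TRAPPING is only meaningful while some stop or trap is still
 * pending - which is why clearing the last pending bit in
 * task_clear_jobctl_pending() below also clears TRAPPING.
 */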
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop. Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;

	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;

		if (task_set_jobctl_pending(task, signr | gstop)) {
			sig->group_stop_count++;
		}
	}
}
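/*
 * Queued sigqueue entries are charged to the sending user and checked
 * against RLIMIT_SIGPENDING in __sigqueue_alloc() below. Callers may
 * pass override_rlimit for signals that must not be lost; otherwise,
 * once a user is at the limit the allocation is refused: rt signals
 * sent with sigqueue() then fail with -EAGAIN, while legacy signals
 * are still delivered but lose their siginfo, and
 * print_dropped_signal() logs a ratelimited message.
 */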
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
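/*
 * flush_signal_handlers() below gives execve() its POSIX signal
 * semantics: a caught signal is reset to SIG_DFL in the new image
 * (the old handler address would be meaningless there), while an
 * ignored disposition is preserved unless force_default is set.
 */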
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal. Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue. This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space. So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
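/*
 * Dequeueing order below: dequeue_signal() always tries the task's
 * private pending queue before the thread group's shared queue, so a
 * thread-directed signal (e.g. from tgkill()) is picked up before a
 * process-wide one that is pending at the same time.
 */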
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal. Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL). So those cases clear this
		 * shared flag after we've set it. Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled. That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks. Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the
 * next TRAP_STOP to notify the ptracer of an event. @t must have been
 * seized by the ptracer.
 *
 * If @t is running, a STOP trap will be taken. If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event. If trapped otherwise, the STOP trap will
 * eventually be taken without returning to userland after the existing
 * traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
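/*
 * Example of the generation-time side effects implemented below:
 * generating SIGCONT for a stopped job wakes its threads even if the
 * target ignores or blocks SIGCONT, and generating a stop signal
 * discards any not-yet-dequeued SIGCONT, so the most recent
 * stop/continue request always wins.
 */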
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal. Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}
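/*
 * complete_signal() below starts its search at signal->curr_target and
 * remembers the thread it picked, so successive process-wide signals
 * are rotated over the threads that pass wants_signal() instead of
 * always waking the same one.
 */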
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread. If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
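/*
 * Coalescing example: if SIGCHLD is already pending on the target
 * queue, legacy_queue() makes a second SIGCHLD a no-op - each non-rt
 * signal is queued at most once, which is why a SIGCHLD handler must
 * reap all exited children, not just one. Rt signals (>= SIGRTMIN)
 * queue every time.
 */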
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism. It is implementation
	 * defined whether kill() does so. We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort. We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information. We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;

		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;

			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}
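/*
 * send_signal() below fixes up si_pid and si_uid when a signal
 * crosses pid or user namespaces: a sender that has no pid in the
 * target's pid namespace is reported as si_pid 0 and delivery is
 * forced, and si_uid is translated into the target's user namespace
 * with from_kuid_munged().
 */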
static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special or have ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
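/*
 * Note that do_send_sig_info() above returns -ESRCH when
 * lock_task_sighand() fails, i.e. when the target has already
 * released its signal handlers on exit; callers treat this the same
 * as "no such process".
 */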
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want the handler of a signal that user space had
 * explicitly blocked to be invoked anyway.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
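/*
 * Group delivery above is best effort: __kill_pgrp_info() succeeds if
 * at least one member of the process group accepted the signal, which
 * is the behaviour wanted for ^C from the tty even when some members
 * are already exiting.
 */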
int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong. The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace. The 32bit address will be encoded in the low
 * 32 bits of the pointer, and those low 32 bits will be stored at a
 * higher address than a 32bit pointer expects, so userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
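/*
 * kill(2) encodes its target in the sign of pid, which
 * kill_something_info() below decodes:
 *
 *	pid > 0		exactly the process with that pid
 *	pid == 0	every process in the caller's process group
 *	pid == -1	every process the caller may signal, except
 *			init (vpid 1) and the caller's own thread group
 *	pid < -1	every process in process group -pid
 */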
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong. Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;

		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

/*
 * For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create. If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
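/*
 * Typical lifetime of such a preallocated entry, as used by POSIX
 * timers: sigqueue_alloc() at timer_create() time (so a failure can
 * be reported as EAGAIN up front), send_sigqueue() at every
 * expiration, and sigqueue_free() at timer deletion - which, as
 * handled below, must cope with the entry still sitting on a pending
 * queue.
 */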
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}
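/*
 * do_notify_pidfd() above wakes every waiter polling a pidfd for this
 * task's exit. It is called unconditionally from do_notify_parent()
 * below, before any SIGCHLD bookkeeping, so pidfd waiters do not
 * depend on the parent's SIGCHLD disposition.
 */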
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
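/*
 * Example of the autoreap result above: when the parent has SIGCHLD
 * set to SIG_IGN or uses SA_NOCLDWAIT, do_notify_parent() returns
 * true, the exiting task reaps itself and leaves no zombie, and the
 * parent is still woken because a blocked wait4() must now return
 * -ECHILD.
 */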
2005 */
2006 static void do_notify_parent_cldstop(struct task_struct *tsk,
2007 bool for_ptracer, int why)
2008 {
2009 struct kernel_siginfo info;
2010 unsigned long flags;
2011 struct task_struct *parent;
2012 struct sighand_struct *sighand;
2013 u64 utime, stime;
2014
2015 if (for_ptracer) {
2016 parent = tsk->parent;
2017 } else {
2018 tsk = tsk->group_leader;
2019 parent = tsk->real_parent;
2020 }
2021
2022 clear_siginfo(&info);
2023 info.si_signo = SIGCHLD;
2024 info.si_errno = 0;
2025 /*
2026 * see comment in do_notify_parent() about the following 4 lines
2027 */
2028 rcu_read_lock();
2029 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2030 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2031 rcu_read_unlock();
2032
2033 task_cputime(tsk, &utime, &stime);
2034 info.si_utime = nsec_to_clock_t(utime);
2035 info.si_stime = nsec_to_clock_t(stime);
2036
2037 info.si_code = why;
2038 switch (why) {
2039 case CLD_CONTINUED:
2040 info.si_status = SIGCONT;
2041 break;
2042 case CLD_STOPPED:
2043 info.si_status = tsk->signal->group_exit_code & 0x7f;
2044 break;
2045 case CLD_TRAPPED:
2046 info.si_status = tsk->exit_code & 0x7f;
2047 break;
2048 default:
2049 BUG();
2050 }
2051
2052 sighand = parent->sighand;
2053 spin_lock_irqsave(&sighand->siglock, flags);
2054 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2055 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2056 __group_send_sig_info(SIGCHLD, &info, parent);
2057 /*
2058 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2059 */
2060 __wake_up_parent(tsk, parent);
2061 spin_unlock_irqrestore(&sighand->siglock, flags);
2062 }
2063
2064 static inline bool may_ptrace_stop(void)
2065 {
2066 if (!likely(current->ptrace))
2067 return false;
2068 /*
2069 * Are we in the middle of do_coredump?
2070 * If so, and our tracer is also part of the coredump, stopping
2071 * is a deadlock situation and pointless because our tracer is
2072 * dead, so don't allow us to stop.
2073 * If SIGKILL was already sent before the caller unlocked
2074 * ->siglock we must see ->core_state != NULL. Otherwise it
2075 * is safe to enter schedule().
2076 *
2077 * This is almost outdated: a task with a pending SIGKILL can't
2078 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2079 * after SIGKILL was already dequeued.
2080 */
2081 if (unlikely(current->mm->core_state) &&
2082 unlikely(current->mm == current->parent->mm))
2083 return false;
2084
2085 return true;
2086 }
2087
2088 /*
2089 * Return true if there is a SIGKILL that should be waking us up.
2090 * Called with the siglock held.
2091 */
2092 static bool sigkill_pending(struct task_struct *tsk)
2093 {
2094 return sigismember(&tsk->pending.signal, SIGKILL) ||
2095 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2096 }
2097
2098 /*
2099 * This must be called with current->sighand->siglock held.
2100 *
2101 * This should be the path for all ptrace stops.
2102 * We always set current->last_siginfo while stopped here.
2103 * That makes it a way to test a stopped process for
2104 * being ptrace-stopped vs being job-control-stopped.
2105 *
2106 * If we actually decide not to stop at all because the tracer
2107 * is gone, we keep current->exit_code unless clear_code.
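 * For example, ptrace_do_notify() below passes clear_code == 1 so that
 * a vanished debugger does not leave a stale ->exit_code behind, while
 * ptrace_signal() passes 0 and consumes ->exit_code itself on return.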
2108 */
2109 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2110 __releases(&current->sighand->siglock)
2111 __acquires(&current->sighand->siglock)
2112 {
2113 bool gstop_done = false;
2114
2115 if (arch_ptrace_stop_needed(exit_code, info)) {
2116 /*
2117 * The arch code has something special to do before a
2118 * ptrace stop. This is allowed to block, e.g. for faults
2119 * on user stack pages. We can't keep the siglock while
2120 * calling arch_ptrace_stop, so we must release it now.
2121 * To preserve proper semantics, we must do this before
2122 * any signal bookkeeping like checking group_stop_count.
2123 * Meanwhile, a SIGKILL could come in before we retake the
2124 * siglock. That must prevent us from sleeping in TASK_TRACED.
2125 * So after regaining the lock, we must check for SIGKILL.
2126 */
2127 spin_unlock_irq(&current->sighand->siglock);
2128 arch_ptrace_stop(exit_code, info);
2129 spin_lock_irq(&current->sighand->siglock);
2130 if (sigkill_pending(current))
2131 return;
2132 }
2133
2134 set_special_state(TASK_TRACED);
2135
2136 /*
2137 * We're committing to trapping. TRACED should be visible before
2138 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2139 * Also, transition to TRACED and updates to ->jobctl should be
2140 * atomic with respect to siglock and should be done after the arch
2141 * hook as siglock is released and regrabbed across it.
2142 *
2143 * TRACER TRACEE
2144 *
2145 * ptrace_attach()
2146 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2147 * do_wait()
2148 * set_current_state() smp_wmb();
2149 * ptrace_do_wait()
2150 * wait_task_stopped()
2151 * task_stopped_code()
2152 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2153 */
2154 smp_wmb();
2155
2156 current->last_siginfo = info;
2157 current->exit_code = exit_code;
2158
2159 /*
2160 * If @why is CLD_STOPPED, we're trapping to participate in a group
2161 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2162 * across siglock relocks since INTERRUPT was scheduled, PENDING
2163 * could be clear now. We act as if SIGCONT is received after
2164 * TASK_TRACED is entered - ignore it.
2165 */
2166 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2167 gstop_done = task_participate_group_stop(current);
2168
2169 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2170 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2171 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2172 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2173
2174 /* entering a trap, clear TRAPPING */
2175 task_clear_jobctl_trapping(current);
2176
2177 spin_unlock_irq(&current->sighand->siglock);
2178 read_lock(&tasklist_lock);
2179 if (may_ptrace_stop()) {
2180 /*
2181 * Notify parents of the stop.
2182 *
2183 * While ptraced, there are two parents - the ptracer and
2184 * the real_parent of the group_leader. The ptracer should
2185 * know about every stop while the real parent is only
2186 * interested in the completion of group stop. The states
2187 * for the two don't interact with each other. Notify
2188 * separately unless they're gonna be duplicates.
2189 */
2190 do_notify_parent_cldstop(current, true, why);
2191 if (gstop_done && ptrace_reparented(current))
2192 do_notify_parent_cldstop(current, false, why);
2193
2194 /*
2195 * Don't want to allow preemption here, because
2196 * sys_ptrace() needs this task to be inactive.
2197 *
2198 * XXX: implement read_unlock_no_resched().
2199 */
2200 preempt_disable();
2201 read_unlock(&tasklist_lock);
2202 preempt_enable_no_resched();
2203 cgroup_enter_frozen();
2204 freezable_schedule();
2205 cgroup_leave_frozen(true);
2206 } else {
2207 /*
2208 * By the time we got the lock, our tracer went away.
2209 * Don't drop the lock yet, another tracer may come.
2210 *
2211 * If @gstop_done, the ptracer went away between group stop
2212 * completion and here. During detach, it would have set
2213 * JOBCTL_STOP_PENDING on us and we'll re-enter
2214 * TASK_STOPPED in do_signal_stop() on return, so notifying
2215 * the real parent of the group stop completion is enough.
2216 */
2217 if (gstop_done)
2218 do_notify_parent_cldstop(current, false, why);
2219
2220 /* tasklist protects us from ptrace_freeze_traced() */
2221 __set_current_state(TASK_RUNNING);
2222 if (clear_code)
2223 current->exit_code = 0;
2224 read_unlock(&tasklist_lock);
2225 }
2226
2227 /*
2228 * We are back. Now reacquire the siglock before touching
2229 * last_siginfo, so that we are sure to have synchronized with
2230 * any signal-sending on another CPU that wants to examine it.
2231 */
2232 spin_lock_irq(&current->sighand->siglock);
2233 current->last_siginfo = NULL;
2234
2235 /* LISTENING can be set only during STOP traps, clear it */
2236 current->jobctl &= ~JOBCTL_LISTENING;
2237
2238 /*
2239 * Queued signals ignored us while we were stopped for tracing.
2240 * So check for any that we should take before resuming user mode.
2241 * This sets TIF_SIGPENDING, but never clears it.
2242 */
2243 recalc_sigpending_tsk(current);
2244 }
2245
2246 static void ptrace_do_notify(int signr, int exit_code, int why)
2247 {
2248 kernel_siginfo_t info;
2249
2250 clear_siginfo(&info);
2251 info.si_signo = signr;
2252 info.si_code = exit_code;
2253 info.si_pid = task_pid_vnr(current);
2254 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2255
2256 /* Let the debugger run. */
2257 ptrace_stop(exit_code, why, 1, &info);
2258 }
2259
2260 void ptrace_notify(int exit_code)
2261 {
2262 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2263 if (unlikely(current->task_works))
2264 task_work_run();
2265
2266 spin_lock_irq(&current->sighand->siglock);
2267 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2268 spin_unlock_irq(&current->sighand->siglock);
2269 }
2270
2271 /**
2272 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2273 * @signr: signr causing group stop if initiating
2274 *
2275 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2276 * and participate in it. If already set, participate in the existing
2277 * group stop. If participated in a group stop (and thus slept), %true is
2278 * returned with siglock released.
2279 *
2280 * If ptraced, this function doesn't handle stop itself. Instead,
2281 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2282 * untouched. The caller must ensure that INTERRUPT trap handling takes
2283 * place afterwards.
2284 *
2285 * CONTEXT:
2286 * Must be called with @current->sighand->siglock held, which is released
2287 * on %true return.
2288 *
2289 * RETURNS:
2290 * %false if group stop is already cancelled or ptrace trap is scheduled.
2291 * %true if participated in group stop.
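 *
 * A minimal caller sketch (it mirrors the use in get_signal() below
 * and is shown for illustration only):
 *
 *	if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
 *	    do_signal_stop(0))
 *		goto relock;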
2292 */
2293 static bool do_signal_stop(int signr)
2294 __releases(&current->sighand->siglock)
2295 {
2296 struct signal_struct *sig = current->signal;
2297
2298 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2299 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2300 struct task_struct *t;
2301
2302 /* signr will be recorded in task->jobctl for retries */
2303 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2304
2305 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2306 unlikely(signal_group_exit(sig)))
2307 return false;
2308 /*
2309 * There is no group stop already in progress. We must
2310 * initiate one now.
2311 *
2312 * While ptraced, a task may be resumed while group stop is
2313 * still in effect and then receive a stop signal and
2314 * initiate another group stop. This deviates from the
2315 * usual behavior as two consecutive stop signals can't
2316 * cause two group stops when !ptraced. That is why we
2317 * also check !task_is_stopped(t) below.
2318 *
2319 * The condition can be distinguished by testing whether
2320 * SIGNAL_STOP_STOPPED is already set. Don't generate
2321 * group_exit_code in such case.
2322 *
2323 * This is not necessary for SIGNAL_STOP_CONTINUED because
2324 * an intervening stop signal is required to cause two
2325 * continued events regardless of ptrace.
2326 */
2327 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2328 sig->group_exit_code = signr;
2329
2330 sig->group_stop_count = 0;
2331
2332 if (task_set_jobctl_pending(current, signr | gstop))
2333 sig->group_stop_count++;
2334
2335 t = current;
2336 while_each_thread(current, t) {
2337 /*
2338 * Setting state to TASK_STOPPED for a group
2339 * stop is always done with the siglock held,
2340 * so this check has no races.
2341 */
2342 if (!task_is_stopped(t) &&
2343 task_set_jobctl_pending(t, signr | gstop)) {
2344 sig->group_stop_count++;
2345 if (likely(!(t->ptrace & PT_SEIZED)))
2346 signal_wake_up(t, 0);
2347 else
2348 ptrace_trap_notify(t);
2349 }
2350 }
2351 }
2352
2353 if (likely(!current->ptrace)) {
2354 int notify = 0;
2355
2356 /*
2357 * If there are no other threads in the group, or if there
2358 * is a group stop in progress and we are the last to stop,
2359 * report to the parent.
2360 */
2361 if (task_participate_group_stop(current))
2362 notify = CLD_STOPPED;
2363
2364 set_special_state(TASK_STOPPED);
2365 spin_unlock_irq(&current->sighand->siglock);
2366
2367 /*
2368 * Notify the parent of the group stop completion. Because
2369 * we're not holding either the siglock or tasklist_lock
2370 * here, the ptracer may attach in between; however, this is
2371 * for group stop and should always be delivered to the real
2372 * parent of the group leader. The new ptracer will get
2373 * its notification when this task transitions into
2374 * TASK_TRACED.
2375 */
2376 if (notify) {
2377 read_lock(&tasklist_lock);
2378 do_notify_parent_cldstop(current, false, notify);
2379 read_unlock(&tasklist_lock);
2380 }
2381
2382 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2383 cgroup_enter_frozen();
2384 freezable_schedule();
2385 return true;
2386 } else {
2387 /*
2388 * While ptraced, group stop is handled by STOP trap.
2389 * Schedule it and let the caller deal with it.
2390 */
2391 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2392 return false;
2393 }
2394 }
2395
2396 /**
2397 * do_jobctl_trap - take care of ptrace jobctl traps
2398 *
2399 * When PT_SEIZED, it's used for both group stop and explicit
2400 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2401 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2402 * the stop signal; otherwise, %SIGTRAP.
2403 *
2404 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2405 * number as exit_code and no siginfo.
2406 *
2407 * CONTEXT:
2408 * Must be called with @current->sighand->siglock held, which may be
2409 * released and re-acquired before returning with intervening sleep.
2410 */
2411 static void do_jobctl_trap(void)
2412 {
2413 struct signal_struct *signal = current->signal;
2414 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2415
2416 if (current->ptrace & PT_SEIZED) {
2417 if (!signal->group_stop_count &&
2418 !(signal->flags & SIGNAL_STOP_STOPPED))
2419 signr = SIGTRAP;
2420 WARN_ON_ONCE(!signr);
2421 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2422 CLD_STOPPED);
2423 } else {
2424 WARN_ON_ONCE(!signr);
2425 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2426 current->exit_code = 0;
2427 }
2428 }
2429
2430 /**
2431 * do_freezer_trap - handle the freezer jobctl trap
2432 *
2433 * Puts the task into frozen state, unless the task is about to quit,
2434 * in which case JOBCTL_TRAP_FREEZE is dropped.
2435 *
2436 * CONTEXT:
2437 * Must be called with @current->sighand->siglock held,
2438 * which is always released before returning.
2439 */
2440 static void do_freezer_trap(void)
2441 __releases(&current->sighand->siglock)
2442 {
2443 /*
2444 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2445 * let's make another loop to give it a chance to be handled.
2446 * In any case, we'll come back here.
2447 */
2448 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2449 JOBCTL_TRAP_FREEZE) {
2450 spin_unlock_irq(&current->sighand->siglock);
2451 return;
2452 }
2453
2454 /*
2455 * Now we're sure that there is no pending fatal signal and no
2456 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2457 * immediately (if there is a non-fatal signal pending), and
2458 * put the task into sleep.
2459 */
2460 __set_current_state(TASK_INTERRUPTIBLE);
2461 clear_thread_flag(TIF_SIGPENDING);
2462 spin_unlock_irq(&current->sighand->siglock);
2463 cgroup_enter_frozen();
2464 freezable_schedule();
2465 }
2466
2467 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2468 {
2469 /*
2470 * We do not check sig_kernel_stop(signr) but set this marker
2471 * unconditionally because we do not know whether debugger will
2472 * change signr. This flag has no meaning unless we are going
2473 * to stop after return from ptrace_stop(). In this case it will
2474 * be checked in do_signal_stop(), we should only stop if it was
2475 * not cleared by SIGCONT while we were sleeping. See also the
2476 * comment in dequeue_signal().
2477 */
2478 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2479 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2480
2481 /* We're back. Did the debugger cancel the sig? */
2482 signr = current->exit_code;
2483 if (signr == 0)
2484 return signr;
2485
2486 current->exit_code = 0;
2487
2488 /*
2489 * Update the siginfo structure if the signal has
2490 * changed. If the debugger wanted something
2491 * specific in the siginfo structure then it should
2492 * have updated *info via PTRACE_SETSIGINFO.
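 *
 * Illustrative tracer-side sketch (userspace, tracee stopped in
 * signal-delivery-stop; not kernel code):
 *
 *	siginfo_t si;
 *	ptrace(PTRACE_GETSIGINFO, pid, 0, &si);
 *	si.si_signo = SIGUSR1;
 *	ptrace(PTRACE_SETSIGINFO, pid, 0, &si);
 *	ptrace(PTRACE_CONT, pid, 0, SIGUSR1);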
2493 */
2494 if (signr != info->si_signo) {
2495 clear_siginfo(info);
2496 info->si_signo = signr;
2497 info->si_errno = 0;
2498 info->si_code = SI_USER;
2499 rcu_read_lock();
2500 info->si_pid = task_pid_vnr(current->parent);
2501 info->si_uid = from_kuid_munged(current_user_ns(),
2502 task_uid(current->parent));
2503 rcu_read_unlock();
2504 }
2505
2506 /* If the (new) signal is now blocked, requeue it. */
2507 if (sigismember(&current->blocked, signr)) {
2508 send_signal(signr, info, current, PIDTYPE_PID);
2509 signr = 0;
2510 }
2511
2512 return signr;
2513 }
2514
2515 bool get_signal(struct ksignal *ksig)
2516 {
2517 struct sighand_struct *sighand = current->sighand;
2518 struct signal_struct *signal = current->signal;
2519 int signr;
2520
2521 if (unlikely(current->task_works))
2522 task_work_run();
2523
2524 if (unlikely(uprobe_deny_signal()))
2525 return false;
2526
2527 /*
2528 * Do this once, we can't return to user-mode if freezing() == T.
2529 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2530 * thus do not need another check after return.
2531 */
2532 try_to_freeze();
2533
2534 relock:
2535 spin_lock_irq(&sighand->siglock);
2536 /*
2537 * Every stopped thread goes here after wakeup. Check to see if
2538 * we should notify the parent, prepare_signal(SIGCONT) encodes
2539 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2540 */
2541 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2542 int why;
2543
2544 if (signal->flags & SIGNAL_CLD_CONTINUED)
2545 why = CLD_CONTINUED;
2546 else
2547 why = CLD_STOPPED;
2548
2549 signal->flags &= ~SIGNAL_CLD_MASK;
2550
2551 spin_unlock_irq(&sighand->siglock);
2552
2553 /*
2554 * Notify the parent that we're continuing. This event is
2555 * always per-process and doesn't make a whole lot of sense
2556 * for ptracers, who shouldn't consume the state via
2557 * wait(2) either, but, for backward compatibility, notify
2558 * the ptracer of the group leader too unless it's gonna be
2559 * a duplicate.
2560 */
2561 read_lock(&tasklist_lock);
2562 do_notify_parent_cldstop(current, false, why);
2563
2564 if (ptrace_reparented(current->group_leader))
2565 do_notify_parent_cldstop(current->group_leader,
2566 true, why);
2567 read_unlock(&tasklist_lock);
2568
2569 goto relock;
2570 }
2571
2572 /* Has this task already been marked for death? */
2573 if (signal_group_exit(signal)) {
2574 ksig->info.si_signo = signr = SIGKILL;
2575 sigdelset(&current->pending.signal, SIGKILL);
2576 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2577 &sighand->action[SIGKILL - 1]);
2578 recalc_sigpending();
2579 goto fatal;
2580 }
2581
2582 for (;;) {
2583 struct k_sigaction *ka;
2584
2585 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2586 do_signal_stop(0))
2587 goto relock;
2588
2589 if (unlikely(current->jobctl &
2590 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2591 if (current->jobctl & JOBCTL_TRAP_MASK) {
2592 do_jobctl_trap();
2593 spin_unlock_irq(&sighand->siglock);
2594 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2595 do_freezer_trap();
2596
2597 goto relock;
2598 }
2599
2600 /*
2601 * If the task is leaving the frozen state, let's update
2602 * cgroup counters and reset the frozen bit.
2603 */
2604 if (unlikely(cgroup_task_frozen(current))) {
2605 spin_unlock_irq(&sighand->siglock);
2606 cgroup_leave_frozen(false);
2607 goto relock;
2608 }
2609
2610 /*
2611 * Signals generated by the execution of an instruction
2612 * need to be delivered before any other pending signals
2613 * so that the instruction pointer in the signal stack
2614 * frame points to the faulting instruction.
2615 */
2616 signr = dequeue_synchronous_signal(&ksig->info);
2617 if (!signr)
2618 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2619
2620 if (!signr)
2621 break; /* will return 0 */
2622
2623 if (unlikely(current->ptrace) && signr != SIGKILL) {
2624 signr = ptrace_signal(signr, &ksig->info);
2625 if (!signr)
2626 continue;
2627 }
2628
2629 ka = &sighand->action[signr-1];
2630
2631 /* Trace actually delivered signals. */
2632 trace_signal_deliver(signr, &ksig->info, ka);
2633
2634 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2635 continue;
2636 if (ka->sa.sa_handler != SIG_DFL) {
2637 /* Run the handler. */
2638 ksig->ka = *ka;
2639
2640 if (ka->sa.sa_flags & SA_ONESHOT)
2641 ka->sa.sa_handler = SIG_DFL;
2642
2643 break; /* will return non-zero "signr" value */
2644 }
2645
2646 /*
2647 * Now we are doing the default action for this signal.
2648 */
2649 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2650 continue;
2651
2652 /*
2653 * Global init gets no signals it doesn't want.
2654 * Container-init gets no signals it doesn't want from the
2655 * same container.
2656 *
2657 * Note that if global/container-init sees a sig_kernel_only()
2658 * signal here, the signal must have been generated internally
2659 * or must have come from an ancestor namespace. In either
2660 * case, the signal cannot be dropped.
2661 */
2662 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2663 !sig_kernel_only(signr))
2664 continue;
2665
2666 if (sig_kernel_stop(signr)) {
2667 /*
2668 * The default action is to stop all threads in
2669 * the thread group. The job control signals
2670 * do nothing in an orphaned pgrp, but SIGSTOP
2671 * always works. Note that siglock needs to be
2672 * dropped during the call to is_orphaned_pgrp()
2673 * because of lock ordering with tasklist_lock.
2674 * This allows an intervening SIGCONT to be posted.
2675 * We need to check for that and bail out if necessary.
2676 */
2677 if (signr != SIGSTOP) {
2678 spin_unlock_irq(&sighand->siglock);
2679
2680 /* signals can be posted during this window */
2681
2682 if (is_current_pgrp_orphaned())
2683 goto relock;
2684
2685 spin_lock_irq(&sighand->siglock);
2686 }
2687
2688 if (likely(do_signal_stop(ksig->info.si_signo))) {
2689 /* It released the siglock. */
2690 goto relock;
2691 }
2692
2693 /*
2694 * We didn't actually stop, due to a race
2695 * with SIGCONT or something like that.
2696 */
2697 continue;
2698 }
2699
2700 fatal:
2701 spin_unlock_irq(&sighand->siglock);
2702 if (unlikely(cgroup_task_frozen(current)))
2703 cgroup_leave_frozen(true);
2704
2705 /*
2706 * Anything else is fatal, maybe with a core dump.
2707 */
2708 current->flags |= PF_SIGNALED;
2709
2710 if (sig_kernel_coredump(signr)) {
2711 if (print_fatal_signals)
2712 print_fatal_signal(ksig->info.si_signo);
2713 proc_coredump_connector(current);
2714 /*
2715 * If it was able to dump core, this kills all
2716 * other threads in the group and synchronizes with
2717 * their demise. If we lost the race with another
2718 * thread getting here, it set group_exit_code
2719 * first and our do_group_exit call below will use
2720 * that value and ignore the one we pass it.
2721 */
2722 do_coredump(&ksig->info);
2723 }
2724
2725 /*
2726 * Death signals, no core dump.
2727 */
2728 do_group_exit(ksig->info.si_signo);
2729 /* NOTREACHED */
2730 }
2731 spin_unlock_irq(&sighand->siglock);
2732
2733 ksig->sig = signr;
2734 return ksig->sig > 0;
2735 }
2736
2737 /**
2738 * signal_delivered - called after a signal has been delivered
2739 * @ksig: kernel signal struct
2740 * @stepping: nonzero if debugger single-step or block-step in use
2741 *
2742 * This function should be called when a signal has successfully been
2743 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2744 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2745 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2746 */
2747 static void signal_delivered(struct ksignal *ksig, int stepping)
2748 {
2749 sigset_t blocked;
2750
2751 /* A signal was successfully delivered, and the
2752 saved sigmask was stored on the signal frame,
2753 and will be restored by sigreturn. So we can
2754 simply clear the restore sigmask flag. */
2755 clear_restore_sigmask();
2756
2757 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2758 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2759 sigaddset(&blocked, ksig->sig);
2760 set_current_blocked(&blocked);
2761 tracehook_signal_handler(stepping);
2762 }
2763
2764 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2765 {
2766 if (failed)
2767 force_sigsegv(ksig->sig);
2768 else
2769 signal_delivered(ksig, stepping);
2770 }
2771
2772 /*
2773 * It could be that complete_signal() picked us to notify about the
2774 * group-wide signal. Other threads should be notified now to take
2775 * the shared signals in @which since we will not.
2776 */
2777 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2778 {
2779 sigset_t retarget;
2780 struct task_struct *t;
2781
2782 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2783 if (sigisemptyset(&retarget))
2784 return;
2785
2786 t = tsk;
2787 while_each_thread(tsk, t) {
2788 if (t->flags & PF_EXITING)
2789 continue;
2790
2791 if (!has_pending_signals(&retarget, &t->blocked))
2792 continue;
2793 /* Remove the signals this thread can handle. */
2794 sigandsets(&retarget, &retarget, &t->blocked);
2795
2796 if (!signal_pending(t))
2797 signal_wake_up(t, 0);
2798
2799 if (sigisemptyset(&retarget))
2800 break;
2801 }
2802 }
2803
2804 void exit_signals(struct task_struct *tsk)
2805 {
2806 int group_stop = 0;
2807 sigset_t unblocked;
2808
2809 /*
2810 * @tsk is about to have PF_EXITING set - lock out users which
2811 * expect stable threadgroup.
2812 */
2813 cgroup_threadgroup_change_begin(tsk);
2814
2815 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2816 tsk->flags |= PF_EXITING;
2817 cgroup_threadgroup_change_end(tsk);
2818 return;
2819 }
2820
2821 spin_lock_irq(&tsk->sighand->siglock);
2822 /*
2823 * From now this task is not visible for group-wide signals,
2824 * see wants_signal(), do_signal_stop().
2825 */
2826 tsk->flags |= PF_EXITING;
2827
2828 cgroup_threadgroup_change_end(tsk);
2829
2830 if (!signal_pending(tsk))
2831 goto out;
2832
2833 unblocked = tsk->blocked;
2834 signotset(&unblocked);
2835 retarget_shared_pending(tsk, &unblocked);
2836
2837 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2838 task_participate_group_stop(tsk))
2839 group_stop = CLD_STOPPED;
2840 out:
2841 spin_unlock_irq(&tsk->sighand->siglock);
2842
2843 /*
2844 * If group stop has completed, deliver the notification. This
2845 * should always go to the real parent of the group leader.
2846 */
2847 if (unlikely(group_stop)) {
2848 read_lock(&tasklist_lock);
2849 do_notify_parent_cldstop(tsk, false, group_stop);
2850 read_unlock(&tasklist_lock);
2851 }
2852 }
2853
2854 /*
2855 * System call entry points.
2856 */
2857
2858 /**
2859 * sys_restart_syscall - restart a system call
2860 */
2861 SYSCALL_DEFINE0(restart_syscall)
2862 {
2863 struct restart_block *restart = &current->restart_block;
2864 return restart->fn(restart);
2865 }
2866
2867 long do_no_restart_syscall(struct restart_block *param)
2868 {
2869 return -EINTR;
2870 }
2871
2872 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2873 {
2874 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2875 sigset_t newblocked;
2876 /* A set of now blocked but previously unblocked signals. */
2877 sigandnsets(&newblocked, newset, &current->blocked);
2878 retarget_shared_pending(tsk, &newblocked);
2879 }
2880 tsk->blocked = *newset;
2881 recalc_sigpending();
2882 }
2883
2884 /**
2885 * set_current_blocked - change current->blocked mask
2886 * @newset: new mask
2887 *
2888 * It is wrong to change ->blocked directly, this helper should be used
2889 * to ensure the process can't miss a shared signal we are going to block.
2890 */
2891 void set_current_blocked(sigset_t *newset)
2892 {
2893 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2894 __set_current_blocked(newset);
2895 }
2896
2897 void __set_current_blocked(const sigset_t *newset)
2898 {
2899 struct task_struct *tsk = current;
2900
2901 /*
2902 * In case the signal mask hasn't changed, there is nothing we need
2903 * to do. The current->blocked shouldn't be modified by another task.
2904 */
2905 if (sigequalsets(&tsk->blocked, newset))
2906 return;
2907
2908 spin_lock_irq(&tsk->sighand->siglock);
2909 __set_task_blocked(tsk, newset);
2910 spin_unlock_irq(&tsk->sighand->siglock);
2911 }
2912
2913 /*
2914 * This is also useful for kernel threads that want to temporarily
2915 * (or permanently) block certain signals.
2916 *
2917 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2918 * interface happily blocks "unblockable" signals like SIGKILL
2919 * and friends.
2920 */
2921 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2922 {
2923 struct task_struct *tsk = current;
2924 sigset_t newset;
2925
2926 /* Lockless, only current can change ->blocked, never from irq */
2927 if (oldset)
2928 *oldset = tsk->blocked;
2929
2930 switch (how) {
2931 case SIG_BLOCK:
2932 sigorsets(&newset, &tsk->blocked, set);
2933 break;
2934 case SIG_UNBLOCK:
2935 sigandnsets(&newset, &tsk->blocked, set);
2936 break;
2937 case SIG_SETMASK:
2938 newset = *set;
2939 break;
2940 default:
2941 return -EINVAL;
2942 }
2943
2944 __set_current_blocked(&newset);
2945 return 0;
2946 }
2947 EXPORT_SYMBOL(sigprocmask);
2948
2949 /*
2950 * The API helps set app-provided sigmasks.
2951 * 2952 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and 2953 * epoll_pwait where a new sigmask is passed from userland for the syscalls. 2954 */ 2955 int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set, 2956 sigset_t *oldset, size_t sigsetsize) 2957 { 2958 if (!usigmask) 2959 return 0; 2960 2961 if (sigsetsize != sizeof(sigset_t)) 2962 return -EINVAL; 2963 if (copy_from_user(set, usigmask, sizeof(sigset_t))) 2964 return -EFAULT; 2965 2966 *oldset = current->blocked; 2967 set_current_blocked(set); 2968 2969 return 0; 2970 } 2971 EXPORT_SYMBOL(set_user_sigmask); 2972 2973 #ifdef CONFIG_COMPAT 2974 int set_compat_user_sigmask(const compat_sigset_t __user *usigmask, 2975 sigset_t *set, sigset_t *oldset, 2976 size_t sigsetsize) 2977 { 2978 if (!usigmask) 2979 return 0; 2980 2981 if (sigsetsize != sizeof(compat_sigset_t)) 2982 return -EINVAL; 2983 if (get_compat_sigset(set, usigmask)) 2984 return -EFAULT; 2985 2986 *oldset = current->blocked; 2987 set_current_blocked(set); 2988 2989 return 0; 2990 } 2991 EXPORT_SYMBOL(set_compat_user_sigmask); 2992 #endif 2993 2994 /* 2995 * restore_user_sigmask: 2996 * usigmask: sigmask passed in from userland. 2997 * sigsaved: saved sigmask when the syscall started and changed the sigmask to 2998 * usigmask. 2999 * 3000 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and 3001 * epoll_pwait where a new sigmask is passed in from userland for the syscalls. 3002 */ 3003 void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved, 3004 bool interrupted) 3005 { 3006 3007 if (!usigmask) 3008 return; 3009 /* 3010 * When signals are pending, do not restore them here. 3011 * Restoring sigmask here can lead to delivering signals that the above 3012 * syscalls are intended to block because of the sigmask passed in. 3013 */ 3014 if (interrupted) { 3015 current->saved_sigmask = *sigsaved; 3016 set_restore_sigmask(); 3017 return; 3018 } 3019 3020 /* 3021 * This is needed because the fast syscall return path does not restore 3022 * saved_sigmask when signals are not pending. 3023 */ 3024 set_current_blocked(sigsaved); 3025 } 3026 EXPORT_SYMBOL(restore_user_sigmask); 3027 3028 /** 3029 * sys_rt_sigprocmask - change the list of currently blocked signals 3030 * @how: whether to add, remove, or set signals 3031 * @nset: stores pending signals 3032 * @oset: previous value of signal mask if non-null 3033 * @sigsetsize: size of sigset_t type 3034 */ 3035 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, 3036 sigset_t __user *, oset, size_t, sigsetsize) 3037 { 3038 sigset_t old_set, new_set; 3039 int error; 3040 3041 /* XXX: Don't preclude handling different sized sigset_t's. */ 3042 if (sigsetsize != sizeof(sigset_t)) 3043 return -EINVAL; 3044 3045 old_set = current->blocked; 3046 3047 if (nset) { 3048 if (copy_from_user(&new_set, nset, sizeof(sigset_t))) 3049 return -EFAULT; 3050 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 3051 3052 error = sigprocmask(how, &new_set, NULL); 3053 if (error) 3054 return error; 3055 } 3056 3057 if (oset) { 3058 if (copy_to_user(oset, &old_set, sizeof(sigset_t))) 3059 return -EFAULT; 3060 } 3061 3062 return 0; 3063 } 3064 3065 #ifdef CONFIG_COMPAT 3066 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset, 3067 compat_sigset_t __user *, oset, compat_size_t, sigsetsize) 3068 { 3069 sigset_t old_set = current->blocked; 3070 3071 /* XXX: Don't preclude handling different sized sigset_t's. 
3072 if (sigsetsize != sizeof(sigset_t))
3073 return -EINVAL;
3074
3075 if (nset) {
3076 sigset_t new_set;
3077 int error;
3078 if (get_compat_sigset(&new_set, nset))
3079 return -EFAULT;
3080 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3081
3082 error = sigprocmask(how, &new_set, NULL);
3083 if (error)
3084 return error;
3085 }
3086 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3087 }
3088 #endif
3089
3090 static void do_sigpending(sigset_t *set)
3091 {
3092 spin_lock_irq(&current->sighand->siglock);
3093 sigorsets(set, &current->pending.signal,
3094 &current->signal->shared_pending.signal);
3095 spin_unlock_irq(&current->sighand->siglock);
3096
3097 /* Outside the lock because only this thread touches it. */
3098 sigandsets(set, &current->blocked, set);
3099 }
3100
3101 /**
3102 * sys_rt_sigpending - examine pending signals that have been raised
3103 * while blocked
3104 * @uset: stores pending signals
3105 * @sigsetsize: size of sigset_t type or smaller
3106 */
3107 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3108 {
3109 sigset_t set;
3110
3111 if (sigsetsize > sizeof(*uset))
3112 return -EINVAL;
3113
3114 do_sigpending(&set);
3115
3116 if (copy_to_user(uset, &set, sigsetsize))
3117 return -EFAULT;
3118
3119 return 0;
3120 }
3121
3122 #ifdef CONFIG_COMPAT
3123 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3124 compat_size_t, sigsetsize)
3125 {
3126 sigset_t set;
3127
3128 if (sigsetsize > sizeof(*uset))
3129 return -EINVAL;
3130
3131 do_sigpending(&set);
3132
3133 return put_compat_sigset(uset, &set, sigsetsize);
3134 }
3135 #endif
3136
3137 static const struct {
3138 unsigned char limit, layout;
3139 } sig_sicodes[] = {
3140 [SIGILL] = { NSIGILL, SIL_FAULT },
3141 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3142 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3143 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3144 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3145 #if defined(SIGEMT)
3146 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3147 #endif
3148 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3149 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3150 [SIGSYS] = { NSIGSYS, SIL_SYS },
3151 };
3152
3153 static bool known_siginfo_layout(unsigned sig, int si_code)
3154 {
3155 if (si_code == SI_KERNEL)
3156 return true;
3157 else if (si_code > SI_USER) {
3158 if (sig_specific_sicodes(sig)) {
3159 if (si_code <= sig_sicodes[sig].limit)
3160 return true;
3161 }
3162 else if (si_code <= NSIGPOLL)
3163 return true;
3164 }
3165 else if (si_code >= SI_DETHREAD)
3166 return true;
3167 else if (si_code == SI_ASYNCNL)
3168 return true;
3169 return false;
3170 }
3171
3172 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3173 {
3174 enum siginfo_layout layout = SIL_KILL;
3175 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3176 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3177 (si_code <= sig_sicodes[sig].limit)) {
3178 layout = sig_sicodes[sig].layout;
3179 /* Handle the exceptions */
3180 if ((sig == SIGBUS) &&
3181 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3182 layout = SIL_FAULT_MCEERR;
3183 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3184 layout = SIL_FAULT_BNDERR;
3185 #ifdef SEGV_PKUERR
3186 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3187 layout = SIL_FAULT_PKUERR;
3188 #endif
3189 }
3190 else if (si_code <= NSIGPOLL)
3191 layout = SIL_POLL;
3192 } else {
3193 if (si_code == SI_TIMER)
3194 layout = SIL_TIMER;
3195 else if (si_code == SI_SIGIO)
3196 layout = SIL_POLL;
3197 else if (si_code < 0)
3198 layout = SIL_RT;
3199 }
3200 return layout;
3201 }
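
/*
 * Example (illustrative): siginfo_layout(SIGSEGV, SEGV_MAPERR) yields
 * SIL_FAULT, so only si_addr (and friends) are copied by the compat
 * code below, while a signal queued with sigqueue(3) carries
 * si_code == SI_QUEUE (< 0), yields SIL_RT, and has si_pid, si_uid
 * and si_int copied instead.
 */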
3202 3203 static inline char __user *si_expansion(const siginfo_t __user *info) 3204 { 3205 return ((char __user *)info) + sizeof(struct kernel_siginfo); 3206 } 3207 3208 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from) 3209 { 3210 char __user *expansion = si_expansion(to); 3211 if (copy_to_user(to, from , sizeof(struct kernel_siginfo))) 3212 return -EFAULT; 3213 if (clear_user(expansion, SI_EXPANSION_SIZE)) 3214 return -EFAULT; 3215 return 0; 3216 } 3217 3218 static int post_copy_siginfo_from_user(kernel_siginfo_t *info, 3219 const siginfo_t __user *from) 3220 { 3221 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) { 3222 char __user *expansion = si_expansion(from); 3223 char buf[SI_EXPANSION_SIZE]; 3224 int i; 3225 /* 3226 * An unknown si_code might need more than 3227 * sizeof(struct kernel_siginfo) bytes. Verify all of the 3228 * extra bytes are 0. This guarantees copy_siginfo_to_user 3229 * will return this data to userspace exactly. 3230 */ 3231 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE)) 3232 return -EFAULT; 3233 for (i = 0; i < SI_EXPANSION_SIZE; i++) { 3234 if (buf[i] != 0) 3235 return -E2BIG; 3236 } 3237 } 3238 return 0; 3239 } 3240 3241 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to, 3242 const siginfo_t __user *from) 3243 { 3244 if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) 3245 return -EFAULT; 3246 to->si_signo = signo; 3247 return post_copy_siginfo_from_user(to, from); 3248 } 3249 3250 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from) 3251 { 3252 if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) 3253 return -EFAULT; 3254 return post_copy_siginfo_from_user(to, from); 3255 } 3256 3257 #ifdef CONFIG_COMPAT 3258 int copy_siginfo_to_user32(struct compat_siginfo __user *to, 3259 const struct kernel_siginfo *from) 3260 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION) 3261 { 3262 return __copy_siginfo_to_user32(to, from, in_x32_syscall()); 3263 } 3264 int __copy_siginfo_to_user32(struct compat_siginfo __user *to, 3265 const struct kernel_siginfo *from, bool x32_ABI) 3266 #endif 3267 { 3268 struct compat_siginfo new; 3269 memset(&new, 0, sizeof(new)); 3270 3271 new.si_signo = from->si_signo; 3272 new.si_errno = from->si_errno; 3273 new.si_code = from->si_code; 3274 switch(siginfo_layout(from->si_signo, from->si_code)) { 3275 case SIL_KILL: 3276 new.si_pid = from->si_pid; 3277 new.si_uid = from->si_uid; 3278 break; 3279 case SIL_TIMER: 3280 new.si_tid = from->si_tid; 3281 new.si_overrun = from->si_overrun; 3282 new.si_int = from->si_int; 3283 break; 3284 case SIL_POLL: 3285 new.si_band = from->si_band; 3286 new.si_fd = from->si_fd; 3287 break; 3288 case SIL_FAULT: 3289 new.si_addr = ptr_to_compat(from->si_addr); 3290 #ifdef __ARCH_SI_TRAPNO 3291 new.si_trapno = from->si_trapno; 3292 #endif 3293 break; 3294 case SIL_FAULT_MCEERR: 3295 new.si_addr = ptr_to_compat(from->si_addr); 3296 #ifdef __ARCH_SI_TRAPNO 3297 new.si_trapno = from->si_trapno; 3298 #endif 3299 new.si_addr_lsb = from->si_addr_lsb; 3300 break; 3301 case SIL_FAULT_BNDERR: 3302 new.si_addr = ptr_to_compat(from->si_addr); 3303 #ifdef __ARCH_SI_TRAPNO 3304 new.si_trapno = from->si_trapno; 3305 #endif 3306 new.si_lower = ptr_to_compat(from->si_lower); 3307 new.si_upper = ptr_to_compat(from->si_upper); 3308 break; 3309 case SIL_FAULT_PKUERR: 3310 new.si_addr = ptr_to_compat(from->si_addr); 3311 #ifdef __ARCH_SI_TRAPNO 3312 new.si_trapno = from->si_trapno; 3313 #endif 3314 
new.si_pkey = from->si_pkey; 3315 break; 3316 case SIL_CHLD: 3317 new.si_pid = from->si_pid; 3318 new.si_uid = from->si_uid; 3319 new.si_status = from->si_status; 3320 #ifdef CONFIG_X86_X32_ABI 3321 if (x32_ABI) { 3322 new._sifields._sigchld_x32._utime = from->si_utime; 3323 new._sifields._sigchld_x32._stime = from->si_stime; 3324 } else 3325 #endif 3326 { 3327 new.si_utime = from->si_utime; 3328 new.si_stime = from->si_stime; 3329 } 3330 break; 3331 case SIL_RT: 3332 new.si_pid = from->si_pid; 3333 new.si_uid = from->si_uid; 3334 new.si_int = from->si_int; 3335 break; 3336 case SIL_SYS: 3337 new.si_call_addr = ptr_to_compat(from->si_call_addr); 3338 new.si_syscall = from->si_syscall; 3339 new.si_arch = from->si_arch; 3340 break; 3341 } 3342 3343 if (copy_to_user(to, &new, sizeof(struct compat_siginfo))) 3344 return -EFAULT; 3345 3346 return 0; 3347 } 3348 3349 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to, 3350 const struct compat_siginfo *from) 3351 { 3352 clear_siginfo(to); 3353 to->si_signo = from->si_signo; 3354 to->si_errno = from->si_errno; 3355 to->si_code = from->si_code; 3356 switch(siginfo_layout(from->si_signo, from->si_code)) { 3357 case SIL_KILL: 3358 to->si_pid = from->si_pid; 3359 to->si_uid = from->si_uid; 3360 break; 3361 case SIL_TIMER: 3362 to->si_tid = from->si_tid; 3363 to->si_overrun = from->si_overrun; 3364 to->si_int = from->si_int; 3365 break; 3366 case SIL_POLL: 3367 to->si_band = from->si_band; 3368 to->si_fd = from->si_fd; 3369 break; 3370 case SIL_FAULT: 3371 to->si_addr = compat_ptr(from->si_addr); 3372 #ifdef __ARCH_SI_TRAPNO 3373 to->si_trapno = from->si_trapno; 3374 #endif 3375 break; 3376 case SIL_FAULT_MCEERR: 3377 to->si_addr = compat_ptr(from->si_addr); 3378 #ifdef __ARCH_SI_TRAPNO 3379 to->si_trapno = from->si_trapno; 3380 #endif 3381 to->si_addr_lsb = from->si_addr_lsb; 3382 break; 3383 case SIL_FAULT_BNDERR: 3384 to->si_addr = compat_ptr(from->si_addr); 3385 #ifdef __ARCH_SI_TRAPNO 3386 to->si_trapno = from->si_trapno; 3387 #endif 3388 to->si_lower = compat_ptr(from->si_lower); 3389 to->si_upper = compat_ptr(from->si_upper); 3390 break; 3391 case SIL_FAULT_PKUERR: 3392 to->si_addr = compat_ptr(from->si_addr); 3393 #ifdef __ARCH_SI_TRAPNO 3394 to->si_trapno = from->si_trapno; 3395 #endif 3396 to->si_pkey = from->si_pkey; 3397 break; 3398 case SIL_CHLD: 3399 to->si_pid = from->si_pid; 3400 to->si_uid = from->si_uid; 3401 to->si_status = from->si_status; 3402 #ifdef CONFIG_X86_X32_ABI 3403 if (in_x32_syscall()) { 3404 to->si_utime = from->_sifields._sigchld_x32._utime; 3405 to->si_stime = from->_sifields._sigchld_x32._stime; 3406 } else 3407 #endif 3408 { 3409 to->si_utime = from->si_utime; 3410 to->si_stime = from->si_stime; 3411 } 3412 break; 3413 case SIL_RT: 3414 to->si_pid = from->si_pid; 3415 to->si_uid = from->si_uid; 3416 to->si_int = from->si_int; 3417 break; 3418 case SIL_SYS: 3419 to->si_call_addr = compat_ptr(from->si_call_addr); 3420 to->si_syscall = from->si_syscall; 3421 to->si_arch = from->si_arch; 3422 break; 3423 } 3424 return 0; 3425 } 3426 3427 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to, 3428 const struct compat_siginfo __user *ufrom) 3429 { 3430 struct compat_siginfo from; 3431 3432 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) 3433 return -EFAULT; 3434 3435 from.si_signo = signo; 3436 return post_copy_siginfo_from_user32(to, &from); 3437 } 3438 3439 int copy_siginfo_from_user32(struct kernel_siginfo *to, 3440 const struct compat_siginfo __user *ufrom) 3441 { 3442 
struct compat_siginfo from;
3443
3444 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3445 return -EFAULT;
3446
3447 return post_copy_siginfo_from_user32(to, &from);
3448 }
3449 #endif /* CONFIG_COMPAT */
3450
3451 /**
3452 * do_sigtimedwait - wait for queued signals specified in @which
3453 * @which: queued signals to wait for
3454 * @info: if non-null, the signal's siginfo is returned here
3455 * @ts: upper bound on process time suspension
3456 */
3457 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3458 const struct timespec64 *ts)
3459 {
3460 ktime_t *to = NULL, timeout = KTIME_MAX;
3461 struct task_struct *tsk = current;
3462 sigset_t mask = *which;
3463 int sig, ret = 0;
3464
3465 if (ts) {
3466 if (!timespec64_valid(ts))
3467 return -EINVAL;
3468 timeout = timespec64_to_ktime(*ts);
3469 to = &timeout;
3470 }
3471
3472 /*
3473 * Invert the set of allowed signals to get those we want to block.
3474 */
3475 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3476 signotset(&mask);
3477
3478 spin_lock_irq(&tsk->sighand->siglock);
3479 sig = dequeue_signal(tsk, &mask, info);
3480 if (!sig && timeout) {
3481 /*
3482 * None ready, temporarily unblock those we're interested
3483 * in while we are sleeping, so that we'll be awakened when
3484 * they arrive. Unblocking is always fine, we can avoid
3485 * set_current_blocked().
3486 */
3487 tsk->real_blocked = tsk->blocked;
3488 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3489 recalc_sigpending();
3490 spin_unlock_irq(&tsk->sighand->siglock);
3491
3492 __set_current_state(TASK_INTERRUPTIBLE);
3493 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3494 HRTIMER_MODE_REL);
3495 spin_lock_irq(&tsk->sighand->siglock);
3496 __set_task_blocked(tsk, &tsk->real_blocked);
3497 sigemptyset(&tsk->real_blocked);
3498 sig = dequeue_signal(tsk, &mask, info);
3499 }
3500 spin_unlock_irq(&tsk->sighand->siglock);
3501
3502 if (sig)
3503 return sig;
3504 return ret ? -EINTR : -EAGAIN;
3505 }
3506
3507 /**
3508 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3509 * in @uthese
3510 * @uthese: queued signals to wait for
3511 * @uinfo: if non-null, the signal's siginfo is returned here
3512 * @uts: upper bound on process time suspension
3513 * @sigsetsize: size of sigset_t type
3514 */
3515 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3516 siginfo_t __user *, uinfo,
3517 const struct __kernel_timespec __user *, uts,
3518 size_t, sigsetsize)
3519 {
3520 sigset_t these;
3521 struct timespec64 ts;
3522 kernel_siginfo_t info;
3523 int ret;
3524
3525 /* XXX: Don't preclude handling different sized sigset_t's. */
3526 if (sigsetsize != sizeof(sigset_t))
3527 return -EINVAL;
3528
3529 if (copy_from_user(&these, uthese, sizeof(these)))
3530 return -EFAULT;
3531
3532 if (uts) {
3533 if (get_timespec64(&ts, uts))
3534 return -EFAULT;
3535 }
3536
3537 ret = do_sigtimedwait(&these, &info, uts ?
&ts : NULL); 3538 3539 if (ret > 0 && uinfo) { 3540 if (copy_siginfo_to_user(uinfo, &info)) 3541 ret = -EFAULT; 3542 } 3543 3544 return ret; 3545 } 3546 3547 #ifdef CONFIG_COMPAT_32BIT_TIME 3548 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese, 3549 siginfo_t __user *, uinfo, 3550 const struct old_timespec32 __user *, uts, 3551 size_t, sigsetsize) 3552 { 3553 sigset_t these; 3554 struct timespec64 ts; 3555 kernel_siginfo_t info; 3556 int ret; 3557 3558 if (sigsetsize != sizeof(sigset_t)) 3559 return -EINVAL; 3560 3561 if (copy_from_user(&these, uthese, sizeof(these))) 3562 return -EFAULT; 3563 3564 if (uts) { 3565 if (get_old_timespec32(&ts, uts)) 3566 return -EFAULT; 3567 } 3568 3569 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); 3570 3571 if (ret > 0 && uinfo) { 3572 if (copy_siginfo_to_user(uinfo, &info)) 3573 ret = -EFAULT; 3574 } 3575 3576 return ret; 3577 } 3578 #endif 3579 3580 #ifdef CONFIG_COMPAT 3581 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese, 3582 struct compat_siginfo __user *, uinfo, 3583 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize) 3584 { 3585 sigset_t s; 3586 struct timespec64 t; 3587 kernel_siginfo_t info; 3588 long ret; 3589 3590 if (sigsetsize != sizeof(sigset_t)) 3591 return -EINVAL; 3592 3593 if (get_compat_sigset(&s, uthese)) 3594 return -EFAULT; 3595 3596 if (uts) { 3597 if (get_timespec64(&t, uts)) 3598 return -EFAULT; 3599 } 3600 3601 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); 3602 3603 if (ret > 0 && uinfo) { 3604 if (copy_siginfo_to_user32(uinfo, &info)) 3605 ret = -EFAULT; 3606 } 3607 3608 return ret; 3609 } 3610 3611 #ifdef CONFIG_COMPAT_32BIT_TIME 3612 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese, 3613 struct compat_siginfo __user *, uinfo, 3614 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize) 3615 { 3616 sigset_t s; 3617 struct timespec64 t; 3618 kernel_siginfo_t info; 3619 long ret; 3620 3621 if (sigsetsize != sizeof(sigset_t)) 3622 return -EINVAL; 3623 3624 if (get_compat_sigset(&s, uthese)) 3625 return -EFAULT; 3626 3627 if (uts) { 3628 if (get_old_timespec32(&t, uts)) 3629 return -EFAULT; 3630 } 3631 3632 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); 3633 3634 if (ret > 0 && uinfo) { 3635 if (copy_siginfo_to_user32(uinfo, &info)) 3636 ret = -EFAULT; 3637 } 3638 3639 return ret; 3640 } 3641 #endif 3642 #endif 3643 3644 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info) 3645 { 3646 clear_siginfo(info); 3647 info->si_signo = sig; 3648 info->si_errno = 0; 3649 info->si_code = SI_USER; 3650 info->si_pid = task_tgid_vnr(current); 3651 info->si_uid = from_kuid_munged(current_user_ns(), current_uid()); 3652 } 3653 3654 /** 3655 * sys_kill - send a signal to a process 3656 * @pid: the PID of the process 3657 * @sig: signal to be sent 3658 */ 3659 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) 3660 { 3661 struct kernel_siginfo info; 3662 3663 prepare_kill_siginfo(sig, &info); 3664 3665 return kill_something_info(sig, &info, pid); 3666 } 3667 3668 /* 3669 * Verify that the signaler and signalee either are in the same pid namespace 3670 * or that the signaler's pid namespace is an ancestor of the signalee's pid 3671 * namespace. 
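 *
 * For example, a signaler in the initial pid namespace may signal any
 * pidfd, while a task in a child pid namespace cannot signal a pidfd
 * that refers into a parent or sibling namespace.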
3672 */
3673 static bool access_pidfd_pidns(struct pid *pid)
3674 {
3675 struct pid_namespace *active = task_active_pid_ns(current);
3676 struct pid_namespace *p = ns_of_pid(pid);
3677
3678 for (;;) {
3679 if (!p)
3680 return false;
3681 if (p == active)
3682 break;
3683 p = p->parent;
3684 }
3685
3686 return true;
3687 }
3688
3689 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3690 {
3691 #ifdef CONFIG_COMPAT
3692 /*
3693 * Avoid hooking up compat syscalls and instead handle necessary
3694 * conversions here. Note, this is a stop-gap measure and should not be
3695 * considered a generic solution.
3696 */
3697 if (in_compat_syscall())
3698 return copy_siginfo_from_user32(
3699 kinfo, (struct compat_siginfo __user *)info);
3700 #endif
3701 return copy_siginfo_from_user(kinfo, info);
3702 }
3703
3704 static struct pid *pidfd_to_pid(const struct file *file)
3705 {
3706 if (file->f_op == &pidfd_fops)
3707 return file->private_data;
3708
3709 return tgid_pidfd_to_pid(file);
3710 }
3711
3712 /**
3713 * sys_pidfd_send_signal - Signal a process through a pidfd
3714 * @pidfd: file descriptor of the process
3715 * @sig: signal to send
3716 * @info: signal info
3717 * @flags: future flags
3718 *
3719 * The syscall currently only signals via PIDTYPE_PID which covers
3720 * kill(<positive-pid>, <signal>). It does not signal threads or process
3721 * groups.
3722 * In order to extend the syscall to threads and process groups the @flags
3723 * argument should be used. In essence, the @flags argument will determine
3724 * what is signaled and not the file descriptor itself. In other words,
3725 * grouping is a property of the flags argument, not a property of the file
3726 * descriptor.
3727 *
3728 * Return: 0 on success, negative errno on failure
3729 */
3730 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3731 siginfo_t __user *, info, unsigned int, flags)
3732 {
3733 int ret;
3734 struct fd f;
3735 struct pid *pid;
3736 kernel_siginfo_t kinfo;
3737
3738 /* Enforce that flags be set to 0 until we add an extension. */
3739 if (flags)
3740 return -EINVAL;
3741
3742 f = fdget(pidfd);
3743 if (!f.file)
3744 return -EBADF;
3745
3746 /* Is this a pidfd? */
3747 pid = pidfd_to_pid(f.file);
3748 if (IS_ERR(pid)) {
3749 ret = PTR_ERR(pid);
3750 goto err;
3751 }
3752
3753 ret = -EINVAL;
3754 if (!access_pidfd_pidns(pid))
3755 goto err;
3756
3757 if (info) {
3758 ret = copy_siginfo_from_user_any(&kinfo, info);
3759 if (unlikely(ret))
3760 goto err;
3761
3762 ret = -EINVAL;
3763 if (unlikely(sig != kinfo.si_signo))
3764 goto err;
3765
3766 /* Only allow sending arbitrary signals to yourself. */
3767 ret = -EPERM;
3768 if ((task_pid(current) != pid) &&
3769 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3770 goto err;
3771 } else {
3772 prepare_kill_siginfo(sig, &kinfo);
3773 }
3774
3775 ret = kill_pid_info(sig, &kinfo, pid);
3776
3777 err:
3778 fdput(f);
3779 return ret;
3780 }
3781
3782 static int
3783 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3784 {
3785 struct task_struct *p;
3786 int error = -ESRCH;
3787
3788 rcu_read_lock();
3789 p = find_task_by_vpid(pid);
3790 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3791 error = check_kill_permission(sig, info, p);
3792 /*
3793 * The null signal is a permissions and process existence
3794 * probe. No signal is actually delivered.
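 * For example, tgkill(tgid, tid, 0) checks whether a specific thread
 * still exists (and may be signalled) without actually disturbing it.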
3795 */ 3796 if (!error && sig) { 3797 error = do_send_sig_info(sig, info, p, PIDTYPE_PID); 3798 /* 3799 * If lock_task_sighand() failed we pretend the task 3800 * dies after receiving the signal. The window is tiny, 3801 * and the signal is private anyway. 3802 */ 3803 if (unlikely(error == -ESRCH)) 3804 error = 0; 3805 } 3806 } 3807 rcu_read_unlock(); 3808 3809 return error; 3810 } 3811 3812 static int do_tkill(pid_t tgid, pid_t pid, int sig) 3813 { 3814 struct kernel_siginfo info; 3815 3816 clear_siginfo(&info); 3817 info.si_signo = sig; 3818 info.si_errno = 0; 3819 info.si_code = SI_TKILL; 3820 info.si_pid = task_tgid_vnr(current); 3821 info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); 3822 3823 return do_send_specific(tgid, pid, sig, &info); 3824 } 3825 3826 /** 3827 * sys_tgkill - send signal to one specific thread 3828 * @tgid: the thread group ID of the thread 3829 * @pid: the PID of the thread 3830 * @sig: signal to be sent 3831 * 3832 * This syscall also checks the @tgid and returns -ESRCH even if the PID 3833 * exists but it's not belonging to the target process anymore. This 3834 * method solves the problem of threads exiting and PIDs getting reused. 3835 */ 3836 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) 3837 { 3838 /* This is only valid for single tasks */ 3839 if (pid <= 0 || tgid <= 0) 3840 return -EINVAL; 3841 3842 return do_tkill(tgid, pid, sig); 3843 } 3844 3845 /** 3846 * sys_tkill - send signal to one specific task 3847 * @pid: the PID of the task 3848 * @sig: signal to be sent 3849 * 3850 * Send a signal to only one task, even if it's a CLONE_THREAD task. 3851 */ 3852 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) 3853 { 3854 /* This is only valid for single tasks */ 3855 if (pid <= 0) 3856 return -EINVAL; 3857 3858 return do_tkill(0, pid, sig); 3859 } 3860 3861 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info) 3862 { 3863 /* Not even root can pretend to send signals from the kernel. 3864 * Nor can they impersonate a kill()/tgkill(), which adds source info. 3865 */ 3866 if ((info->si_code >= 0 || info->si_code == SI_TKILL) && 3867 (task_pid_vnr(current) != pid)) 3868 return -EPERM; 3869 3870 /* POSIX.1b doesn't mention process groups. */ 3871 return kill_proc_info(sig, info, pid); 3872 } 3873 3874 /** 3875 * sys_rt_sigqueueinfo - send signal information to a signal 3876 * @pid: the PID of the thread 3877 * @sig: signal to be sent 3878 * @uinfo: signal info to be sent 3879 */ 3880 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, 3881 siginfo_t __user *, uinfo) 3882 { 3883 kernel_siginfo_t info; 3884 int ret = __copy_siginfo_from_user(sig, &info, uinfo); 3885 if (unlikely(ret)) 3886 return ret; 3887 return do_rt_sigqueueinfo(pid, sig, &info); 3888 } 3889 3890 #ifdef CONFIG_COMPAT 3891 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, 3892 compat_pid_t, pid, 3893 int, sig, 3894 struct compat_siginfo __user *, uinfo) 3895 { 3896 kernel_siginfo_t info; 3897 int ret = __copy_siginfo_from_user32(sig, &info, uinfo); 3898 if (unlikely(ret)) 3899 return ret; 3900 return do_rt_sigqueueinfo(pid, sig, &info); 3901 } 3902 #endif 3903 3904 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info) 3905 { 3906 /* This is only valid for single tasks */ 3907 if (pid <= 0 || tgid <= 0) 3908 return -EINVAL; 3909 3910 /* Not even root can pretend to send signals from the kernel. 3911 * Nor can they impersonate a kill()/tgkill(), which adds source info. 
3912 */ 3913 if ((info->si_code >= 0 || info->si_code == SI_TKILL) && 3914 (task_pid_vnr(current) != pid)) 3915 return -EPERM; 3916 3917 return do_send_specific(tgid, pid, sig, info); 3918 } 3919 3920 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, 3921 siginfo_t __user *, uinfo) 3922 { 3923 kernel_siginfo_t info; 3924 int ret = __copy_siginfo_from_user(sig, &info, uinfo); 3925 if (unlikely(ret)) 3926 return ret; 3927 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); 3928 } 3929 3930 #ifdef CONFIG_COMPAT 3931 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, 3932 compat_pid_t, tgid, 3933 compat_pid_t, pid, 3934 int, sig, 3935 struct compat_siginfo __user *, uinfo) 3936 { 3937 kernel_siginfo_t info; 3938 int ret = __copy_siginfo_from_user32(sig, &info, uinfo); 3939 if (unlikely(ret)) 3940 return ret; 3941 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); 3942 } 3943 #endif 3944 3945 /* 3946 * For kthreads only, must not be used if cloned with CLONE_SIGHAND 3947 */ 3948 void kernel_sigaction(int sig, __sighandler_t action) 3949 { 3950 spin_lock_irq(¤t->sighand->siglock); 3951 current->sighand->action[sig - 1].sa.sa_handler = action; 3952 if (action == SIG_IGN) { 3953 sigset_t mask; 3954 3955 sigemptyset(&mask); 3956 sigaddset(&mask, sig); 3957 3958 flush_sigqueue_mask(&mask, ¤t->signal->shared_pending); 3959 flush_sigqueue_mask(&mask, ¤t->pending); 3960 recalc_sigpending(); 3961 } 3962 spin_unlock_irq(¤t->sighand->siglock); 3963 } 3964 EXPORT_SYMBOL(kernel_sigaction); 3965 3966 void __weak sigaction_compat_abi(struct k_sigaction *act, 3967 struct k_sigaction *oact) 3968 { 3969 } 3970 3971 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) 3972 { 3973 struct task_struct *p = current, *t; 3974 struct k_sigaction *k; 3975 sigset_t mask; 3976 3977 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) 3978 return -EINVAL; 3979 3980 k = &p->sighand->action[sig-1]; 3981 3982 spin_lock_irq(&p->sighand->siglock); 3983 if (oact) 3984 *oact = *k; 3985 3986 sigaction_compat_abi(act, oact); 3987 3988 if (act) { 3989 sigdelsetmask(&act->sa.sa_mask, 3990 sigmask(SIGKILL) | sigmask(SIGSTOP)); 3991 *k = *act; 3992 /* 3993 * POSIX 3.3.1.3: 3994 * "Setting a signal action to SIG_IGN for a signal that is 3995 * pending shall cause the pending signal to be discarded, 3996 * whether or not it is blocked." 
3997 * 3998 * "Setting a signal action to SIG_DFL for a signal that is 3999 * pending and whose default action is to ignore the signal 4000 * (for example, SIGCHLD), shall cause the pending signal to 4001 * be discarded, whether or not it is blocked" 4002 */ 4003 if (sig_handler_ignored(sig_handler(p, sig), sig)) { 4004 sigemptyset(&mask); 4005 sigaddset(&mask, sig); 4006 flush_sigqueue_mask(&mask, &p->signal->shared_pending); 4007 for_each_thread(p, t) 4008 flush_sigqueue_mask(&mask, &t->pending); 4009 } 4010 } 4011 4012 spin_unlock_irq(&p->sighand->siglock); 4013 return 0; 4014 } 4015 4016 static int 4017 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp, 4018 size_t min_ss_size) 4019 { 4020 struct task_struct *t = current; 4021 4022 if (oss) { 4023 memset(oss, 0, sizeof(stack_t)); 4024 oss->ss_sp = (void __user *) t->sas_ss_sp; 4025 oss->ss_size = t->sas_ss_size; 4026 oss->ss_flags = sas_ss_flags(sp) | 4027 (current->sas_ss_flags & SS_FLAG_BITS); 4028 } 4029 4030 if (ss) { 4031 void __user *ss_sp = ss->ss_sp; 4032 size_t ss_size = ss->ss_size; 4033 unsigned ss_flags = ss->ss_flags; 4034 int ss_mode; 4035 4036 if (unlikely(on_sig_stack(sp))) 4037 return -EPERM; 4038 4039 ss_mode = ss_flags & ~SS_FLAG_BITS; 4040 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK && 4041 ss_mode != 0)) 4042 return -EINVAL; 4043 4044 if (ss_mode == SS_DISABLE) { 4045 ss_size = 0; 4046 ss_sp = NULL; 4047 } else { 4048 if (unlikely(ss_size < min_ss_size)) 4049 return -ENOMEM; 4050 } 4051 4052 t->sas_ss_sp = (unsigned long) ss_sp; 4053 t->sas_ss_size = ss_size; 4054 t->sas_ss_flags = ss_flags; 4055 } 4056 return 0; 4057 } 4058 4059 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) 4060 { 4061 stack_t new, old; 4062 int err; 4063 if (uss && copy_from_user(&new, uss, sizeof(stack_t))) 4064 return -EFAULT; 4065 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL, 4066 current_user_stack_pointer(), 4067 MINSIGSTKSZ); 4068 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t))) 4069 err = -EFAULT; 4070 return err; 4071 } 4072 4073 int restore_altstack(const stack_t __user *uss) 4074 { 4075 stack_t new; 4076 if (copy_from_user(&new, uss, sizeof(stack_t))) 4077 return -EFAULT; 4078 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(), 4079 MINSIGSTKSZ); 4080 /* squash all but EFAULT for now */ 4081 return 0; 4082 } 4083 4084 int __save_altstack(stack_t __user *uss, unsigned long sp) 4085 { 4086 struct task_struct *t = current; 4087 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | 4088 __put_user(t->sas_ss_flags, &uss->ss_flags) | 4089 __put_user(t->sas_ss_size, &uss->ss_size); 4090 if (err) 4091 return err; 4092 if (t->sas_ss_flags & SS_AUTODISARM) 4093 sas_ss_reset(t); 4094 return 0; 4095 } 4096 4097 #ifdef CONFIG_COMPAT 4098 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr, 4099 compat_stack_t __user *uoss_ptr) 4100 { 4101 stack_t uss, uoss; 4102 int ret; 4103 4104 if (uss_ptr) { 4105 compat_stack_t uss32; 4106 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t))) 4107 return -EFAULT; 4108 uss.ss_sp = compat_ptr(uss32.ss_sp); 4109 uss.ss_flags = uss32.ss_flags; 4110 uss.ss_size = uss32.ss_size; 4111 } 4112 ret = do_sigaltstack(uss_ptr ? 
&uss : NULL, &uoss,
4113 			     compat_user_stack_pointer(),
4114 			     COMPAT_MINSIGSTKSZ);
4115 	if (ret >= 0 && uoss_ptr) {
4116 		compat_stack_t old;
4117 		memset(&old, 0, sizeof(old));
4118 		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4119 		old.ss_flags = uoss.ss_flags;
4120 		old.ss_size = uoss.ss_size;
4121 		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4122 			ret = -EFAULT;
4123 	}
4124 	return ret;
4125 }
4126 
4127 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4128 			const compat_stack_t __user *, uss_ptr,
4129 			compat_stack_t __user *, uoss_ptr)
4130 {
4131 	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4132 }
4133 
4134 int compat_restore_altstack(const compat_stack_t __user *uss)
4135 {
4136 	int err = do_compat_sigaltstack(uss, NULL);
4137 	/* squash all but -EFAULT for now */
4138 	return err == -EFAULT ? err : 0;
4139 }
4140 
4141 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4142 {
4143 	int err;
4144 	struct task_struct *t = current;
4145 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4146 			 &uss->ss_sp) |
4147 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4148 		__put_user(t->sas_ss_size, &uss->ss_size);
4149 	if (err)
4150 		return err;
4151 	if (t->sas_ss_flags & SS_AUTODISARM)
4152 		sas_ss_reset(t);
4153 	return 0;
4154 }
4155 #endif
4156 
4157 #ifdef __ARCH_WANT_SYS_SIGPENDING
4158 
4159 /**
4160  * sys_sigpending - examine pending signals
4161  * @uset: where the mask of pending signals is returned
4162  */
4163 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4164 {
4165 	sigset_t set;
4166 
4167 	if (sizeof(old_sigset_t) > sizeof(*uset))
4168 		return -EINVAL;
4169 
4170 	do_sigpending(&set);
4171 
4172 	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4173 		return -EFAULT;
4174 
4175 	return 0;
4176 }
4177 
4178 #ifdef CONFIG_COMPAT
4179 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4180 {
4181 	sigset_t set;
4182 
4183 	do_sigpending(&set);
4184 
4185 	return put_user(set.sig[0], set32);
4186 }
4187 #endif
4188 
4189 #endif
4190 
4191 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4192 /**
4193  * sys_sigprocmask - examine and change blocked signals
4194  * @how: whether to add, remove, or set signals
4195  * @nset: signals to add or remove (if non-null)
4196  * @oset: previous value of signal mask if non-null
4197  *
4198  * Some platforms have their own version with special arguments;
4199  * others support only sys_rt_sigprocmask.
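 *
 * For illustration, blocking SIGINT through this legacy entry point might
 * look like the following (a hypothetical userspace sketch for platforms
 * where the old syscall exists; modern code should use the
 * rt_sigprocmask-backed libc sigprocmask() instead):
 *
 *	unsigned long nset = 1UL << (SIGINT - 1), oset;
 *	syscall(SYS_sigprocmask, SIG_BLOCK, &nset, &oset);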
4200  */
4201 
4202 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4203 		old_sigset_t __user *, oset)
4204 {
4205 	old_sigset_t old_set, new_set;
4206 	sigset_t new_blocked;
4207 
4208 	old_set = current->blocked.sig[0];
4209 
4210 	if (nset) {
4211 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4212 			return -EFAULT;
4213 
4214 		new_blocked = current->blocked;
4215 
4216 		switch (how) {
4217 		case SIG_BLOCK:
4218 			sigaddsetmask(&new_blocked, new_set);
4219 			break;
4220 		case SIG_UNBLOCK:
4221 			sigdelsetmask(&new_blocked, new_set);
4222 			break;
4223 		case SIG_SETMASK:
4224 			new_blocked.sig[0] = new_set;
4225 			break;
4226 		default:
4227 			return -EINVAL;
4228 		}
4229 
4230 		set_current_blocked(&new_blocked);
4231 	}
4232 
4233 	if (oset) {
4234 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4235 			return -EFAULT;
4236 	}
4237 
4238 	return 0;
4239 }
4240 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4241 
4242 #ifndef CONFIG_ODD_RT_SIGACTION
4243 /**
4244  * sys_rt_sigaction - alter an action taken by a process
4245  * @sig: the signal whose action is to be changed
4246  * @act: new sigaction
4247  * @oact: used to save the previous sigaction
4248  * @sigsetsize: size of sigset_t type
4249  */
4250 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4251 		const struct sigaction __user *, act,
4252 		struct sigaction __user *, oact,
4253 		size_t, sigsetsize)
4254 {
4255 	struct k_sigaction new_sa, old_sa;
4256 	int ret;
4257 
4258 	/* XXX: Don't preclude handling different sized sigset_t's. */
4259 	if (sigsetsize != sizeof(sigset_t))
4260 		return -EINVAL;
4261 
4262 	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4263 		return -EFAULT;
4264 
4265 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4266 	if (ret)
4267 		return ret;
4268 
4269 	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4270 		return -EFAULT;
4271 
4272 	return 0;
4273 }
4274 #ifdef CONFIG_COMPAT
4275 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4276 		const struct compat_sigaction __user *, act,
4277 		struct compat_sigaction __user *, oact,
4278 		compat_size_t, sigsetsize)
4279 {
4280 	struct k_sigaction new_ka, old_ka;
4281 #ifdef __ARCH_HAS_SA_RESTORER
4282 	compat_uptr_t restorer;
4283 #endif
4284 	int ret;
4285 
4286 	/* XXX: Don't preclude handling different sized sigset_t's. */
4287 	if (sigsetsize != sizeof(compat_sigset_t))
4288 		return -EINVAL;
4289 
4290 	if (act) {
4291 		compat_uptr_t handler;
4292 		ret = get_user(handler, &act->sa_handler);
4293 		new_ka.sa.sa_handler = compat_ptr(handler);
4294 #ifdef __ARCH_HAS_SA_RESTORER
4295 		ret |= get_user(restorer, &act->sa_restorer);
4296 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4297 #endif
4298 		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4299 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4300 		if (ret)
4301 			return -EFAULT;
4302 	}
4303 
4304 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ?
&old_ka : NULL); 4305 if (!ret && oact) { 4306 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 4307 &oact->sa_handler); 4308 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask, 4309 sizeof(oact->sa_mask)); 4310 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); 4311 #ifdef __ARCH_HAS_SA_RESTORER 4312 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), 4313 &oact->sa_restorer); 4314 #endif 4315 } 4316 return ret; 4317 } 4318 #endif 4319 #endif /* !CONFIG_ODD_RT_SIGACTION */ 4320 4321 #ifdef CONFIG_OLD_SIGACTION 4322 SYSCALL_DEFINE3(sigaction, int, sig, 4323 const struct old_sigaction __user *, act, 4324 struct old_sigaction __user *, oact) 4325 { 4326 struct k_sigaction new_ka, old_ka; 4327 int ret; 4328 4329 if (act) { 4330 old_sigset_t mask; 4331 if (!access_ok(act, sizeof(*act)) || 4332 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 4333 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || 4334 __get_user(new_ka.sa.sa_flags, &act->sa_flags) || 4335 __get_user(mask, &act->sa_mask)) 4336 return -EFAULT; 4337 #ifdef __ARCH_HAS_KA_RESTORER 4338 new_ka.ka_restorer = NULL; 4339 #endif 4340 siginitset(&new_ka.sa.sa_mask, mask); 4341 } 4342 4343 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 4344 4345 if (!ret && oact) { 4346 if (!access_ok(oact, sizeof(*oact)) || 4347 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 4348 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || 4349 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || 4350 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) 4351 return -EFAULT; 4352 } 4353 4354 return ret; 4355 } 4356 #endif 4357 #ifdef CONFIG_COMPAT_OLD_SIGACTION 4358 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig, 4359 const struct compat_old_sigaction __user *, act, 4360 struct compat_old_sigaction __user *, oact) 4361 { 4362 struct k_sigaction new_ka, old_ka; 4363 int ret; 4364 compat_old_sigset_t mask; 4365 compat_uptr_t handler, restorer; 4366 4367 if (act) { 4368 if (!access_ok(act, sizeof(*act)) || 4369 __get_user(handler, &act->sa_handler) || 4370 __get_user(restorer, &act->sa_restorer) || 4371 __get_user(new_ka.sa.sa_flags, &act->sa_flags) || 4372 __get_user(mask, &act->sa_mask)) 4373 return -EFAULT; 4374 4375 #ifdef __ARCH_HAS_KA_RESTORER 4376 new_ka.ka_restorer = NULL; 4377 #endif 4378 new_ka.sa.sa_handler = compat_ptr(handler); 4379 new_ka.sa.sa_restorer = compat_ptr(restorer); 4380 siginitset(&new_ka.sa.sa_mask, mask); 4381 } 4382 4383 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 4384 4385 if (!ret && oact) { 4386 if (!access_ok(oact, sizeof(*oact)) || 4387 __put_user(ptr_to_compat(old_ka.sa.sa_handler), 4388 &oact->sa_handler) || 4389 __put_user(ptr_to_compat(old_ka.sa.sa_restorer), 4390 &oact->sa_restorer) || 4391 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || 4392 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) 4393 return -EFAULT; 4394 } 4395 return ret; 4396 } 4397 #endif 4398 4399 #ifdef CONFIG_SGETMASK_SYSCALL 4400 4401 /* 4402 * For backwards compatibility. Functionality superseded by sigprocmask. 4403 */ 4404 SYSCALL_DEFINE0(sgetmask) 4405 { 4406 /* SMP safe */ 4407 return current->blocked.sig[0]; 4408 } 4409 4410 SYSCALL_DEFINE1(ssetmask, int, newmask) 4411 { 4412 int old = current->blocked.sig[0]; 4413 sigset_t newset; 4414 4415 siginitset(&newset, newmask); 4416 set_current_blocked(&newset); 4417 4418 return old; 4419 } 4420 #endif /* CONFIG_SGETMASK_SYSCALL */ 4421 4422 #ifdef __ARCH_WANT_SYS_SIGNAL 4423 /* 4424 * For backwards compatibility. 
Functionality superseded by sigaction.
4425  */
4426 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4427 {
4428 	struct k_sigaction new_sa, old_sa;
4429 	int ret;
4430 
4431 	new_sa.sa.sa_handler = handler;
4432 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4433 	sigemptyset(&new_sa.sa.sa_mask);
4434 
4435 	ret = do_sigaction(sig, &new_sa, &old_sa);
4436 
4437 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4438 }
4439 #endif /* __ARCH_WANT_SYS_SIGNAL */
4440 
4441 #ifdef __ARCH_WANT_SYS_PAUSE
4442 
4443 SYSCALL_DEFINE0(pause)
4444 {
4445 	while (!signal_pending(current)) {
4446 		__set_current_state(TASK_INTERRUPTIBLE);
4447 		schedule();
4448 	}
4449 	return -ERESTARTNOHAND;
4450 }
4451 
4452 #endif
4453 
4454 static int sigsuspend(sigset_t *set)
4455 {
4456 	current->saved_sigmask = current->blocked;
4457 	set_current_blocked(set);
4458 
4459 	while (!signal_pending(current)) {
4460 		__set_current_state(TASK_INTERRUPTIBLE);
4461 		schedule();
4462 	}
4463 	set_restore_sigmask();
4464 	return -ERESTARTNOHAND;
4465 }
4466 
4467 /**
4468  * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4469  *	until a signal is received
4470  * @unewset: new signal mask value
4471  * @sigsetsize: size of sigset_t type
4472  */
4473 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4474 {
4475 	sigset_t newset;
4476 
4477 	/* XXX: Don't preclude handling different sized sigset_t's. */
4478 	if (sigsetsize != sizeof(sigset_t))
4479 		return -EINVAL;
4480 
4481 	if (copy_from_user(&newset, unewset, sizeof(newset)))
4482 		return -EFAULT;
4483 	return sigsuspend(&newset);
4484 }
4485 
4486 #ifdef CONFIG_COMPAT
4487 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4488 {
4489 	sigset_t newset;
4490 
4491 	/* XXX: Don't preclude handling different sized sigset_t's.
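	 *
	 * The race sigsuspend() exists to close, as a hypothetical userspace
	 * sketch (event_seen being a volatile sig_atomic_t flag set by the
	 * handler; libc wrappers pass the kernel sigset size for us):
	 *
	 *	sigprocmask(SIG_BLOCK, &crit, &waitmask);
	 *	while (!event_seen)
	 *		sigsuspend(&waitmask);	// atomically unblock and sleep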
*/ 4492 if (sigsetsize != sizeof(sigset_t)) 4493 return -EINVAL; 4494 4495 if (get_compat_sigset(&newset, unewset)) 4496 return -EFAULT; 4497 return sigsuspend(&newset); 4498 } 4499 #endif 4500 4501 #ifdef CONFIG_OLD_SIGSUSPEND 4502 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask) 4503 { 4504 sigset_t blocked; 4505 siginitset(&blocked, mask); 4506 return sigsuspend(&blocked); 4507 } 4508 #endif 4509 #ifdef CONFIG_OLD_SIGSUSPEND3 4510 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask) 4511 { 4512 sigset_t blocked; 4513 siginitset(&blocked, mask); 4514 return sigsuspend(&blocked); 4515 } 4516 #endif 4517 4518 __weak const char *arch_vma_name(struct vm_area_struct *vma) 4519 { 4520 return NULL; 4521 } 4522 4523 static inline void siginfo_buildtime_checks(void) 4524 { 4525 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE); 4526 4527 /* Verify the offsets in the two siginfos match */ 4528 #define CHECK_OFFSET(field) \ 4529 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field)) 4530 4531 /* kill */ 4532 CHECK_OFFSET(si_pid); 4533 CHECK_OFFSET(si_uid); 4534 4535 /* timer */ 4536 CHECK_OFFSET(si_tid); 4537 CHECK_OFFSET(si_overrun); 4538 CHECK_OFFSET(si_value); 4539 4540 /* rt */ 4541 CHECK_OFFSET(si_pid); 4542 CHECK_OFFSET(si_uid); 4543 CHECK_OFFSET(si_value); 4544 4545 /* sigchld */ 4546 CHECK_OFFSET(si_pid); 4547 CHECK_OFFSET(si_uid); 4548 CHECK_OFFSET(si_status); 4549 CHECK_OFFSET(si_utime); 4550 CHECK_OFFSET(si_stime); 4551 4552 /* sigfault */ 4553 CHECK_OFFSET(si_addr); 4554 CHECK_OFFSET(si_addr_lsb); 4555 CHECK_OFFSET(si_lower); 4556 CHECK_OFFSET(si_upper); 4557 CHECK_OFFSET(si_pkey); 4558 4559 /* sigpoll */ 4560 CHECK_OFFSET(si_band); 4561 CHECK_OFFSET(si_fd); 4562 4563 /* sigsys */ 4564 CHECK_OFFSET(si_call_addr); 4565 CHECK_OFFSET(si_syscall); 4566 CHECK_OFFSET(si_arch); 4567 #undef CHECK_OFFSET 4568 4569 /* usb asyncio */ 4570 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) != 4571 offsetof(struct siginfo, si_addr)); 4572 if (sizeof(int) == sizeof(void __user *)) { 4573 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) != 4574 sizeof(void __user *)); 4575 } else { 4576 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) + 4577 sizeof_field(struct siginfo, si_uid)) != 4578 sizeof(void __user *)); 4579 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) != 4580 offsetof(struct siginfo, si_uid)); 4581 } 4582 #ifdef CONFIG_COMPAT 4583 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) != 4584 offsetof(struct compat_siginfo, si_addr)); 4585 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) != 4586 sizeof(compat_uptr_t)); 4587 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) != 4588 sizeof_field(struct siginfo, si_pid)); 4589 #endif 4590 } 4591 4592 void __init signals_init(void) 4593 { 4594 siginfo_buildtime_checks(); 4595 4596 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); 4597 } 4598 4599 #ifdef CONFIG_KGDB_KDB 4600 #include <linux/kdb.h> 4601 /* 4602 * kdb_send_sig - Allows kdb to send signals without exposing 4603 * signal internals. This function checks if the required locks are 4604 * available before calling the main signal code, to avoid kdb 4605 * deadlocks. 
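 *
 * Reached from the kdb shell's kill command, e.g. (illustrative only;
 * the exact prompt and syntax depend on the kdb build):
 *
 *	[0]kdb> kill -9 1234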
4606  */
4607 void kdb_send_sig(struct task_struct *t, int sig)
4608 {
4609 	static struct task_struct *kdb_prev_t;
4610 	int new_t, ret;
4611 	if (!spin_trylock(&t->sighand->siglock)) {
4612 		kdb_printf("Can't do kill command now.\n"
4613 			   "The sigmask lock is held somewhere else in "
4614 			   "the kernel, try again later\n");
4615 		return;
4616 	}
4617 	new_t = kdb_prev_t != t;
4618 	kdb_prev_t = t;
4619 	if (t->state != TASK_RUNNING && new_t) {
4620 		spin_unlock(&t->sighand->siglock);
4621 		kdb_printf("Process is not RUNNING, sending a signal from "
4622 			   "kdb risks deadlock\n"
4623 			   "on the run queue locks. "
4624 			   "The signal has _not_ been sent.\n"
4625 			   "Reissue the kill command if you want to risk "
4626 			   "the deadlock.\n");
4627 		return;
4628 	}
4629 	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4630 	spin_unlock(&t->sighand->siglock);
4631 	if (ret)
4632 		kdb_printf("Failed to deliver signal %d to process %d.\n",
4633 			   sig, t->pid);
4634 	else
4635 		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
4636 }
4637 #endif /* CONFIG_KGDB_KDB */
4638
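/*
 * Example: driving pidfd_send_signal() (defined earlier in this file)
 * from userspace. A hypothetical, self-contained sketch, not part of the
 * kernel build; it assumes a kernel that also provides pidfd_open(2) and
 * uses raw syscall(2) in case libc has no wrappers.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		int pidfd;
 *
 *		if (argc != 2)
 *			return 1;
 *		// A pidfd names the process itself, so this cannot race
 *		// with PID reuse the way kill(2) on a numeric PID can.
 *		pidfd = syscall(SYS_pidfd_open, atoi(argv[1]), 0);
 *		if (pidfd < 0)
 *			return 1;
 *		// info == NULL: the kernel fills in kill(2)-style SI_USER
 *		// info for us; flags must currently be 0 (see above).
 *		if (syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0))
 *			return 1;
 *		close(pidfd);
 *		return 0;
 *	}
 */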