/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}

static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	int i;
	u64 delta, incr;

	if (timer->it.cpu.incr == 0)
		return;

	if (now < timer->it.cpu.expires)
		return;

	incr = timer->it.cpu.incr;
	delta = now + incr - timer->it.cpu.expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.expires += incr;
		timer->it_overrun += 1 << i;
		delta -= incr;
	}
}
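
/*
 * Worked example (illustrative values only): expires = 10, incr = 3,
 * now = 20.  Then delta = 13; the doubling loop above stops with
 * incr = 12 and i = 2.  The second loop adds 12 to expires
 * (it_overrun += 1 << 2) and skips the 6 and 3 steps, leaving
 * expires = 22, i.e. the first multiple of the period beyond 'now',
 * with it_overrun counting the four expirations (at 10, 13, 16 and 19)
 * that have already passed.
 */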

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}

static inline u64 prof_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime + stime;
}
static inline u64 virt_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime;
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec64 *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock,
			    struct task_struct *p, u64 *sample)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		*sample = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		*sample = task_sched_runtime(p);
		break;
	}
	return 0;
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
					 struct task_cputime_atomic *atomic_times)
{
	times->utime = atomic64_read(&atomic_times->utime);
	times->stime = atomic64_read(&atomic_times->stime);
	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running)) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting cputimer->running without a lock. Ensure
		 * this only gets written to in one operation. We set
		 * running after update_gt_cputime() as a small optimization,
		 * but barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(cputimer->running, true);
	}
	sample_cputime_atomic(times, &cputimer->cputime_atomic);
}
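
/*
 * Note: roughly speaking, cputimer->running stays true while at least
 * one process-wide expiry (POSIX timer, itimer or RLIMIT_CPU) is being
 * tracked; it is cleared again in stop_process_timers() once
 * check_process_timers() finds nothing left to expire.  While it is
 * false, callers fall back to a fresh thread_group_cputime() sum above,
 * so the cached atomic totals only ever move forward.
 */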

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  u64 *sample)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

static int posix_cpu_clock_get_task(struct task_struct *tsk,
				    const clockid_t which_clock,
				    struct timespec64 *tp)
{
	int err = -EINVAL;
	u64 rtn;

	if (CPUCLOCK_PERTHREAD(which_clock)) {
		if (same_thread_group(tsk, current))
			err = cpu_clock_sample(which_clock, tsk, &rtn);
	} else {
		if (tsk == current || thread_group_leader(tsk))
			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
	}

	if (!err)
		*tp = ns_to_timespec64(rtn);

	return err;
}


static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec64 *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int err = -EINVAL;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		err = posix_cpu_clock_get_task(current, which_clock, tp);
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p)
			err = posix_cpu_clock_get_task(p, which_clock, tp);
		rcu_read_unlock();
	}

	return err;
}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	new_timer->kclock = &clock_posix_cpu;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}
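
/*
 * The clockid handled above encodes its target in the id itself (see the
 * CPUCLOCK_* macros in <linux/posix-timers.h>): roughly, the PID sits in
 * the upper bits, one bit selects a per-thread clock, and the low bits
 * pick PROF, VIRT or SCHED; pid == 0 means "the caller itself".  For
 * example, timer_create() against CLOCK_THREAD_CPUTIME_ID ends up here
 * via thread_cpu_timer_create() below with p == current.
 */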

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	int ret = 0;
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * We raced with the reaping of the task.
		 * The deletion should have cleared us off the list.
		 */
		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			list_del(&timer->it.cpu.entry);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}

static void cleanup_timers_list(struct list_head *head)
{
	struct cpu_timer_list *timer, *next;

	list_for_each_entry_safe(timer, next, head, entry)
		list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers still ticking when a thread exits.  The timers
 * are simply unlinked from the per-task (or per-process) expiry lists;
 * they are not freed here and keep their expiry values until the timers
 * themselves are deleted.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
	cleanup_timers_list(head);
	cleanup_timers_list(++head);
	cleanup_timers_list(++head);
}
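
/*
 * Both tsk->cpu_timers and tsk->signal->cpu_timers are arrays of
 * CPUCLOCK_MAX (i.e. three) list heads, indexed by CPUCLOCK_PROF,
 * CPUCLOCK_VIRT and CPUCLOCK_SCHED, which is why cleanup_timers() above
 * and arm_timer() below step through head, head + 1 and head + 2.
 */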

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers);
}

static inline int expires_gt(u64 expires, u64 new_exp)
{
	return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		u64 exp = nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */

		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, exp))
				cputime_expires->prof_exp = exp;
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, exp))
				cputime_expires->virt_exp = exp;
			break;
		case CPUCLOCK_SCHED:
			if (expires_gt(cputime_expires->sched_exp, exp))
				cputime_expires->sched_exp = exp;
			break;
		}
		if (CPUCLOCK_PERTHREAD(timer->it_clock))
			tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
		else
			tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
	}
}
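
/*
 * Because each list is kept sorted by expiry time, only an insertion at
 * the head can lower the earliest expiry, so the cputime_expires cache
 * (consulted by fastpath_timer_check() on every tick) needs updating
 * only in that case; the tick dependency is set at the same point so a
 * NO_HZ_FULL CPU keeps ticking while a timer is armed.
 */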

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (timer->it.cpu.incr == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_rearm(timer);
		++timer->it_requeue_pending;
	}
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p, u64 *sample)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;
	u64 old_expires, new_expires, old_incr, val;
	int ret;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer()
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	WARN_ON_ONCE(!irqs_disabled());

	ret = 0;
	old_incr = timer->it.cpu.incr;
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current time to convert the new value
	 * from relative to absolute, and to convert the old value from
	 * absolute to relative.  To set a process timer, we need a
	 * sample to balance the thread expiry times (in arm_timer).
	 * With an absolute time, we must check if it's already passed.
	 * In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				old->it_value = ns_to_timespec64(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old)
		old->it_interval = ns_to_timespec64(old_incr);

	return ret;
}
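
/*
 * Note the collision protocol above: if the timer is found with
 * it.cpu.firing set, the expiry path in run_posix_cpu_timers() owns it,
 * so we mark firing = -1 and return TIMER_RETRY.  The posix-timers core
 * drops the timer lock and calls back in, while the firing code treats
 * the -1 as "already reported as an overrun" and suppresses the event.
 */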

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
	u64 now;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ns_to_timespec64(timer->it.cpu.incr);

	if (!timer->it.cpu.expires)
		return;

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
	} else {
		struct sighand_struct *sighand;
		unsigned long flags;

		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			unlock_task_sighand(p, &flags);
		}
	}

	if (now < timer->it.cpu.expires) {
		itp->it_value = ns_to_timespec64(timer->it.cpu.expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

static unsigned long long
check_timers_list(struct list_head *timers,
		  struct list_head *firing,
		  unsigned long long curr)
{
	int maxfire = 20;

	while (!list_empty(timers)) {
		struct cpu_timer_list *t;

		t = list_first_entry(timers, struct cpu_timer_list, entry);

		if (!--maxfire || curr < t->expires)
			return t->expires;

		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	return 0;
}
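
/*
 * check_timers_list() relies on the lists being sorted: it walks expired
 * entries off the head onto "firing" and returns the expiry of the first
 * timer it leaves in place (the new cache value), or 0 if the list
 * drained.  The maxfire cap bounds how many timers are moved per call;
 * any leftover expired timers are simply picked up on a later tick,
 * since the returned expiry keeps the fast path triggering.
 */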

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires fields to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;
	struct task_cputime *tsk_expires = &tsk->cputime_expires;
	u64 expires;
	unsigned long soft;

	/*
	 * If cputime_expires is zero, then there are no active
	 * per thread CPU timers.
	 */
	if (task_cputime_zero(&tsk->cputime_expires))
		return;

	expires = check_timers_list(timers, firing, prof_ticks(tsk));
	tsk_expires->prof_exp = expires;

	expires = check_timers_list(++timers, firing, virt_ticks(tsk));
	tsk_expires->virt_exp = expires;

	tsk_expires->sched_exp = check_timers_list(++timers, firing,
						   tsk->se.sum_exec_runtime);

	/*
	 * Check for the special case thread timers.
	 */
	soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long hard =
			READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
			}
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
	if (task_cputime_zero(tsk_expires))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;

	/* Turn off cputimer->running. This is done without locking. */
	WRITE_ONCE(cputimer->running, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    tsk->signal->leader_pid, cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires))
		*expires = it->expires;
}
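
/*
 * check_cpu_itimer() is the CPU-time half of setitimer(): when the
 * ITIMER_PROF or ITIMER_VIRTUAL value in sig->it[] expires, it sends
 * SIGPROF or SIGVTALRM, reloads the interval if there is one, and folds
 * the remaining expiry into the same process-wide expiration cache that
 * the POSIX timers and RLIMIT_CPU use.
 */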

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers[N] lists onto the firing list.  The
 * per-thread timers have already been taken off in check_thread_timers().
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	u64 utime, ptime, virt_expires, prof_expires;
	u64 sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * If cputimer is not running, then there are no active
	 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
	 */
	if (!READ_ONCE(tsk->signal->cputimer.running))
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	sig->cputimer.checking_timer = true;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = utime + cputime.stime;
	sum_sched_runtime = cputime.sum_exec_runtime;

	prof_expires = check_timers_list(timers, firing, ptime);
	virt_expires = check_timers_list(++timers, firing, utime);
	sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
		unsigned long hard =
			READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
		u64 x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = soft * NSEC_PER_SEC;
		if (!prof_expires || x < prof_expires)
			prof_expires = x;
	}

	sig->cputime_expires.prof_exp = prof_expires;
	sig->cputime_expires.virt_exp = virt_expires;
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);

	sig->cputimer.checking_timer = false;
}
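
/*
 * The checking_timer flag set above is a best-effort "someone is already
 * scanning the group timers" marker: fastpath_timer_check() below skips
 * the group comparison while it is set, so concurrent ticks on other
 * CPUs don't all pile into this relatively expensive walk at once.
 */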

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	struct task_struct *p = timer->it.cpu.task;
	u64 now;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state))
			return;

		/* Protect timer list r/w in arm_timer() */
		sighand = lock_task_sighand(p, &flags);
		if (!sighand)
			return;
	} else {
		/*
		 * Protect arm_timer() and timer sampling in case of call to
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/* If the process is dying, no need to rearm */
			goto unlock;
		}
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the sighand locked for the call below. */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	arm_timer(timer);
unlock:
	unlock_task_sighand(p, &flags);
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of @sample has reached or passed the
 * corresponding nonzero field of @expires; otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample;

		task_cputime(tsk, &task_sample.utime, &task_sample.stime);
		task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	/*
	 * Check if thread group timers expired when the cputimer is
	 * running and no other thread in the group is already checking
	 * for thread group cputimers.  These fields are read without the
	 * sighand lock.  However, this is fine because this is meant to
	 * be a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to check/handle timers.
	 *
	 * In the worst case scenario, if 'running' or 'checking_timer' gets
	 * set but the current thread doesn't see the change yet, we'll wait
	 * until the next thread in the group gets a scheduler interrupt to
	 * handle the timer. This isn't an issue in practice because these
	 * types of delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(sig->cputimer.running) &&
	    !READ_ONCE(sig->cputimer.checking_timer)) {
		struct task_cputime group_sample;

		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take all the timers that are firing off the
	 * tsk->signal->cpu_timers[N] and tsk->cpu_timers[N] lists
	 * and put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);

	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   u64 *newval, u64 *oldval)
{
	u64 now;

	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		/*
		 * We are setting an itimer.  *oldval is absolute and we
		 * update it to be relative; the *newval argument is
		 * relative and we update it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = TICK_NSEC;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			return;
		*newval += now;
	}

	/*
	 * Update the expiration cache if we are the earliest timer, or if
	 * the RLIMIT_CPU limit is earlier than the current prof_exp expiry.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}

	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
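
/*
 * set_process_cpu_timer() is the shared back end for the legacy
 * process-wide CPU limits: the itimer code uses it (with an oldval) for
 * ITIMER_PROF/ITIMER_VIRTUAL, and update_rlimit_cpu() above uses it
 * (with a NULL oldval) when RLIMIT_CPU changes, so both end up in the
 * same cputime_expires cache that is checked on every tick.
 */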

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct itimerspec64 it;
	struct k_itimer timer;
	u64 expires;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec64 zero_it;
		struct restart_block *restart;

		memset(&it, 0, sizeof(it));
		it.it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires == 0) {
				/*
				 * Our timer fired and was disarmed; the
				 * deletion below cannot fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		expires = timer.it.cpu.expires;
		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion cannot fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle the case where the timer was or
			 * is in the middle of firing.  In other cases we
			 * already freed the resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
		/*
		 * Report back to the user the time still remaining.
		 */
		restart = &current->restart_block;
		restart->nanosleep.expires = expires;
		if (restart->nanosleep.type != TT_NONE)
			error = nanosleep_copyout(restart, &it.it_value);
	}

	return error;
}
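
/*
 * When the sleep is cut short by a signal, the remaining relative time
 * is reported to userspace via nanosleep_copyout(), and in addition the
 * absolute expiry is stashed in restart->nanosleep.expires so that
 * posix_cpu_nsleep_restart() below can resume the sleep with
 * TIMER_ABSTIME, keeping repeated interruptions from stretching the
 * total CPU time slept.
 */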

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct restart_block *restart_block = &current->restart_block;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
	}
	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec64 t;

	t = ns_to_timespec64(restart_block->nanosleep.expires);

	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}

#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec64 *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec64 *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      const struct timespec64 *rqtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec64 *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec64 *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

const struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
	.timer_rearm	= posix_cpu_timer_rearm,
};

const struct k_clock clock_process = {
	.clock_getres	= process_cpu_clock_getres,
	.clock_get	= process_cpu_clock_get,
	.timer_create	= process_cpu_timer_create,
	.nsleep		= process_cpu_nsleep,
};

const struct k_clock clock_thread = {
	.clock_getres	= thread_cpu_clock_getres,
	.clock_get	= thread_cpu_clock_get,
	.timer_create	= thread_cpu_timer_create,
};
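
/*
 * These k_clock tables are consumed by the posix-timers core
 * (kernel/time/posix-timers.c): clock_process and clock_thread back the
 * fixed CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID ids, while
 * clock_posix_cpu is selected for the dynamically encoded per-PID CPU
 * clockids (the negative values built by the CPUCLOCK_* macros).
 */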