// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}

static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		    same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	int i;
	u64 delta, incr;

	if (!timer->it_interval)
		return;

	if (now < timer->it.cpu.expires)
		return;

	incr = timer->it_interval;
	delta = now + incr - timer->it.cpu.expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.expires += incr;
		timer->it_overrun += 1LL << i;
		delta -= incr;
	}
}
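
/*
 * Worked example for bump_cpu_timer(): with it_interval = 10ms,
 * expires = 100ms and now = 135ms, delta = now + incr - expires = 45ms.
 * The first loop doubles incr until doubling again would overshoot,
 * ending with incr = 40ms and i = 2; the second loop then adds 40ms once,
 * bumping it_overrun by 1 << 2 = 4 and leaving expires = 140ms, the first
 * period boundary after 'now'.
 */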

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}

static inline u64 prof_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime + stime;
}
static inline u64 virt_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime;
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec64 *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock,
			    struct task_struct *p, u64 *sample)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		*sample = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		*sample = task_sched_runtime(p);
		break;
	}
	return 0;
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
					 struct task_cputime_atomic *atomic_times)
{
	times->utime = atomic64_read(&atomic_times->utime);
	times->stime = atomic64_read(&atomic_times->stime);
	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running)) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting cputimer->running without a lock. Ensure
		 * this only gets written to in one operation. We set
		 * running after update_gt_cputime() as a small optimization,
		 * but barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(cputimer->running, true);
	}
	sample_cputime_atomic(times, &cputimer->cputime_atomic);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  u64 *sample)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

static int posix_cpu_clock_get_task(struct task_struct *tsk,
				    const clockid_t which_clock,
				    struct timespec64 *tp)
{
	int err = -EINVAL;
	u64 rtn;

	if (CPUCLOCK_PERTHREAD(which_clock)) {
		if (same_thread_group(tsk, current))
			err = cpu_clock_sample(which_clock, tsk, &rtn);
	} else {
		if (tsk == current || thread_group_leader(tsk))
			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
	}

	if (!err)
		*tp = ns_to_timespec64(rtn);

	return err;
}


static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec64 *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int err = -EINVAL;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		err = posix_cpu_clock_get_task(current, which_clock, tp);
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p)
			err = posix_cpu_clock_get_task(p, which_clock, tp);
		rcu_read_unlock();
	}

	return err;
}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	new_timer->kclock = &clock_posix_cpu;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}
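
/*
 * For reference: the CPU clockid encodes which task and which clock is
 * meant.  A pid of 0 selects the caller itself (or its own process), a
 * per-thread clock must name a thread in the caller's thread group, and
 * a process clock must name a thread-group leader.  CPUCLOCK_PROF counts
 * utime + stime, CPUCLOCK_VIRT counts utime only, and CPUCLOCK_SCHED uses
 * the scheduler's sum_exec_runtime.
 */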

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	int ret = 0;
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * We raced with the reaping of the task.
		 * The deletion should have cleared us off the list.
		 */
		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			list_del(&timer->it.cpu.entry);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}

static void cleanup_timers_list(struct list_head *head)
{
	struct cpu_timer_list *timer, *next;

	list_for_each_entry_safe(timer, next, head, entry)
		list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
	cleanup_timers_list(head);
	cleanup_timers_list(++head);
	cleanup_timers_list(++head);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers);
}

static inline int expires_gt(u64 expires, u64 new_exp)
{
	return expires == 0 || expires > new_exp;
}
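
/*
 * Example: an expiration cache value of 0 means "no timer armed", so
 * expires_gt(0, x) is true for any x and the cache gets initialized;
 * expires_gt(10, 7) is true (7 expires earlier, update the cache) while
 * expires_gt(7, 10) is false (the cached expiry is already earlier).
 */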

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		u64 exp = nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */

		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, exp))
				cputime_expires->prof_exp = exp;
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, exp))
				cputime_expires->virt_exp = exp;
			break;
		case CPUCLOCK_SCHED:
			if (expires_gt(cputime_expires->sched_exp, exp))
				cputime_expires->sched_exp = exp;
			break;
		}
		if (CPUCLOCK_PERTHREAD(timer->it_clock))
			tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
		else
			tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
	}
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (!timer->it_interval) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_rearm(timer);
		++timer->it_requeue_pending;
	}
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p, u64 *sample)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}
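
/*
 * Unlike cpu_clock_sample_group(), which sums the thread group times
 * afresh for the clock_gettime() path, cpu_timer_sample_group() goes
 * through thread_group_cputimer() and therefore starts (and reads) the
 * cached atomic accumulator; it is used on the timer paths
 * (settime/gettime/rearm and set_process_cpu_timer).
 */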

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;
	u64 old_expires, new_expires, old_incr, val;
	int ret;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer()
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */

	ret = 0;
	old_incr = timer->it_interval;
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				old->it_value = ns_to_timespec64(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it_interval = timespec64_to_ktime(new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
out:
	if (old)
		old->it_interval = ns_to_timespec64(old_incr);

	return ret;
}
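
/*
 * Example: a relative timer_settime() of 500ms on a thread that has already
 * consumed 2s of CPU time arms the timer at an absolute expiry of 2.5s
 * (new_expires += val above).  With TIMER_ABSTIME and an expiry the clock
 * has already passed, the timer is not armed at all and cpu_timer_fire()
 * delivers the expiry immediately.
 */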

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
	u64 now;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ktime_to_timespec64(timer->it_interval);

	if (!timer->it.cpu.expires)
		return;

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
	} else {
		struct sighand_struct *sighand;
		unsigned long flags;

		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			unlock_task_sighand(p, &flags);
		}
	}

	if (now < timer->it.cpu.expires) {
		itp->it_value = ns_to_timespec64(timer->it.cpu.expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

static unsigned long long
check_timers_list(struct list_head *timers,
		  struct list_head *firing,
		  unsigned long long curr)
{
	int maxfire = 20;

	while (!list_empty(timers)) {
		struct cpu_timer_list *t;

		t = list_first_entry(timers, struct cpu_timer_list, entry);

		if (!--maxfire || curr < t->expires)
			return t->expires;

		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	return 0;
}
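
/*
 * Note on check_timers_list(): the timer lists are kept sorted by expiry
 * (see arm_timer()), so the walk stops at the first timer that has not
 * expired yet, or after moving at most 20 expired timers per pass to bound
 * the work done here.  The returned value is the expiry of the first timer
 * left on the list, or 0 if the list was drained; the callers use it as
 * the new expiration-cache value.
 */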

static inline void check_dl_overrun(struct task_struct *tsk)
{
	if (tsk->dl.dl_overrun) {
		tsk->dl.dl_overrun = 0;
		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
	}
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct list_head *timers = tsk->cpu_timers;
	struct task_cputime *tsk_expires = &tsk->cputime_expires;
	u64 expires;
	unsigned long soft;

	if (dl_task(tsk))
		check_dl_overrun(tsk);

	/*
	 * If cputime_expires is zero, then there are no active
	 * per thread CPU timers.
	 */
	if (task_cputime_zero(&tsk->cputime_expires))
		return;

	expires = check_timers_list(timers, firing, prof_ticks(tsk));
	tsk_expires->prof_exp = expires;

	expires = check_timers_list(++timers, firing, virt_ticks(tsk));
	tsk_expires->virt_exp = expires;

	tsk_expires->sched_exp = check_timers_list(++timers, firing,
						   tsk->se.sum_exec_runtime);

	/*
	 * Check for the special case thread timers.
	 */
	soft = task_rlimit(tsk, RLIMIT_RTTIME);
	if (soft != RLIM_INFINITY) {
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur =
					soft;
			}
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
	if (task_cputime_zero(tsk_expires))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;

	/* Turn off cputimer->running.  This is done without locking. */
	WRITE_ONCE(cputimer->running, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    task_tgid(tsk), cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires))
		*expires = it->expires;
}
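
/*
 * check_cpu_itimer() handles the ITIMER_PROF/ITIMER_VIRTUAL state kept in
 * sig->it[]: once cur_time reaches it->expires, the signal (SIGPROF or
 * SIGVTALRM) is sent and the expiry either advances by it->incr or is
 * cleared for a one-shot itimer.  Whatever expiry remains is folded into
 * the shared expiration cache via *expires.
 */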

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers list onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	u64 utime, ptime, virt_expires, prof_expires;
	u64 sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * If cputimer is not running, then there are no active
	 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
	 */
	if (!READ_ONCE(tsk->signal->cputimer.running))
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	sig->cputimer.checking_timer = true;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = utime + cputime.stime;
	sum_sched_runtime = cputime.sum_exec_runtime;

	prof_expires = check_timers_list(timers, firing, ptime);
	virt_expires = check_timers_list(++timers, firing, utime);
	sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = task_rlimit(tsk, RLIMIT_CPU);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
		u64 x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = soft * NSEC_PER_SEC;
		if (!prof_expires || x < prof_expires)
			prof_expires = x;
	}

	sig->cputime_expires.prof_exp = prof_expires;
	sig->cputime_expires.virt_exp = virt_expires;
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);

	sig->cputimer.checking_timer = false;
}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	struct task_struct *p = timer->it.cpu.task;
	u64 now;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state))
			return;

		/* Protect timer list r/w in arm_timer() */
		sighand = lock_task_sighand(p, &flags);
		if (!sighand)
			return;
	} else {
		/*
		 * Protect arm_timer() and timer sampling in case of call to
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/* If the process is dying, no need to rearm */
			goto unlock;
		}
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the sighand locked for the call below. */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer);
unlock:
	unlock_task_sighand(p, &flags);
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than or equal to the
 * corresponding field of the latter, provided the latter field is set.
 * Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}
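
/*
 * Example: an expiration cache of { .utime = 0, .stime = 0,
 * .sum_exec_runtime = 5 * NSEC_PER_SEC } only triggers once the scheduler
 * runtime reaches five seconds; zero fields are ignored.  The stime check
 * compares utime + stime because that field caches the CPUCLOCK_PROF
 * (prof_exp) expiry.
 */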

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample;

		task_cputime(tsk, &task_sample.utime, &task_sample.stime);
		task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	/*
	 * Check if thread group timers expired when the cputimer is
	 * running and no other thread in the group is already checking
	 * for thread group cputimers.  These fields are read without the
	 * sighand lock.  However, this is fine because this is meant to
	 * be a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to check/handle timers.
	 *
	 * In the worst case scenario, if 'running' or 'checking_timer' gets
	 * set but the current thread doesn't see the change yet, we'll wait
	 * until the next thread in the group gets a scheduler interrupt to
	 * handle the timer. This isn't an issue in practice because these
	 * types of delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(sig->cputimer.running) &&
	    !READ_ONCE(sig->cputimer.checking_timer)) {
		struct task_cputime group_sample;

		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	if (dl_task(tsk) && tsk->dl.dl_overrun)
		return 1;

	return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	lockdep_assert_irqs_disabled();

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);

	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   u64 *newval, u64 *oldval)
{
	u64 now;
	int ret;

	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
	ret = cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval && ret != -EINVAL) {
		/*
		 * We are setting the itimer.  The *oldval is absolute and we
		 * update it to be relative; the *newval argument is relative
		 * and we update it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = TICK_NSEC;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			return;
		*newval += now;
	}

	/*
	 * Update the expiration cache if we are the earliest timer, or if
	 * the RLIMIT_CPU limit expires earlier than the cached prof_exp
	 * cpu timer expiry.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}

	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
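
/*
 * set_process_cpu_timer() is used for the process-wide itimers
 * (ITIMER_PROF/ITIMER_VIRTUAL, hence the comment above) and for RLIMIT_CPU
 * updates: update_rlimit_cpu() at the top of this file passes CPUCLOCK_PROF
 * with the new limit converted to nanoseconds, so setting RLIMIT_CPU to
 * ten seconds ends up here with *newval == 10 * NSEC_PER_SEC.
 */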

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct itimerspec64 it;
	struct k_itimer timer;
	u64 expires;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec64 zero_it;
		struct restart_block *restart;

		memset(&it, 0, sizeof(it));
		it.it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires == 0) {
				/*
				 * Our timer fired and was reset, the
				 * deletion below cannot fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		expires = timer.it.cpu.expires;
		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion can not fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle the case when the timer was or is
			 * in the middle of firing.  In other cases we already
			 * freed the resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
		/*
		 * Report back to the user the time still remaining.
		 */
		restart = &current->restart_block;
		restart->nanosleep.expires = expires;
		if (restart->nanosleep.type != TT_NONE)
			error = nanosleep_copyout(restart, &it.it_value);
	}

	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct restart_block *restart_block = &current->restart_block;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
	}
	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec64 t;

	t = ns_to_timespec64(restart_block->nanosleep.expires);

	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}

#define PROCESS_CLOCK	make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	make_thread_cpuclock(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec64 *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec64 *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      const struct timespec64 *rqtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec64 *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec64 *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}
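
/*
 * The k_clock tables below are wired up by the posix-timers core:
 * clock_posix_cpu handles the dynamically encoded per-task CPU clockids,
 * while clock_process and clock_thread back CLOCK_PROCESS_CPUTIME_ID and
 * CLOCK_THREAD_CPUTIME_ID for the calling task.  A typical userspace
 * consumer (illustrative sketch, not part of this file):
 *
 *	clockid_t cid;
 *	timer_t tid;
 *	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *				.sigev_signo  = SIGALRM };
 *
 *	clock_getcpuclockid(getpid(), &cid);	// CPU-time clock of this process
 *	timer_create(cid, &sev, &tid);		// ends up in posix_cpu_timer_create()
 */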

const struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
	.timer_rearm	= posix_cpu_timer_rearm,
};

const struct k_clock clock_process = {
	.clock_getres	= process_cpu_clock_getres,
	.clock_get	= process_cpu_clock_get,
	.timer_create	= process_cpu_timer_create,
	.nsleep		= process_cpu_nsleep,
};

const struct k_clock clock_thread = {
	.clock_getres	= thread_cpu_clock_getres,
	.clock_get	= thread_cpu_clock_get,
	.timer_create	= thread_cpu_timer_create,
};