// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}

static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	int i;
	u64 delta, incr;

	if (timer->it.cpu.incr == 0)
		return;

	if (now < timer->it.cpu.expires)
		return;

	incr = timer->it.cpu.incr;
	delta = now + incr - timer->it.cpu.expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.expires += incr;
		timer->it_overrun += 1LL << i;
		delta -= incr;
	}
}
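
/*
 * A worked example of the overrun accounting above, with values chosen
 * purely for illustration: incr = 4, expires = 10, now = 25.  Then
 * delta = 25 + 4 - 10 = 19.  The doubling loop stops at incr = 16
 * (i = 2); the second loop adds 16 to expires (overrun += 1 << 2 = 4)
 * and skips the 8 and 4 steps, leaving expires = 26, the first expiry
 * strictly after 'now', with four missed periods recorded as overruns.
 */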

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}

static inline u64 prof_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime + stime;
}
static inline u64 virt_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime;
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec64 *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock,
			    struct task_struct *p, u64 *sample)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		*sample = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		*sample = task_sched_runtime(p);
		break;
	}
	return 0;
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}
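
/*
 * The helpers above implement a lock-free "monotonic maximum": a cached
 * value only ever moves forward.  If, say, two threads race to publish
 * samples of 5ms and 7ms against a stored 4ms, one cmpxchg succeeds, the
 * loser re-reads and retries, and the field settles on 7ms regardless of
 * which update lands first (values here are illustrative only).
 */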

/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
					 struct task_cputime_atomic *atomic_times)
{
	times->utime = atomic64_read(&atomic_times->utime);
	times->stime = atomic64_read(&atomic_times->stime);
	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running)) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting cputimer->running without a lock. Ensure
		 * this only gets written to in one operation. We set
		 * running after update_gt_cputime() as a small optimization,
		 * but barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(cputimer->running, true);
	}
	sample_cputime_atomic(times, &cputimer->cputime_atomic);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  u64 *sample)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

static int posix_cpu_clock_get_task(struct task_struct *tsk,
				    const clockid_t which_clock,
				    struct timespec64 *tp)
{
	int err = -EINVAL;
	u64 rtn;

	if (CPUCLOCK_PERTHREAD(which_clock)) {
		if (same_thread_group(tsk, current))
			err = cpu_clock_sample(which_clock, tsk, &rtn);
	} else {
		if (tsk == current || thread_group_leader(tsk))
			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
	}

	if (!err)
		*tp = ns_to_timespec64(rtn);

	return err;
}


static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec64 *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int err = -EINVAL;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		err = posix_cpu_clock_get_task(current, which_clock, tp);
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p)
			err = posix_cpu_clock_get_task(p, which_clock, tp);
		rcu_read_unlock();
	}

	return err;
}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	new_timer->kclock = &clock_posix_cpu;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	int ret = 0;
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * We raced with the reaping of the task.
		 * The deletion should have cleared us off the list.
		 */
		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			list_del(&timer->it.cpu.entry);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}

static void cleanup_timers_list(struct list_head *head)
{
	struct cpu_timer_list *timer, *next;

	list_for_each_entry_safe(timer, next, head, entry)
		list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
	cleanup_timers_list(head);
	cleanup_timers_list(++head);
	cleanup_timers_list(++head);
}
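
/*
 * The 'head' passed in above points at an array of three list_heads, one
 * per clock type (CPUCLOCK_PROF, CPUCLOCK_VIRT, CPUCLOCK_SCHED), which is
 * why the ++head walk clears exactly three lists; arm_timer() below indexes
 * into the same array with CPUCLOCK_WHICH().
 */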

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers);
}

static inline int expires_gt(u64 expires, u64 new_exp)
{
	return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		u64 exp = nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */

		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, exp))
				cputime_expires->prof_exp = exp;
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, exp))
				cputime_expires->virt_exp = exp;
			break;
		case CPUCLOCK_SCHED:
			if (expires_gt(cputime_expires->sched_exp, exp))
				cputime_expires->sched_exp = exp;
			break;
		}
		if (CPUCLOCK_PERTHREAD(timer->it_clock))
			tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
		else
			tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
	}
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (timer->it.cpu.incr == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_rearm(timer);
		++timer->it_requeue_pending;
	}
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p, u64 *sample)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;
	u64 old_expires, new_expires, old_incr, val;
	int ret;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer()
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */

	ret = 0;
	old_incr = timer->it.cpu.incr;
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				old->it_value = ns_to_timespec64(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
	timer->it_interval = ns_to_ktime(timer->it.cpu.incr);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
out:
	if (old)
		old->it_interval = ns_to_timespec64(old_incr);

	return ret;
}

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
	u64 now;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ns_to_timespec64(timer->it.cpu.incr);

	if (!timer->it.cpu.expires)
		return;

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
	} else {
		struct sighand_struct *sighand;
		unsigned long flags;

		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			unlock_task_sighand(p, &flags);
		}
	}

	if (now < timer->it.cpu.expires) {
		itp->it_value = ns_to_timespec64(timer->it.cpu.expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

static unsigned long long
check_timers_list(struct list_head *timers,
		  struct list_head *firing,
		  unsigned long long curr)
{
	int maxfire = 20;

	while (!list_empty(timers)) {
		struct cpu_timer_list *t;

		t = list_first_entry(timers, struct cpu_timer_list, entry);

		if (!--maxfire || curr < t->expires)
			return t->expires;

		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	return 0;
}
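
/*
 * Note on check_timers_list() above: the per-clock lists are kept sorted
 * by arm_timer(), so the walk stops at the first entry that has not yet
 * expired.  Everything before it is flagged ->firing and moved to the
 * caller's firing list (maxfire bounds a single pass), and the return
 * value, the expiry of the first timer left, or 0 if the list was drained,
 * is what the callers store back into the expiration cache.
 */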

static inline void check_dl_overrun(struct task_struct *tsk)
{
	if (tsk->dl.dl_overrun) {
		tsk->dl.dl_overrun = 0;
		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
	}
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct list_head *timers = tsk->cpu_timers;
	struct task_cputime *tsk_expires = &tsk->cputime_expires;
	u64 expires;
	unsigned long soft;

	if (dl_task(tsk))
		check_dl_overrun(tsk);

	/*
	 * If cputime_expires is zero, then there are no active
	 * per thread CPU timers.
	 */
	if (task_cputime_zero(&tsk->cputime_expires))
		return;

	expires = check_timers_list(timers, firing, prof_ticks(tsk));
	tsk_expires->prof_exp = expires;

	expires = check_timers_list(++timers, firing, virt_ticks(tsk));
	tsk_expires->virt_exp = expires;

	tsk_expires->sched_exp = check_timers_list(++timers, firing,
						   tsk->se.sum_exec_runtime);

	/*
	 * Check for the special case thread timers.
	 */
	soft = task_rlimit(tsk, RLIMIT_RTTIME);
	if (soft != RLIM_INFINITY) {
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
			}
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
	if (task_cputime_zero(tsk_expires))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;

	/* Turn off cputimer->running. This is done without locking. */
	WRITE_ONCE(cputimer->running, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    task_tgid(tsk), cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires))
		*expires = it->expires;
}

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->*_timers list onto the firing list.  Per-thread timers
 * have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	u64 utime, ptime, virt_expires, prof_expires;
	u64 sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * If cputimer is not running, then there are no active
	 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
	 */
	if (!READ_ONCE(tsk->signal->cputimer.running))
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	sig->cputimer.checking_timer = true;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = utime + cputime.stime;
	sum_sched_runtime = cputime.sum_exec_runtime;

	prof_expires = check_timers_list(timers, firing, ptime);
	virt_expires = check_timers_list(++timers, firing, utime);
	sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = task_rlimit(tsk, RLIMIT_CPU);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
		u64 x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = soft * NSEC_PER_SEC;
		if (!prof_expires || x < prof_expires)
			prof_expires = x;
	}

	sig->cputime_expires.prof_exp = prof_expires;
	sig->cputime_expires.virt_exp = virt_expires;
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);

	sig->cputimer.checking_timer = false;
}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	struct task_struct *p = timer->it.cpu.task;
	u64 now;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state))
			return;

		/* Protect timer list r/w in arm_timer() */
		sighand = lock_task_sighand(p, &flags);
		if (!sighand)
			return;
	} else {
		/*
		 * Protect arm_timer() and timer sampling in case of call to
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/* If the process is dying, no need to rearm */
			goto unlock;
		}
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the sighand locked for the call below. */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer);
unlock:
	unlock_task_sighand(p, &flags);
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than or equal to the
 * corresponding field of the latter if the latter field is set.  Otherwise
 * returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample;

		task_cputime(tsk, &task_sample.utime, &task_sample.stime);
		task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	/*
	 * Check if thread group timers expired when the cputimer is
	 * running and no other thread in the group is already checking
	 * for thread group cputimers.  These fields are read without the
	 * sighand lock.  However, this is fine because this is meant to
	 * be a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to check/handle timers.
	 *
	 * In the worst case scenario, if 'running' or 'checking_timer' gets
	 * set but the current thread doesn't see the change yet, we'll wait
	 * until the next thread in the group gets a scheduler interrupt to
	 * handle the timer. This isn't an issue in practice because these
	 * types of delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(sig->cputimer.running) &&
	    !READ_ONCE(sig->cputimer.checking_timer)) {
		struct task_cputime group_sample;

		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	if (dl_task(tsk) && tsk->dl.dl_overrun)
		return 1;

	return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	lockdep_assert_irqs_disabled();

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);

	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   u64 *newval, u64 *oldval)
{
	u64 now;
	int ret;

	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
	ret = cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval && ret != -EINVAL) {
		/*
		 * We are setting itimer. The *oldval is absolute and we update
		 * it to be relative, *newval argument is relative and we update
		 * it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = TICK_NSEC;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			return;
		*newval += now;
	}

	/*
	 * Update the expiration cache if we are the earliest timer, or if the
	 * RLIMIT_CPU limit is earlier than the current prof_exp cpu timer expiry.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}

	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
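
/*
 * For instance, update_rlimit_cpu() at the top of this file funnels a new
 * RLIMIT_CPU value here as CPUCLOCK_PROF with the limit converted to
 * nanoseconds (rlim_new * NSEC_PER_SEC) and no oldval, while the
 * setitimer(ITIMER_PROF/ITIMER_VIRTUAL) path passes both pointers so the
 * previous setting can be handed back relative to 'now'.
 */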

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct itimerspec64 it;
	struct k_itimer timer;
	u64 expires;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec64 zero_it;
		struct restart_block *restart;

		memset(&it, 0, sizeof(it));
		it.it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires == 0) {
				/*
				 * Our timer fired and was reset, below
				 * deletion can not fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		expires = timer.it.cpu.expires;
		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion can not fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle the case when the timer was or is
			 * in the middle of firing.  In other cases we already
			 * freed the resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
		/*
		 * Report back to the user the time still remaining.
		 */
		restart = &current->restart_block;
		restart->nanosleep.expires = expires;
		if (restart->nanosleep.type != TT_NONE)
			error = nanosleep_copyout(restart, &it.it_value);
	}

	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct restart_block *restart_block = &current->restart_block;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
	}
	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec64 t;

	t = ns_to_timespec64(restart_block->nanosleep.expires);

	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}

#define PROCESS_CLOCK	make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	make_thread_cpuclock(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec64 *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec64 *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      const struct timespec64 *rqtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec64 *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec64 *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

const struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
	.timer_rearm	= posix_cpu_timer_rearm,
};

const struct k_clock clock_process = {
	.clock_getres	= process_cpu_clock_getres,
	.clock_get	= process_cpu_clock_get,
	.timer_create	= process_cpu_timer_create,
	.nsleep		= process_cpu_nsleep,
};

const struct k_clock clock_thread = {
	.clock_getres	= thread_cpu_clock_getres,
	.clock_get	= thread_cpu_clock_get,
	.timer_create	= thread_cpu_timer_create,
};
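
/*
 * Userspace view of the clocks above (illustrative sketch only; error
 * handling omitted): CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID
 * are served by clock_process/clock_thread, while dynamic clockids obtained
 * with clock_getcpuclockid() for another process go through clock_posix_cpu.
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *
 *		// CPU time consumed by the whole process so far
 *		clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
 *		printf("process: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *
 *		// CPU time consumed by the calling thread only
 *		clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *		printf("thread:  %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 */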