// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}

static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	int i;
	u64 delta, incr;

	if (timer->it.cpu.incr == 0)
		return;

	if (now < timer->it.cpu.expires)
		return;

	incr = timer->it.cpu.incr;
	delta = now + incr - timer->it.cpu.expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.expires += incr;
		timer->it_overrun += 1LL << i;
		delta -= incr;
	}
}
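
/*
 * Worked example of the overrun accounting above: with expires = 10,
 * incr = 3 and now = 25, delta = 18. The doubling loop stops at
 * incr = 12 (i = 2); the halving loop then advances the expiry by
 * 12 + 6 = 18 (new expires = 28, the first period boundary after 'now')
 * and adds 4 + 2 = 6 to it_overrun, i.e. the number of whole periods
 * that elapsed.
 */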

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}

static inline u64 prof_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime + stime;
}
static inline u64 virt_ticks(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);

	return utime;
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec64 *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock,
			    struct task_struct *p, u64 *sample)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		*sample = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		*sample = task_sched_runtime(p);
		break;
	}
	return 0;
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}
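
/*
 * __update_gt_cputime() is a lock-free "advance to the maximum" update:
 * if two threads race to raise the same field, the loser's cmpxchg fails,
 * it re-reads the new value and only retries if its own sample is still
 * larger, so the stored value can only move forward.
 */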

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
					 struct task_cputime_atomic *atomic_times)
{
	times->utime = atomic64_read(&atomic_times->utime);
	times->stime = atomic64_read(&atomic_times->stime);
	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running)) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting cputimer->running without a lock. Ensure
		 * this only gets written to in one operation. We set
		 * running after update_gt_cputime() as a small optimization,
		 * but barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(cputimer->running, true);
	}
	sample_cputime_atomic(times, &cputimer->cputime_atomic);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  u64 *sample)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

static int posix_cpu_clock_get_task(struct task_struct *tsk,
				    const clockid_t which_clock,
				    struct timespec64 *tp)
{
	int err = -EINVAL;
	u64 rtn;

	if (CPUCLOCK_PERTHREAD(which_clock)) {
		if (same_thread_group(tsk, current))
			err = cpu_clock_sample(which_clock, tsk, &rtn);
	} else {
		if (tsk == current || thread_group_leader(tsk))
			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
	}

	if (!err)
		*tp = ns_to_timespec64(rtn);

	return err;
}


static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec64 *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int err = -EINVAL;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		err = posix_cpu_clock_get_task(current, which_clock, tp);
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p)
			err = posix_cpu_clock_get_task(p, which_clock, tp);
		rcu_read_unlock();
	}

	return err;
}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	new_timer->kclock = &clock_posix_cpu;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	int ret = 0;
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * We raced with the reaping of the task.
		 * The deletion should have cleared us off the list.
		 */
		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			list_del(&timer->it.cpu.entry);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}

static void cleanup_timers_list(struct list_head *head)
{
	struct cpu_timer_list *timer, *next;

	list_for_each_entry_safe(timer, next, head, entry)
		list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
	cleanup_timers_list(head);
	cleanup_timers_list(++head);
	cleanup_timers_list(++head);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers);
}

static inline int expires_gt(u64 expires, u64 new_exp)
{
	return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
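/*
 * Besides queueing the timer, arm_timer() refreshes the cputime_expires
 * cache (the earliest expiry per clock) when the new timer becomes the
 * head of its list; fastpath_timer_check() compares clock samples against
 * that cache on every tick, so the timer lists only need to be walked
 * under sighand lock when something may actually have expired.
 */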
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		u64 exp = nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */

		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, exp))
				cputime_expires->prof_exp = exp;
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, exp))
				cputime_expires->virt_exp = exp;
			break;
		case CPUCLOCK_SCHED:
			if (expires_gt(cputime_expires->sched_exp, exp))
				cputime_expires->sched_exp = exp;
			break;
		}
		if (CPUCLOCK_PERTHREAD(timer->it_clock))
			tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
		else
			tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
	}
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (timer->it.cpu.incr == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_rearm(timer);
		++timer->it_requeue_pending;
	}
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p, u64 *sample)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = cputime.utime + cputime.stime;
		break;
	case CPUCLOCK_VIRT:
		*sample = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;
	u64 old_expires, new_expires, old_incr, val;
	int ret;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer()
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */

	ret = 0;
	old_incr = timer->it.cpu.incr;
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				old->it_value = ns_to_timespec64(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
out:
	if (old)
		old->it_interval = ns_to_timespec64(old_incr);

	return ret;
}
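
/*
 * Example: a per-thread CPUCLOCK_PROF timer armed without TIMER_ABSTIME
 * and it_value = 2s, on a thread that has already consumed val = 5s of
 * utime+stime, ends up with an absolute expiry of 7s. The getter below
 * reports the remaining time by subtracting a fresh clock sample from
 * that stored absolute expiry.
 */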

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
	u64 now;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ns_to_timespec64(timer->it.cpu.incr);

	if (!timer->it.cpu.expires)
		return;

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
	} else {
		struct sighand_struct *sighand;
		unsigned long flags;

		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			unlock_task_sighand(p, &flags);
		}
	}

	if (now < timer->it.cpu.expires) {
		itp->it_value = ns_to_timespec64(timer->it.cpu.expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}
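
/*
 * Move the timers at the head of one expiry list that have expired at
 * 'curr' onto 'firing', flagging each as firing; the scan is capped at
 * 20 list entries per call to bound the work done under sighand lock.
 * Returns the expiry of the first timer left on the list (the new value
 * for the expiration cache), or 0 if the list was drained.
 */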

static unsigned long long
check_timers_list(struct list_head *timers,
		  struct list_head *firing,
		  unsigned long long curr)
{
	int maxfire = 20;

	while (!list_empty(timers)) {
		struct cpu_timer_list *t;

		t = list_first_entry(timers, struct cpu_timer_list, entry);

		if (!--maxfire || curr < t->expires)
			return t->expires;

		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	return 0;
}

static inline void check_dl_overrun(struct task_struct *tsk)
{
	if (tsk->dl.dl_overrun) {
		tsk->dl.dl_overrun = 0;
		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
	}
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct list_head *timers = tsk->cpu_timers;
	struct task_cputime *tsk_expires = &tsk->cputime_expires;
	u64 expires;
	unsigned long soft;

	if (dl_task(tsk))
		check_dl_overrun(tsk);

	/*
	 * If cputime_expires is zero, then there are no active
	 * per thread CPU timers.
	 */
	if (task_cputime_zero(&tsk->cputime_expires))
		return;

	expires = check_timers_list(timers, firing, prof_ticks(tsk));
	tsk_expires->prof_exp = expires;

	expires = check_timers_list(++timers, firing, virt_ticks(tsk));
	tsk_expires->virt_exp = expires;

	tsk_expires->sched_exp = check_timers_list(++timers, firing,
						   tsk->se.sum_exec_runtime);

	/*
	 * Check for the special case thread timers.
	 */
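	/*
	 * tsk->rt.timeout counts scheduler ticks of RT runtime, while
	 * RLIMIT_RTTIME is given in microseconds, so the limits are
	 * converted with DIV_ROUND_UP(limit, USEC_PER_SEC/HZ); e.g. with
	 * HZ=100 a soft limit of 500000us corresponds to 50 ticks.
	 */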
	soft = task_rlimit(tsk, RLIMIT_RTTIME);
	if (soft != RLIM_INFINITY) {
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur =
					soft;
			}
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
	if (task_cputime_zero(tsk_expires))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;

	/* Turn off cputimer->running. This is done without locking. */
	WRITE_ONCE(cputimer->running, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    task_tgid(tsk), cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires))
		*expires = it->expires;
}

/*
 * Check for any per-process (thread group) CPU timers that have fired and
 * move them off the tsk->signal->cpu_timers list onto the firing list.
 * Per-thread timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	u64 utime, ptime, virt_expires, prof_expires;
	u64 sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	if (dl_task(tsk))
		check_dl_overrun(tsk);

	/*
	 * If cputimer is not running, then there are no active
	 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
	 */
	if (!READ_ONCE(tsk->signal->cputimer.running))
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	sig->cputimer.checking_timer = true;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = utime + cputime.stime;
	sum_sched_runtime = cputime.sum_exec_runtime;

	prof_expires = check_timers_list(timers, firing, ptime);
	virt_expires = check_timers_list(++timers, firing, utime);
	sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
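	/*
	 * RLIMIT_CPU is in seconds of combined utime+stime. At the soft
	 * limit a SIGXCPU is sent and rlim_cur is bumped by one second
	 * (while still below the hard limit), so the signal repeats for
	 * each further second of CPU time; prof_expires is clamped to the
	 * limit below so the expiration cache re-arms accordingly.
	 */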
	soft = task_rlimit(tsk, RLIMIT_CPU);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
		u64 x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			if (print_fatal_signals) {
				pr_info("RT Watchdog Timeout (hard): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (print_fatal_signals) {
				pr_info("CPU Watchdog Timeout (soft): %s[%d]\n",
					tsk->comm, task_pid_nr(tsk));
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = soft * NSEC_PER_SEC;
		if (!prof_expires || x < prof_expires)
			prof_expires = x;
	}

	sig->cputime_expires.prof_exp = prof_expires;
	sig->cputime_expires.virt_exp = virt_expires;
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);

	sig->cputimer.checking_timer = false;
}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	struct task_struct *p = timer->it.cpu.task;
	u64 now;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state))
			return;

		/* Protect timer list r/w in arm_timer() */
		sighand = lock_task_sighand(p, &flags);
		if (!sighand)
			return;
	} else {
		/*
		 * Protect arm_timer() and timer sampling in case of call to
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			timer->it.cpu.expires = 0;
			return;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/* If the process is dying, no need to rearm */
			goto unlock;
		}
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the sighand locked for the call below. */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer);
unlock:
	unlock_task_sighand(p, &flags);
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample;

		task_cputime(tsk, &task_sample.utime, &task_sample.stime);
		task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	/*
	 * Check if thread group timers expired when the cputimer is
	 * running and no other thread in the group is already checking
	 * for thread group cputimers. These fields are read without the
	 * sighand lock. However, this is fine because this is meant to
	 * be a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to check/handle timers.
	 *
	 * In the worst case scenario, if 'running' or 'checking_timer' gets
	 * set but the current thread doesn't see the change yet, we'll wait
	 * until the next thread in the group gets a scheduler interrupt to
	 * handle the timer. This isn't an issue in practice because these
	 * types of delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(sig->cputimer.running) &&
	    !READ_ONCE(sig->cputimer.checking_timer)) {
		struct task_cputime group_sample;

		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	if (dl_task(tsk) && tsk->dl.dl_overrun)
		return 1;

	return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	lockdep_assert_irqs_disabled();

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);

	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   u64 *newval, u64 *oldval)
{
	u64 now;
	int ret;

	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
	ret = cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval && ret != -EINVAL) {
		/*
		 * We are setting itimer.  The *oldval is absolute and we
		 * update it to be relative; the *newval argument is relative
		 * and we update it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = TICK_NSEC;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			return;
		*newval += now;
	}

	/*
	 * Update the expiration cache if we are the earliest timer, or if the
	 * RLIMIT_CPU limit ends up earlier than the prof_exp cpu timer expiry.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}

	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
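
/*
 * Example: when ITIMER_PROF is set to 1s via set_process_cpu_timer() on a
 * group that has already consumed now = 4s of CPU time, *newval becomes
 * an absolute expiry of 5s, and a previously stored absolute expiry of
 * 4.5s is handed back through *oldval as 0.5s remaining.
 */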

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct itimerspec64 it;
	struct k_itimer timer;
	u64 expires;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec64 zero_it;
		struct restart_block *restart;

		memset(&it, 0, sizeof(it));
		it.it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires == 0) {
				/*
				 * Our timer fired and was reset, the
				 * deletion below cannot fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		expires = timer.it.cpu.expires;
		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion can not fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle the case where the timer was or
			 * is in the middle of firing.  In other cases we
			 * already freed the resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
		/*
		 * Report back to the user the time still remaining.
		 */
		restart = &current->restart_block;
		restart->nanosleep.expires = expires;
		if (restart->nanosleep.type != TT_NONE)
			error = nanosleep_copyout(restart, &it.it_value);
	}

	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct restart_block *restart_block = &current->restart_block;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
	}
	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec64 t;

	t = ns_to_timespec64(restart_block->nanosleep.expires);

	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}
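
/*
 * The clock_process and clock_thread k_clock instances below are used by
 * the posix-timers core for CLOCK_PROCESS_CPUTIME_ID and
 * CLOCK_THREAD_CPUTIME_ID; they redirect to the caller's own CPU clock
 * using the scheduler runtime clock (CPUCLOCK_SCHED), so e.g. a userspace
 * clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) ends up in
 * process_cpu_clock_get().
 */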

#define PROCESS_CLOCK	make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	make_thread_cpuclock(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec64 *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec64 *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      const struct timespec64 *rqtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec64 *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec64 *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

const struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
	.timer_rearm	= posix_cpu_timer_rearm,
};

const struct k_clock clock_process = {
	.clock_getres	= process_cpu_clock_getres,
	.clock_get	= process_cpu_clock_get,
	.timer_create	= process_cpu_timer_create,
	.nsleep		= process_cpu_nsleep,
};

const struct k_clock clock_thread = {
	.clock_getres	= thread_cpu_clock_getres,
	.clock_get	= thread_cpu_clock_get,
	.timer_create	= thread_cpu_timer_create,
};