// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
{
        posix_cputimers_init(pct);
        if (cpu_limit != RLIM_INFINITY) {
                pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
                pct->timers_active = true;
        }
}

/*
 * Called after updating RLIMIT_CPU to run the cpu timer and update
 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
 * necessary. Needs siglock protection since other code may update the
 * expiration cache as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
        u64 nsecs = rlim_new * NSEC_PER_SEC;

        spin_lock_irq(&task->sighand->siglock);
        set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
        spin_unlock_irq(&task->sighand->siglock);
}

/*
 * Functions for validating access to tasks.
 */
static struct task_struct *lookup_task(const pid_t pid, bool thread,
                                       bool gettime)
{
        struct task_struct *p;

        /*
         * If the encoded PID is 0, then the timer is targeted at current
         * or the process to which current belongs.
         */
        if (!pid)
                return thread ? current : current->group_leader;

        p = find_task_by_vpid(pid);
        if (!p)
                return p;

        if (thread)
                return same_thread_group(p, current) ? p : NULL;

        if (gettime) {
                /*
                 * For clock_gettime(PROCESS) the task does not need to be
                 * the actual group leader. tsk->sighand gives
                 * access to the group's clock.
                 *
                 * Timers need the group leader because they take a
                 * reference on it and store the task pointer until the
                 * timer is destroyed.
                 */
                return (p == current || thread_group_leader(p)) ? p : NULL;
        }

        /*
         * For processes, require that p is the group leader.
         */
        return has_group_leader_pid(p) ? p : NULL;
}

static struct task_struct *__get_task_for_clock(const clockid_t clock,
                                                bool getref, bool gettime)
{
        const bool thread = !!CPUCLOCK_PERTHREAD(clock);
        const pid_t pid = CPUCLOCK_PID(clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
                return NULL;

        rcu_read_lock();
        p = lookup_task(pid, thread, gettime);
        if (p && getref)
                get_task_struct(p);
        rcu_read_unlock();
        return p;
}

static inline struct task_struct *get_task_for_clock(const clockid_t clock)
{
        return __get_task_for_clock(clock, true, false);
}

static inline struct task_struct *get_task_for_clock_get(const clockid_t clock)
{
        return __get_task_for_clock(clock, true, true);
}

static inline int validate_clock_permissions(const clockid_t clock)
{
        return __get_task_for_clock(clock, false, false) ? 0 : -EINVAL;
}
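/*
 * Illustrative userspace sketch (not kernel code): the encoded clockids
 * validated above typically reach this file through the POSIX wrappers,
 * for example
 *
 *        clockid_t clk;
 *        struct timespec ts;
 *
 *        if (!clock_getcpuclockid(pid, &clk))
 *                clock_gettime(clk, &ts);
 *
 * which ends up in posix_cpu_clock_get() below. pthread_getcpuclockid()
 * produces the corresponding per-thread clockid.
 */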
static inline enum pid_type cpu_timer_pid_type(struct k_itimer *timer)
{
        return CPUCLOCK_PERTHREAD(timer->it_clock) ? PIDTYPE_PID : PIDTYPE_TGID;
}

static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
{
        return pid_task(timer->it.cpu.pid, cpu_timer_pid_type(timer));
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
{
        u64 delta, incr, expires = timer->it.cpu.node.expires;
        int i;

        if (!timer->it_interval)
                return expires;

        if (now < expires)
                return expires;

        incr = timer->it_interval;
        delta = now + incr - expires;

        /* Don't use (incr*2 < delta), incr*2 might overflow. */
        for (i = 0; incr < delta - incr; i++)
                incr = incr << 1;

        for (; i >= 0; incr >>= 1, i--) {
                if (delta < incr)
                        continue;

                timer->it.cpu.node.expires += incr;
                timer->it_overrun += 1LL << i;
                delta -= incr;
        }
        return timer->it.cpu.node.expires;
}
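/*
 * Worked example for the doubling/halving loops above (illustrative only):
 * with expires = 10, it_interval = 3 and now = 20, delta = 13. The first
 * loop doubles incr to 12 (i = 2); the second loop adds 12 to expires
 * (it_overrun += 1 << 2 = 4), then skips 6 and 3, leaving expires = 22,
 * the smallest expires + N * interval greater than 'now', with N = 4
 * accumulated in binary in it_overrun.
 */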
/* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
{
        return !(~pct->bases[CPUCLOCK_PROF].nextevt |
                 ~pct->bases[CPUCLOCK_VIRT].nextevt |
                 ~pct->bases[CPUCLOCK_SCHED].nextevt);
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
        int error = validate_clock_permissions(which_clock);

        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't have any idea of its true resolution
                         * exported, but it is much more than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}

static int
posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
{
        int error = validate_clock_permissions(clock);

        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        return error ? : -EPERM;
}

/*
 * Sample a per-thread clock for the given task. clkid is validated.
 */
static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
{
        u64 utime, stime;

        if (clkid == CPUCLOCK_SCHED)
                return task_sched_runtime(p);

        task_cputime(p, &utime, &stime);

        switch (clkid) {
        case CPUCLOCK_PROF:
                return utime + stime;
        case CPUCLOCK_VIRT:
                return utime;
        default:
                WARN_ON_ONCE(1);
        }
        return 0;
}

static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
{
        samples[CPUCLOCK_PROF] = stime + utime;
        samples[CPUCLOCK_VIRT] = utime;
        samples[CPUCLOCK_SCHED] = rtime;
}

static void task_sample_cputime(struct task_struct *p, u64 *samples)
{
        u64 stime, utime;

        task_cputime(p, &utime, &stime);
        store_samples(samples, stime, utime, p->se.sum_exec_runtime);
}

static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
                                       u64 *samples)
{
        u64 stime, utime, rtime;

        utime = atomic64_read(&at->utime);
        stime = atomic64_read(&at->stime);
        rtime = atomic64_read(&at->sum_exec_runtime);
        store_samples(samples, stime, utime, rtime);
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
        u64 curr_cputime;
retry:
        curr_cputime = atomic64_read(cputime);
        if (sum_cputime > curr_cputime) {
                if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
                        goto retry;
        }
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
                              struct task_cputime *sum)
{
        __update_gt_cputime(&cputime_atomic->utime, sum->utime);
        __update_gt_cputime(&cputime_atomic->stime, sum->stime);
        __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}
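/*
 * The cmpxchg loop above implements a lock-free monotonic maximum:
 * concurrent writers may race, but the stored value can only move forward,
 * so a fresh sum taken without the sighand lock can never rewind the
 * accumulated group time.
 */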
/**
 * thread_group_sample_cputime - Sample cputime for a given task
 * @tsk:	Task for which cputime needs to be sampled
 * @samples:	Storage for time samples
 *
 * Called from sys_getitimer() to calculate the expiry time of an active
 * timer. That means group cputime accounting is already active. Called
 * with task sighand lock held.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

        WARN_ON_ONCE(!pct->timers_active);

        proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

/**
 * thread_group_start_cputime - Start cputime and return a sample
 * @tsk:	Task for which cputime needs to be started
 * @samples:	Storage for time samples
 *
 * The thread group cputime accounting is avoided when there are no posix
 * CPU timers armed. Before starting a timer it's required to check whether
 * the time accounting is active. If not, a full update of the atomic
 * accounting store needs to be done and the accounting enabled.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

        /* Check if cputimer isn't running. This is accessed without locking. */
        if (!READ_ONCE(pct->timers_active)) {
                struct task_cputime sum;

                /*
                 * The POSIX timer interface allows for absolute time expiry
                 * values through the TIMER_ABSTIME flag, therefore we have
                 * to synchronize the timer to the clock every time we start it.
                 */
                thread_group_cputime(tsk, &sum);
                update_gt_cputime(&cputimer->cputime_atomic, &sum);

                /*
                 * We're setting timers_active without a lock. Ensure this
                 * only gets written to in one operation. We set it after
                 * update_gt_cputime() as a small optimization, but
                 * barriers are not required because update_gt_cputime()
                 * can handle concurrent updates.
                 */
                WRITE_ONCE(pct->timers_active, true);
        }
        proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
{
        struct task_cputime ct;

        thread_group_cputime(tsk, &ct);
        store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
}

/*
 * Sample a process (thread group) clock for the given task clkid. If the
 * group's cputime accounting is already enabled, read the atomic
 * store. Otherwise a full update is required. clkid is already validated.
 */
static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
                                  bool start)
{
        struct thread_group_cputimer *cputimer = &p->signal->cputimer;
        struct posix_cputimers *pct = &p->signal->posix_cputimers;
        u64 samples[CPUCLOCK_MAX];

        if (!READ_ONCE(pct->timers_active)) {
                if (start)
                        thread_group_start_cputime(p, samples);
                else
                        __thread_group_cputime(p, samples);
        } else {
                proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
        }

        return samples[clkid];
}

static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
{
        const clockid_t clkid = CPUCLOCK_WHICH(clock);
        struct task_struct *tsk;
        u64 t;

        tsk = get_task_for_clock_get(clock);
        if (!tsk)
                return -EINVAL;

        if (CPUCLOCK_PERTHREAD(clock))
                t = cpu_clock_sample(clkid, tsk);
        else
                t = cpu_clock_sample_group(clkid, tsk, false);
        put_task_struct(tsk);

        *tp = ns_to_timespec64(t);
        return 0;
}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        struct task_struct *p = get_task_for_clock(new_timer->it_clock);

        if (!p)
                return -EINVAL;

        new_timer->kclock = &clock_posix_cpu;
        timerqueue_init(&new_timer->it.cpu.node);
        new_timer->it.cpu.pid = get_task_pid(p, cpu_timer_pid_type(new_timer));
        /*
         * get_task_for_clock() took a reference on @p. Drop it as the timer
         * holds a reference on the pid of @p.
         */
        put_task_struct(p);
        return 0;
}
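/*
 * Illustrative userspace counterpart (not kernel code): a timer backed by
 * the code above is typically created with something like
 *
 *        struct sigevent sev = {
 *                .sigev_notify = SIGEV_SIGNAL,
 *                .sigev_signo  = SIGALRM,
 *        };
 *        timer_t tid;
 *
 *        timer_create(clk, &sev, &tid);
 *
 * where clk is a CPU clockid as obtained earlier, and the timer is then
 * armed via timer_settime(), which lands in posix_cpu_timer_set() below.
 */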
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
        struct cpu_timer *ctmr = &timer->it.cpu;
        struct sighand_struct *sighand;
        struct task_struct *p;
        unsigned long flags;
        int ret = 0;

        rcu_read_lock();
        p = cpu_timer_task_rcu(timer);
        if (!p)
                goto out;

        /*
         * Protect against sighand release/switch in exit/exec and process/
         * thread timer list entry concurrent read/writes.
         */
        sighand = lock_task_sighand(p, &flags);
        if (unlikely(sighand == NULL)) {
                /*
                 * This raced with the reaping of the task. The exit cleanup
                 * should have removed this timer from the timer queue.
                 */
                WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
        } else {
                if (timer->it.cpu.firing)
                        ret = TIMER_RETRY;
                else
                        cpu_timer_dequeue(ctmr);

                unlock_task_sighand(p, &flags);
        }

out:
        rcu_read_unlock();
        if (!ret)
                put_pid(ctmr->pid);

        return ret;
}

static void cleanup_timerqueue(struct timerqueue_head *head)
{
        struct timerqueue_node *node;
        struct cpu_timer *ctmr;

        while ((node = timerqueue_getnext(head))) {
                timerqueue_del(head, node);
                ctmr = container_of(node, struct cpu_timer, node);
                ctmr->head = NULL;
        }
}

/*
 * Clean out CPU timers which are still armed when a thread exits. The
 * timers are only removed from the list. No other updates are done. The
 * corresponding posix timers are still accessible, but cannot be rearmed.
 *
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct posix_cputimers *pct)
{
        cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
        cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
        cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped. When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cleanup_timers(&tsk->posix_cputimers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        cleanup_timers(&tsk->signal->posix_cputimers);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later. This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer, struct task_struct *p)
{
        int clkidx = CPUCLOCK_WHICH(timer->it_clock);
        struct cpu_timer *ctmr = &timer->it.cpu;
        u64 newexp = cpu_timer_getexpires(ctmr);
        struct posix_cputimer_base *base;

        if (CPUCLOCK_PERTHREAD(timer->it_clock))
                base = p->posix_cputimers.bases + clkidx;
        else
                base = p->signal->posix_cputimers.bases + clkidx;

        if (!cpu_timer_enqueue(&base->tqhead, ctmr))
                return;

        /*
         * We are the new earliest-expiring POSIX 1.b timer, hence
         * need to update expiration cache. Take into account that
         * for process timers we share expiration cache with itimers
         * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
         */
        if (newexp < base->nextevt)
                base->nextevt = newexp;

        if (CPUCLOCK_PERTHREAD(timer->it_clock))
                tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
        else
                tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        struct cpu_timer *ctmr = &timer->it.cpu;

        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                /*
                 * The user does not want any signal.
                 */
                cpu_timer_setexpires(ctmr, 0);
        } else if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                cpu_timer_setexpires(ctmr, 0);
        } else if (!timer->it_interval) {
                /*
                 * One-shot timer. Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                cpu_timer_setexpires(ctmr, 0);
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer. But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_rearm(timer);
                ++timer->it_requeue_pending;
        }
}
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
                               struct itimerspec64 *new, struct itimerspec64 *old)
{
        clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
        u64 old_expires, new_expires, old_incr, val;
        struct cpu_timer *ctmr = &timer->it.cpu;
        struct sighand_struct *sighand;
        struct task_struct *p;
        unsigned long flags;
        int ret = 0;

        rcu_read_lock();
        p = cpu_timer_task_rcu(timer);
        if (!p) {
                /*
                 * If p has just been reaped, we can no
                 * longer get any information about it at all.
                 */
                rcu_read_unlock();
                return -ESRCH;
        }

        /*
         * Use the to_ktime conversion because that clamps the maximum
         * value to KTIME_MAX and avoids multiplication overflows.
         */
        new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

        /*
         * Protect against sighand release/switch in exit/exec and the
         * timer list read/write in arm_timer().
         */
        sighand = lock_task_sighand(p, &flags);
        /*
         * If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(sighand == NULL)) {
                rcu_read_unlock();
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        old_incr = timer->it_interval;
        old_expires = cpu_timer_getexpires(ctmr);

        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else {
                cpu_timer_dequeue(ctmr);
        }

        /*
         * We need to sample the current value to convert the new
         * value from relative to absolute, and to convert the
         * old value from absolute to relative. To set a process
         * timer, we need a sample to balance the thread expiry
         * times (in arm_timer). With an absolute time, we must
         * check if it's already passed. In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock))
                val = cpu_clock_sample(clkid, p);
        else
                val = cpu_clock_sample_group(clkid, p, true);

        if (old) {
                if (old_expires == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has overrun already.
                         * If it has, we'll report it as having overrun and
                         * with the next reloaded timer already ticking,
                         * though we are swallowing that pending
                         * notification here to install the new setting.
                         */
                        u64 exp = bump_cpu_timer(timer, val);

                        if (val < exp) {
                                old_expires = exp - val;
                                old->it_value = ns_to_timespec64(old_expires);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                unlock_task_sighand(p, &flags);
                goto out;
        }

        if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
                new_expires += val;
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        cpu_timer_setexpires(ctmr, new_expires);
        if (new_expires != 0 && val < new_expires) {
                arm_timer(timer, p);
        }

        unlock_task_sighand(p, &flags);
        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it_interval = timespec64_to_ktime(new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires != 0 && !(val < new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
out:
        rcu_read_unlock();
        if (old)
                old->it_interval = ns_to_timespec64(old_incr);

        return ret;
}
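/*
 * Illustrative userspace counterpart (not kernel code): arming the timer
 * created earlier with a relative one second expiry and a one second
 * period goes through the function above, e.g.
 *
 *        struct itimerspec its = {
 *                .it_value    = { .tv_sec = 1 },
 *                .it_interval = { .tv_sec = 1 },
 *        };
 *
 *        timer_settime(tid, 0, &its, NULL);
 *
 * Passing TIMER_ABSTIME instead makes it_value an absolute reading of the
 * CPU clock rather than an offset from the current sample.
 */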
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
        clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
        struct cpu_timer *ctmr = &timer->it.cpu;
        u64 now, expires = cpu_timer_getexpires(ctmr);
        struct task_struct *p;

        rcu_read_lock();
        p = cpu_timer_task_rcu(timer);
        if (!p)
                goto out;

        /*
         * Easy part: convert the reload time.
         */
        itp->it_interval = ktime_to_timespec64(timer->it_interval);

        if (!expires)
                goto out;

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock))
                now = cpu_clock_sample(clkid, p);
        else
                now = cpu_clock_sample_group(clkid, p, false);

        if (now < expires) {
                itp->it_value = ns_to_timespec64(expires - now);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet. Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
out:
        rcu_read_unlock();
}

#define MAX_COLLECTED	20

static u64 collect_timerqueue(struct timerqueue_head *head,
                              struct list_head *firing, u64 now)
{
        struct timerqueue_node *next;
        int i = 0;

        while ((next = timerqueue_getnext(head))) {
                struct cpu_timer *ctmr;
                u64 expires;

                ctmr = container_of(next, struct cpu_timer, node);
                expires = cpu_timer_getexpires(ctmr);
                /* Limit the number of timers to expire at once */
                if (++i == MAX_COLLECTED || now < expires)
                        return expires;

                ctmr->firing = 1;
                cpu_timer_dequeue(ctmr);
                list_add_tail(&ctmr->elist, firing);
        }

        return U64_MAX;
}

static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
                                    struct list_head *firing)
{
        struct posix_cputimer_base *base = pct->bases;
        int i;

        for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
                base->nextevt = collect_timerqueue(&base->tqhead, firing,
                                                   samples[i]);
        }
}

static inline void check_dl_overrun(struct task_struct *tsk)
{
        if (tsk->dl.dl_overrun) {
                tsk->dl.dl_overrun = 0;
                __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
        }
}

static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
{
        if (time < limit)
                return false;

        if (print_fatal_signals) {
                pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
                        rt ? "RT" : "CPU", hard ? "hard" : "soft",
                        current->comm, task_pid_nr(current));
        }
        __group_send_sig_info(signo, SEND_SIG_PRIV, current);
        return true;
}
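/*
 * Example of the resulting behaviour (illustrative): with
 * RLIMIT_CPU = { .rlim_cur = 2, .rlim_max = 4 }, a CPU bound process gets
 * SIGXCPU roughly once per second of consumed CPU time starting at the two
 * second mark (the soft limit is bumped by one second each time in
 * check_process_timers() below), and SIGKILL once the four second hard
 * limit is reached.
 */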
"RT" : "CPU", hard ? "hard" : "soft", 825 current->comm, task_pid_nr(current)); 826 } 827 __group_send_sig_info(signo, SEND_SIG_PRIV, current); 828 return true; 829 } 830 831 /* 832 * Check for any per-thread CPU timers that have fired and move them off 833 * the tsk->cpu_timers[N] list onto the firing list. Here we update the 834 * tsk->it_*_expires values to reflect the remaining thread CPU timers. 835 */ 836 static void check_thread_timers(struct task_struct *tsk, 837 struct list_head *firing) 838 { 839 struct posix_cputimers *pct = &tsk->posix_cputimers; 840 u64 samples[CPUCLOCK_MAX]; 841 unsigned long soft; 842 843 if (dl_task(tsk)) 844 check_dl_overrun(tsk); 845 846 if (expiry_cache_is_inactive(pct)) 847 return; 848 849 task_sample_cputime(tsk, samples); 850 collect_posix_cputimers(pct, samples, firing); 851 852 /* 853 * Check for the special case thread timers. 854 */ 855 soft = task_rlimit(tsk, RLIMIT_RTTIME); 856 if (soft != RLIM_INFINITY) { 857 /* Task RT timeout is accounted in jiffies. RTTIME is usec */ 858 unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ); 859 unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME); 860 861 /* At the hard limit, send SIGKILL. No further action. */ 862 if (hard != RLIM_INFINITY && 863 check_rlimit(rttime, hard, SIGKILL, true, true)) 864 return; 865 866 /* At the soft limit, send a SIGXCPU every second */ 867 if (check_rlimit(rttime, soft, SIGXCPU, true, false)) { 868 soft += USEC_PER_SEC; 869 tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft; 870 } 871 } 872 873 if (expiry_cache_is_inactive(pct)) 874 tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER); 875 } 876 877 static inline void stop_process_timers(struct signal_struct *sig) 878 { 879 struct posix_cputimers *pct = &sig->posix_cputimers; 880 881 /* Turn off the active flag. This is done without locking. */ 882 WRITE_ONCE(pct->timers_active, false); 883 tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER); 884 } 885 886 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, 887 u64 *expires, u64 cur_time, int signo) 888 { 889 if (!it->expires) 890 return; 891 892 if (cur_time >= it->expires) { 893 if (it->incr) 894 it->expires += it->incr; 895 else 896 it->expires = 0; 897 898 trace_itimer_expire(signo == SIGPROF ? 899 ITIMER_PROF : ITIMER_VIRTUAL, 900 task_tgid(tsk), cur_time); 901 __group_send_sig_info(signo, SEND_SIG_PRIV, tsk); 902 } 903 904 if (it->expires && it->expires < *expires) 905 *expires = it->expires; 906 } 907 908 /* 909 * Check for any per-thread CPU timers that have fired and move them 910 * off the tsk->*_timers list onto the firing list. Per-thread timers 911 * have already been taken off. 912 */ 913 static void check_process_timers(struct task_struct *tsk, 914 struct list_head *firing) 915 { 916 struct signal_struct *const sig = tsk->signal; 917 struct posix_cputimers *pct = &sig->posix_cputimers; 918 u64 samples[CPUCLOCK_MAX]; 919 unsigned long soft; 920 921 /* 922 * If there are no active process wide timers (POSIX 1.b, itimers, 923 * RLIMIT_CPU) nothing to check. Also skip the process wide timer 924 * processing when there is already another task handling them. 925 */ 926 if (!READ_ONCE(pct->timers_active) || pct->expiry_active) 927 return; 928 929 /* 930 * Signify that a thread is checking for process timers. 931 * Write access to this field is protected by the sighand lock. 932 */ 933 pct->expiry_active = true; 934 935 /* 936 * Collect the current process totals. Group accounting is active 937 * so the sample can be taken directly. 
/*
 * Check for any process-wide (thread group) CPU timers that have fired and
 * move them off the timer queues onto the firing list. Per-thread timers
 * have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        struct signal_struct *const sig = tsk->signal;
        struct posix_cputimers *pct = &sig->posix_cputimers;
        u64 samples[CPUCLOCK_MAX];
        unsigned long soft;

        /*
         * If there are no active process wide timers (POSIX 1.b, itimers,
         * RLIMIT_CPU) nothing to check. Also skip the process wide timer
         * processing when there is already another task handling them.
         */
        if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
                return;

        /*
         * Signify that a thread is checking for process timers.
         * Write access to this field is protected by the sighand lock.
         */
        pct->expiry_active = true;

        /*
         * Collect the current process totals. Group accounting is active
         * so the sample can be taken directly.
         */
        proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
        collect_posix_cputimers(pct, samples, firing);

        /*
         * Check for the special case process timers.
         */
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
                         &pct->bases[CPUCLOCK_PROF].nextevt,
                         samples[CPUCLOCK_PROF], SIGPROF);
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
                         &pct->bases[CPUCLOCK_VIRT].nextevt,
                         samples[CPUCLOCK_VIRT], SIGVTALRM);

        soft = task_rlimit(tsk, RLIMIT_CPU);
        if (soft != RLIM_INFINITY) {
                /* RLIMIT_CPU is in seconds. Samples are nanoseconds */
                unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
                u64 ptime = samples[CPUCLOCK_PROF];
                u64 softns = (u64)soft * NSEC_PER_SEC;
                u64 hardns = (u64)hard * NSEC_PER_SEC;

                /* At the hard limit, send SIGKILL. No further action. */
                if (hard != RLIM_INFINITY &&
                    check_rlimit(ptime, hardns, SIGKILL, false, true))
                        return;

                /* At the soft limit, send a SIGXCPU every second */
                if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
                        sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
                        softns += NSEC_PER_SEC;
                }

                /* Update the expiry cache */
                if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
                        pct->bases[CPUCLOCK_PROF].nextevt = softns;
        }

        if (expiry_cache_is_inactive(pct))
                stop_process_timers(sig);

        pct->expiry_active = false;
}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
        clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
        struct task_struct *p;
        struct sighand_struct *sighand;
        unsigned long flags;
        u64 now;

        rcu_read_lock();
        p = cpu_timer_task_rcu(timer);
        if (!p)
                goto out;

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock))
                now = cpu_clock_sample(clkid, p);
        else
                now = cpu_clock_sample_group(clkid, p, true);

        bump_cpu_timer(timer, now);

        /* Protect timer list r/w in arm_timer() */
        sighand = lock_task_sighand(p, &flags);
        if (unlikely(sighand == NULL))
                goto out;

        /*
         * Now re-arm for the new expiry time.
         */
        arm_timer(timer, p);
        unlock_task_sighand(p, &flags);
out:
        rcu_read_unlock();
}
/**
 * task_cputimers_expired - Check whether posix CPU timers are expired
 *
 * @samples:	Array of current samples for the CPUCLOCK clocks
 * @pct:	Pointer to a posix_cputimers container
 *
 * Returns true if any member of @samples is greater than or equal to the
 * corresponding member of @pct->bases[CLK].nextevt. False otherwise.
 */
static inline bool
task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
{
        int i;

        for (i = 0; i < CPUCLOCK_MAX; i++) {
                if (samples[i] >= pct->bases[i].nextevt)
                        return true;
        }
        return false;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers. If both are zero (there are no
 * timers set) return false. Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times. Return
 * true if a timer has expired, else return false.
 */
static inline bool fastpath_timer_check(struct task_struct *tsk)
{
        struct posix_cputimers *pct = &tsk->posix_cputimers;
        struct signal_struct *sig;

        if (!expiry_cache_is_inactive(pct)) {
                u64 samples[CPUCLOCK_MAX];

                task_sample_cputime(tsk, samples);
                if (task_cputimers_expired(samples, pct))
                        return true;
        }

        sig = tsk->signal;
        pct = &sig->posix_cputimers;
        /*
         * Check if thread group timers expired when timers are active and
         * no other thread in the group is already handling expiry for
         * thread group cputimers. These fields are read without the
         * sighand lock. However, this is fine because this is meant to be
         * a fastpath heuristic to determine whether we should try to
         * acquire the sighand lock to handle timer expiry.
         *
         * In the worst case scenario, if concurrently timers_active is set
         * or expiry_active is cleared, but the current thread doesn't see
         * the change yet, the timer checks are delayed until the next
         * thread in the group gets a scheduler interrupt to handle the
         * timer. This isn't an issue in practice because these types of
         * delays with signals actually getting sent are expected.
         */
        if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
                u64 samples[CPUCLOCK_MAX];

                proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
                                           samples);

                if (task_cputimers_expired(samples, pct))
                        return true;
        }

        if (dl_task(tsk) && tsk->dl.dl_overrun)
                return true;

        return false;
}
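/*
 * Note: fastpath_timer_check() runs on every tick for the running task via
 * run_posix_cpu_timers() below (invoked from the timer interrupt path,
 * update_process_times()), so it deliberately stays lockless and only
 * compares fresh samples against the cached expiry values.
 */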
/*
 * This is called from the timer interrupt handler. The irq handler has
 * already updated our counts. We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(void)
{
        struct task_struct *tsk = current;
        struct k_itimer *timer, *next;
        unsigned long flags;
        LIST_HEAD(firing);

        lockdep_assert_irqs_disabled();

        /*
         * The fast path checks that there are no expired thread or thread
         * group timers. If that's so, just return.
         */
        if (!fastpath_timer_check(tsk))
                return;

        lockdep_posixtimer_enter();
        if (!lock_task_sighand(tsk, &flags)) {
                lockdep_posixtimer_exit();
                return;
        }
        /*
         * Here we take all the firing timers off the per-thread and
         * process-wide timer queues and put them on the firing list.
         */
        check_thread_timers(tsk, &firing);

        check_process_timers(tsk, &firing);

        /*
         * We must release these locks before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list. We have set
         * the firing flag in each timer, so that a deletion attempt
         * that gets the timer lock before we do will give it up and
         * spin until we've taken care of that timer below.
         */
        unlock_task_sighand(tsk, &flags);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us. We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
                int cpu_firing;

                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.elist);
                cpu_firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun. So don't generate an event.
                 */
                if (likely(cpu_firing >= 0))
                        cpu_timer_fire(timer);
                spin_unlock(&timer->it_lock);
        }
        lockdep_posixtimer_exit();
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
                           u64 *newval, u64 *oldval)
{
        u64 now, *nextevt;

        if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
                return;

        nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
        now = cpu_clock_sample_group(clkid, tsk, true);

        if (oldval) {
                /*
                 * We are setting an itimer. The *oldval is absolute and we
                 * update it to be relative; the *newval argument is relative
                 * and we update it to be absolute.
                 */
                if (*oldval) {
                        if (*oldval <= now) {
                                /* Just about to fire. */
                                *oldval = TICK_NSEC;
                        } else {
                                *oldval -= now;
                        }
                }

                if (!*newval)
                        return;
                *newval += now;
        }

        /*
         * Update expiration cache if this is the earliest timer. The
         * CPUCLOCK_PROF expiry cache is also used by RLIMIT_CPU.
         */
        if (*newval < *nextevt)
                *nextevt = *newval;

        tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
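/*
 * Note: set_process_cpu_timer() is reached from the setitimer() path for
 * ITIMER_PROF/ITIMER_VIRTUAL (which passes a non-NULL @oldval) and from
 * update_rlimit_cpu() above for RLIMIT_CPU changes (which passes a NULL
 * @oldval).
 */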
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            const struct timespec64 *rqtp)
{
        struct itimerspec64 it;
        struct k_itimer timer;
        u64 expires;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;

        if (!error) {
                static struct itimerspec64 zero_it;
                struct restart_block *restart;

                memset(&it, 0, sizeof(it));
                it.it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, &it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (!cpu_timer_getexpires(&timer.it.cpu)) {
                                /*
                                 * Our timer fired and was reset; the
                                 * deletion below cannot fail.
                                 */
                                posix_cpu_timer_del(&timer);
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                expires = cpu_timer_getexpires(&timer.it.cpu);
                error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
                if (!error) {
                        /*
                         * Timer is now unarmed, deletion cannot fail.
                         */
                        posix_cpu_timer_del(&timer);
                }
                spin_unlock_irq(&timer.it_lock);

                while (error == TIMER_RETRY) {
                        /*
                         * We need to handle the case when the timer was or is
                         * in the middle of firing. In other cases we already
                         * freed the resources.
                         */
                        spin_lock_irq(&timer.it_lock);
                        error = posix_cpu_timer_del(&timer);
                        spin_unlock_irq(&timer.it_lock);
                }

                if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
                /*
                 * Report back to the user the time still remaining.
                 */
                restart = &current->restart_block;
                restart->nanosleep.expires = expires;
                if (restart->nanosleep.type != TT_NONE)
                        error = nanosleep_copyout(restart, &it.it_value);
        }

        return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                            const struct timespec64 *rqtp)
{
        struct restart_block *restart_block = &current->restart_block;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp);

        if (error == -ERESTART_RESTARTBLOCK) {

                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->nanosleep.clockid = which_clock;
        }
        return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->nanosleep.clockid;
        struct timespec64 t;

        t = ns_to_timespec64(restart_block->nanosleep.expires);

        return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}
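/*
 * Illustrative userspace counterpart (not kernel code): sleeping on the
 * process CPU clock goes through do_cpu_nanosleep() above, e.g.
 *
 *        struct timespec req = { .tv_sec = 1 };
 *
 *        clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, NULL);
 *
 * blocks until the process has consumed one more second of CPU time.
 * Sleeping on CLOCK_THREAD_CPUTIME_ID is not supported: clock_thread
 * below provides no .nsleep hook.
 */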
#define PROCESS_CLOCK	make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	make_thread_cpuclock(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec64 *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec64 *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              const struct timespec64 *rqtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec64 *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec64 *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}

const struct k_clock clock_posix_cpu = {
        .clock_getres		= posix_cpu_clock_getres,
        .clock_set		= posix_cpu_clock_set,
        .clock_get_timespec	= posix_cpu_clock_get,
        .timer_create		= posix_cpu_timer_create,
        .nsleep			= posix_cpu_nsleep,
        .timer_set		= posix_cpu_timer_set,
        .timer_del		= posix_cpu_timer_del,
        .timer_get		= posix_cpu_timer_get,
        .timer_rearm		= posix_cpu_timer_rearm,
};

const struct k_clock clock_process = {
        .clock_getres		= process_cpu_clock_getres,
        .clock_get_timespec	= process_cpu_clock_get,
        .timer_create		= process_cpu_timer_create,
        .nsleep			= process_cpu_nsleep,
};

const struct k_clock clock_thread = {
        .clock_getres		= thread_cpu_clock_getres,
        .clock_get_timespec	= thread_cpu_clock_get,
        .timer_create		= thread_cpu_timer_create,
};
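/*
 * Note: clock_process and clock_thread above back the static clockids
 * CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID, while clock_posix_cpu
 * handles the dynamic per-task clockids produced by clock_getcpuclockid()
 * and pthread_getcpuclockid(). A minimal userspace check (illustrative only):
 *
 *        struct timespec ts;
 *
 *        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
 *        clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *
 * The first call is served via clock_process, the second via clock_thread.
 */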