// SPDX-License-Identifier: GPL-2.0+
/*
 * 2002-10-15  Posix Clocks & timers
 *             by George Anzinger george@mvista.com
 *             Copyright (C) 2002 2003 by MontaVista Software.
 *
 * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
 *             Copyright (C) 2004 Boris Hu
 *
 * These are all the functions necessary to implement POSIX clocks & timers.
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/sched/task.h>

#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/hashtable.h>
#include <linux/compat.h>
#include <linux/nospec.h>
#include <linux/time_namespace.h>

#include "timekeeping.h"
#include "posix-timers.h"

/*
 * Management of POSIX timers. Timers are kept in a static hash table with
 * 512 entries. Timer IDs are allocated by a local routine which selects the
 * proper hash head by a key constructed from the current->signal address and
 * a per-signal-struct counter. This keeps timer IDs unique per process, but
 * they can now intersect between processes.
 */

/*
 * Let's keep our timers in a slab cache :-)
 */
static struct kmem_cache *posix_timers_cache;

static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
static DEFINE_SPINLOCK(hash_lock);

static const struct k_clock * const posix_clocks[];
static const struct k_clock *clockid_to_kclock(const clockid_t id);
static const struct k_clock clock_realtime, clock_monotonic;

/*
 * We assume that the new SIGEV_THREAD_ID shares no bits with the other
 * SIGEV values. Here we put out an error if this assumption fails.
 */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
                        ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif
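
/*
 * Illustrative userspace sketch (not part of this file; assumes glibc with
 * _GNU_SOURCE for SIGEV_THREAD_ID and gettid(), and that sigev_notify_thread_id
 * is available -- on older glibc it must be defined as _sigev_un._tid):
 * because SIGEV_THREAD_ID is a flag bit disjoint from the other SIGEV values,
 * it is OR'ed with SIGEV_SIGNAL to direct the timer signal at one thread:
 *
 *      struct sigevent sev = {
 *              .sigev_notify           = SIGEV_SIGNAL | SIGEV_THREAD_ID,
 *              .sigev_signo            = SIGRTMIN,
 *              .sigev_notify_thread_id = gettid(),
 *      };
 *      timer_t tid;
 *      timer_create(CLOCK_MONOTONIC, &sev, &tid);
 *
 * good_sigevent() later in this file accepts exactly this combination and
 * rejects a thread id outside the caller's thread group.
 */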
/*
 * The timer ID is turned into a timer address by posix_timer_by_id().
 * Verifying a valid ID consists of:
 *
 * a) checking that posix_timer_by_id() returns other than NULL.
 * b) checking that the timer id matches the one in the timer itself.
 * c) that the timer owner is in the caller's thread group.
 */

/*
 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
 *          to implement others. This structure defines the various
 *          clocks.
 *
 * RESOLUTION: Clock resolution is used to round up timer and interval
 *          times, NOT to report clock times, which are reported with as
 *          much resolution as the system can muster. In some cases this
 *          resolution may depend on the underlying clock hardware and
 *          may not be quantifiable until run time, and only then can the
 *          necessary code be written. The standard says we should say
 *          something about this issue in the documentation...
 *
 * FUNCTIONS: The CLOCKs structure defines the possible functions to
 *          handle various clock functions.
 *
 *          The standard POSIX timer management code assumes the
 *          following: 1.) The k_itimer struct (sched.h) is used for
 *          the timer. 2.) The list, it_lock, it_clock, it_id and
 *          it_pid fields are not modified by timer code.
 *
 * Permissions: It is assumed that the clock_settime() function defined
 *          for each clock will take care of permission checks. Some
 *          clocks may be settable by any user (i.e. local process
 *          clocks), others not. Currently the only settable clock we
 *          have is CLOCK_REALTIME and its high-res counterpart, both of
 *          which we beg off on and pass to do_sys_settimeofday().
 */
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);

#define lock_timer(tid, flags)                                             \
({      struct k_itimer *__timr;                                           \
        __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags));  \
        __timr;                                                            \
})

static int hash(struct signal_struct *sig, unsigned int nr)
{
        return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
}

static struct k_itimer *__posix_timers_find(struct hlist_head *head,
                                            struct signal_struct *sig,
                                            timer_t id)
{
        struct k_itimer *timer;

        hlist_for_each_entry_rcu(timer, head, t_hash) {
                if ((timer->it_signal == sig) && (timer->it_id == id))
                        return timer;
        }
        return NULL;
}

static struct k_itimer *posix_timer_by_id(timer_t id)
{
        struct signal_struct *sig = current->signal;
        struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];

        return __posix_timers_find(head, sig, id);
}

static int posix_timer_add(struct k_itimer *timer)
{
        struct signal_struct *sig = current->signal;
        int first_free_id = sig->posix_timer_id;
        struct hlist_head *head;
        int ret = -ENOENT;

        do {
                spin_lock(&hash_lock);
                head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
                if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
                        hlist_add_head_rcu(&timer->t_hash, head);
                        ret = sig->posix_timer_id;
                }
                if (++sig->posix_timer_id < 0)
                        sig->posix_timer_id = 0;
                if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
                        /* Loop over all possible ids completed */
                        ret = -EAGAIN;
                spin_unlock(&hash_lock);
        } while (ret == -ENOENT);
        return ret;
}

static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
        spin_unlock_irqrestore(&timr->it_lock, flags);
}
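
/*
 * Illustrative sketch of the allocation scheme above (example IDs assumed;
 * a fresh process typically starts at 0): sig->posix_timer_id hands out
 * sequential values, wraps back to 0 after INT_MAX, and the search gives up
 * with -EAGAIN only after cycling all the way back to its starting point
 * without finding a free slot:
 *
 *      timer_t a, b;
 *      timer_create(CLOCK_MONOTONIC, NULL, &a);        // e.g. a == 0
 *      timer_create(CLOCK_MONOTONIC, NULL, &b);        // e.g. b == 1
 *
 * Uniqueness is only per process: the hash key mixes in the signal_struct
 * pointer, so identical IDs in different processes never collide in lookup.
 */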
/* Get clock_realtime */
static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp)
{
        ktime_get_real_ts64(tp);
        return 0;
}

static ktime_t posix_get_realtime_ktime(clockid_t which_clock)
{
        return ktime_get_real();
}

/* Set clock_realtime */
static int posix_clock_realtime_set(const clockid_t which_clock,
                                    const struct timespec64 *tp)
{
        return do_sys_settimeofday64(tp, NULL);
}

static int posix_clock_realtime_adj(const clockid_t which_clock,
                                    struct __kernel_timex *t)
{
        return do_adjtimex(t);
}

/*
 * Get monotonic time for posix timers
 */
static int posix_get_monotonic_timespec(clockid_t which_clock, struct timespec64 *tp)
{
        ktime_get_ts64(tp);
        timens_add_monotonic(tp);
        return 0;
}

static ktime_t posix_get_monotonic_ktime(clockid_t which_clock)
{
        return ktime_get();
}

/*
 * Get monotonic-raw time for posix timers
 */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
{
        ktime_get_raw_ts64(tp);
        timens_add_monotonic(tp);
        return 0;
}

static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
{
        ktime_get_coarse_real_ts64(tp);
        return 0;
}

static int posix_get_monotonic_coarse(clockid_t which_clock,
                                      struct timespec64 *tp)
{
        ktime_get_coarse_ts64(tp);
        timens_add_monotonic(tp);
        return 0;
}

static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
{
        *tp = ktime_to_timespec64(KTIME_LOW_RES);
        return 0;
}

static int posix_get_boottime_timespec(const clockid_t which_clock, struct timespec64 *tp)
{
        ktime_get_boottime_ts64(tp);
        timens_add_boottime(tp);
        return 0;
}

static ktime_t posix_get_boottime_ktime(const clockid_t which_clock)
{
        return ktime_get_boottime();
}

static int posix_get_tai_timespec(clockid_t which_clock, struct timespec64 *tp)
{
        ktime_get_clocktai_ts64(tp);
        return 0;
}

static ktime_t posix_get_tai_ktime(clockid_t which_clock)
{
        return ktime_get_clocktai();
}

static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
{
        tp->tv_sec = 0;
        tp->tv_nsec = hrtimer_resolution;
        return 0;
}
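
/*
 * Illustrative userspace sketch: the resolutions reported by the helpers
 * above are visible through clock_getres(2). On a typical high-resolution
 * build the precise clocks report hrtimer_resolution (1 ns), while the
 * *_COARSE clocks report the tick period (KTIME_LOW_RES), e.g. 4 ms at
 * HZ=250:
 *
 *      struct timespec res;
 *      clock_getres(CLOCK_MONOTONIC, &res);            // e.g. 0 s, 1 ns
 *      clock_getres(CLOCK_MONOTONIC_COARSE, &res);     // e.g. 0 s, 4000000 ns
 */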
/*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
static __init int init_posix_timers(void)
{
        posix_timers_cache = kmem_cache_create("posix_timers_cache",
                                               sizeof(struct k_itimer), 0,
                                               SLAB_PANIC, NULL);
        return 0;
}
__initcall(init_posix_timers);

/*
 * The siginfo si_overrun field and the return value of timer_getoverrun(2)
 * are of type int. Clamp the overrun value to INT_MAX.
 */
static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
{
        s64 sum = timr->it_overrun_last + (s64)baseval;

        return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
}

static void common_hrtimer_rearm(struct k_itimer *timr)
{
        struct hrtimer *timer = &timr->it.real.timer;

        timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
                                            timr->it_interval);
        hrtimer_restart(timer);
}

/*
 * This function is exported for use by the signal delivery code. It is
 * called just prior to the info block being released and passes that
 * block to us. Its function is to update the overrun entry AND to
 * restart the timer. It should only be called if the timer is to be
 * restarted (i.e. we have flagged this in the sys_private entry of the
 * info block).
 *
 * To protect against the timer going away while the interrupt is queued,
 * we require that the it_requeue_pending flag be set.
 */
void posixtimer_rearm(struct kernel_siginfo *info)
{
        struct k_itimer *timr;
        unsigned long flags;

        timr = lock_timer(info->si_tid, &flags);
        if (!timr)
                return;

        if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
                timr->kclock->timer_rearm(timr);

                timr->it_active = 1;
                timr->it_overrun_last = timr->it_overrun;
                timr->it_overrun = -1LL;
                ++timr->it_requeue_pending;

                info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
        }

        unlock_timer(timr, flags);
}

int posix_timer_event(struct k_itimer *timr, int si_private)
{
        enum pid_type type;
        int ret = -1;

        /*
         * FIXME: if ->sigq is queued we can race with
         * dequeue_signal()->posixtimer_rearm().
         *
         * If dequeue_signal() sees the "right" value of
         * si_sys_private it calls posixtimer_rearm().
         * We re-queue ->sigq and drop ->it_lock().
         * posixtimer_rearm() locks the timer
         * and re-schedules it while ->sigq is pending.
         * Not really bad, but not what we want.
         */
        timr->sigq->info.si_sys_private = si_private;

        type = !(timr->it_sigev_notify & SIGEV_THREAD_ID) ? PIDTYPE_TGID : PIDTYPE_PID;
        ret = send_sigqueue(timr->sigq, timr->it_pid, type);
        /* If we failed to send the signal the timer stops. */
        return ret > 0;
}
/*
 * This function gets called when a POSIX.1b interval timer expires. It
 * is used as a callback from the kernel internal timer. The
 * run_timer_list code ALWAYS calls with interrupts on.
 *
 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
        struct k_itimer *timr;
        unsigned long flags;
        int si_private = 0;
        enum hrtimer_restart ret = HRTIMER_NORESTART;

        timr = container_of(timer, struct k_itimer, it.real.timer);
        spin_lock_irqsave(&timr->it_lock, flags);

        timr->it_active = 0;
        if (timr->it_interval != 0)
                si_private = ++timr->it_requeue_pending;

        if (posix_timer_event(timr, si_private)) {
                /*
                 * The signal was not sent because of SIG_IGN. We will
                 * not get a callback to restart it AND it should be
                 * restarted.
                 */
                if (timr->it_interval != 0) {
                        ktime_t now = hrtimer_cb_get_time(timer);

                        /*
                         * FIXME: What we really want is to stop this
                         * timer completely and restart it in case the
                         * SIG_IGN is removed. This is a non trivial
                         * change which involves sighand locking
                         * (sigh !), which we don't want to do late in
                         * the release cycle.
                         *
                         * For now we just let timers with an interval
                         * less than a jiffy expire every jiffy to
                         * avoid softirq starvation in case of SIG_IGN
                         * and a very small interval, which would put
                         * the timer right back on the softirq pending
                         * list. By moving now ahead of time we trick
                         * hrtimer_forward() to expire the timer
                         * later, while we still maintain the overrun
                         * accuracy, but have some inconsistency in
                         * the timer_gettime() case. This is at least
                         * better than a starved softirq. A more
                         * complex fix which also solves another related
                         * inconsistency is already in the pipeline.
                         */
#ifdef CONFIG_HIGH_RES_TIMERS
                        {
                                ktime_t kj = NSEC_PER_SEC / HZ;

                                if (timr->it_interval < kj)
                                        now = ktime_add(now, kj);
                        }
#endif
                        timr->it_overrun += hrtimer_forward(timer, now,
                                                            timr->it_interval);
                        ret = HRTIMER_RESTART;
                        ++timr->it_requeue_pending;
                        timr->it_active = 1;
                }
        }

        unlock_timer(timr, flags);
        return ret;
}
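
/*
 * Worked example for the workaround above (numbers assume HZ=250, i.e. one
 * jiffy = 4 ms): with it_interval = 1 ms and the signal ignored, moving
 * 'now' one jiffy ahead makes hrtimer_forward() skip roughly four 1 ms
 * periods per expiry. The timer then fires at most once per jiffy instead
 * of re-raising the softirq back-to-back, while it_overrun still counts
 * the skipped periods.
 */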
static struct pid *good_sigevent(sigevent_t *event)
{
        struct pid *pid = task_tgid(current);
        struct task_struct *rtn;

        switch (event->sigev_notify) {
        case SIGEV_SIGNAL | SIGEV_THREAD_ID:
                pid = find_vpid(event->sigev_notify_thread_id);
                rtn = pid_task(pid, PIDTYPE_PID);
                if (!rtn || !same_thread_group(rtn, current))
                        return NULL;
                /* FALLTHRU */
        case SIGEV_SIGNAL:
        case SIGEV_THREAD:
                if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
                        return NULL;
                /* FALLTHRU */
        case SIGEV_NONE:
                return pid;
        default:
                return NULL;
        }
}

static struct k_itimer *alloc_posix_timer(void)
{
        struct k_itimer *tmr;

        tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
        if (!tmr)
                return tmr;
        if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
                kmem_cache_free(posix_timers_cache, tmr);
                return NULL;
        }
        clear_siginfo(&tmr->sigq->info);
        return tmr;
}

static void k_itimer_rcu_free(struct rcu_head *head)
{
        struct k_itimer *tmr = container_of(head, struct k_itimer, rcu);

        kmem_cache_free(posix_timers_cache, tmr);
}

#define IT_ID_SET       1
#define IT_ID_NOT_SET   0

static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
        if (it_id_set) {
                unsigned long flags;

                spin_lock_irqsave(&hash_lock, flags);
                hlist_del_rcu(&tmr->t_hash);
                spin_unlock_irqrestore(&hash_lock, flags);
        }
        put_pid(tmr->it_pid);
        sigqueue_free(tmr->sigq);
        call_rcu(&tmr->rcu, k_itimer_rcu_free);
}

static int common_timer_create(struct k_itimer *new_timer)
{
        hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
        return 0;
}

/* Create a POSIX.1b interval timer. */
static int do_timer_create(clockid_t which_clock, struct sigevent *event,
                           timer_t __user *created_timer_id)
{
        const struct k_clock *kc = clockid_to_kclock(which_clock);
        struct k_itimer *new_timer;
        int error, new_timer_id;
        int it_id_set = IT_ID_NOT_SET;

        if (!kc)
                return -EINVAL;
        if (!kc->timer_create)
                return -EOPNOTSUPP;

        new_timer = alloc_posix_timer();
        if (unlikely(!new_timer))
                return -EAGAIN;

        spin_lock_init(&new_timer->it_lock);
        new_timer_id = posix_timer_add(new_timer);
        if (new_timer_id < 0) {
                error = new_timer_id;
                goto out;
        }

        it_id_set = IT_ID_SET;
        new_timer->it_id = (timer_t)new_timer_id;
        new_timer->it_clock = which_clock;
        new_timer->kclock = kc;
        new_timer->it_overrun = -1LL;

        if (event) {
                rcu_read_lock();
                new_timer->it_pid = get_pid(good_sigevent(event));
                rcu_read_unlock();
                if (!new_timer->it_pid) {
                        error = -EINVAL;
                        goto out;
                }
                new_timer->it_sigev_notify = event->sigev_notify;
                new_timer->sigq->info.si_signo = event->sigev_signo;
                new_timer->sigq->info.si_value = event->sigev_value;
        } else {
                new_timer->it_sigev_notify = SIGEV_SIGNAL;
                new_timer->sigq->info.si_signo = SIGALRM;
                memset(&new_timer->sigq->info.si_value, 0, sizeof(sigval_t));
                new_timer->sigq->info.si_value.sival_int = new_timer->it_id;
                new_timer->it_pid = get_pid(task_tgid(current));
        }

        new_timer->sigq->info.si_tid = new_timer->it_id;
        new_timer->sigq->info.si_code = SI_TIMER;

        if (copy_to_user(created_timer_id, &new_timer_id, sizeof(new_timer_id))) {
                error = -EFAULT;
                goto out;
        }

        error = kc->timer_create(new_timer);
        if (error)
                goto out;

        spin_lock_irq(&current->sighand->siglock);
        new_timer->it_signal = current->signal;
        list_add(&new_timer->list, &current->signal->posix_timers);
        spin_unlock_irq(&current->sighand->siglock);

        return 0;
        /*
         * In the case of the timer belonging to another task, after
         * the task is unlocked, the timer is owned by the other task
         * and may cease to exist at any time. Don't use or modify
         * new_timer after the unlock call.
         */
out:
        release_posix_timer(new_timer, it_id_set);
        return error;
}

SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
                struct sigevent __user *, timer_event_spec,
                timer_t __user *, created_timer_id)
{
        if (timer_event_spec) {
                sigevent_t event;

                if (copy_from_user(&event, timer_event_spec, sizeof(event)))
                        return -EFAULT;
                return do_timer_create(which_clock, &event, created_timer_id);
        }
        return do_timer_create(which_clock, NULL, created_timer_id);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
                       struct compat_sigevent __user *, timer_event_spec,
                       timer_t __user *, created_timer_id)
{
        if (timer_event_spec) {
                sigevent_t event;

                if (get_compat_sigevent(&event, timer_event_spec))
                        return -EFAULT;
                return do_timer_create(which_clock, &event, created_timer_id);
        }
        return do_timer_create(which_clock, NULL, created_timer_id);
}
#endif
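
/*
 * Illustrative userspace sketch of the default-event path above (standard
 * POSIX API, assumes glibc and -lrt): passing a NULL sigevent takes the
 * 'else' branch in do_timer_create(), i.e. SIGEV_SIGNAL with SIGALRM and
 * si_value carrying the timer id:
 *
 *      #include <signal.h>
 *      #include <time.h>
 *
 *      timer_t tid;
 *      struct itimerspec its = {
 *              .it_value    = { .tv_sec = 1 },                 // first expiry
 *              .it_interval = { .tv_nsec = 500000000 },        // then every 500 ms
 *      };
 *
 *      timer_create(CLOCK_MONOTONIC, NULL, &tid);
 *      timer_settime(tid, 0, &its, NULL);                      // see below
 */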
/*
 * Locking issues: We need to protect the result of the id look up until
 * we get the timer locked down so it is not deleted under us. The
 * removal is done under the hash_lock spinlock, and the lookup relies on
 * RCU to bridge the find to the timer lock. To avoid a deadlock, the
 * timer id MUST be released without holding the timer lock.
 */
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
        struct k_itimer *timr;

        /*
         * timer_t could be any type >= int and we want to make sure any
         * @timer_id outside positive int range fails lookup.
         */
        if ((unsigned long long)timer_id > INT_MAX)
                return NULL;

        rcu_read_lock();
        timr = posix_timer_by_id(timer_id);
        if (timr) {
                spin_lock_irqsave(&timr->it_lock, *flags);
                if (timr->it_signal == current->signal) {
                        rcu_read_unlock();
                        return timr;
                }
                spin_unlock_irqrestore(&timr->it_lock, *flags);
        }
        rcu_read_unlock();

        return NULL;
}

static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
{
        struct hrtimer *timer = &timr->it.real.timer;

        return __hrtimer_expires_remaining_adjusted(timer, now);
}

static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
{
        struct hrtimer *timer = &timr->it.real.timer;

        return hrtimer_forward(timer, now, timr->it_interval);
}

/*
 * Get the time remaining on a POSIX.1b interval timer. This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here. First there is the case
 * of a timer that has a requeue pending. These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we handle that here. Otherwise
 * it is the same as a requeue pending timer with respect to what we
 * should report.
 */
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
{
        const struct k_clock *kc = timr->kclock;
        ktime_t now, remaining, iv;
        bool sig_none;

        sig_none = timr->it_sigev_notify == SIGEV_NONE;
        iv = timr->it_interval;

        /* interval timer ? */
        if (iv) {
                cur_setting->it_interval = ktime_to_timespec64(iv);
        } else if (!timr->it_active) {
                /*
                 * SIGEV_NONE oneshot timers are never queued. Check them
                 * below.
                 */
                if (!sig_none)
                        return;
        }

        now = kc->clock_get_ktime(timr->it_clock);

        /*
         * When a requeue is pending or this is a SIGEV_NONE timer move the
         * expiry time forward by intervals, so expiry is > now.
         */
        if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none))
                timr->it_overrun += kc->timer_forward(timr, now);

        remaining = kc->timer_remaining(timr, now);
        /* Return 0 only when the timer is expired and not pending */
        if (remaining <= 0) {
                /*
                 * A single shot SIGEV_NONE timer must return 0 when it
                 * is expired!
                 */
                if (!sig_none)
                        cur_setting->it_value.tv_nsec = 1;
        } else {
                cur_setting->it_value = ktime_to_timespec64(remaining);
        }
}
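
/*
 * Illustrative sketch of the SIGEV_NONE case described above: such a timer
 * never queues a signal, so userspace polls it with timer_gettime(2). While
 * armed it reports the remaining time; once a one-shot has expired it
 * reports zero:
 *
 *      struct sigevent sev = { .sigev_notify = SIGEV_NONE };
 *      struct itimerspec its = { .it_value.tv_sec = 2 }, cur;
 *      timer_t tid;
 *
 *      timer_create(CLOCK_MONOTONIC, &sev, &tid);
 *      timer_settime(tid, 0, &its, NULL);
 *      timer_gettime(tid, &cur);       // cur.it_value counts down from ~2 s
 *      sleep(3);
 *      timer_gettime(tid, &cur);       // cur.it_value == {0, 0}: expired
 */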
/* Get the time remaining on a POSIX.1b interval timer. */
static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
{
        struct k_itimer *timr;
        const struct k_clock *kc;
        unsigned long flags;
        int ret = 0;

        timr = lock_timer(timer_id, &flags);
        if (!timr)
                return -EINVAL;

        memset(setting, 0, sizeof(*setting));
        kc = timr->kclock;
        if (WARN_ON_ONCE(!kc || !kc->timer_get))
                ret = -EINVAL;
        else
                kc->timer_get(timr, setting);

        unlock_timer(timr, flags);
        return ret;
}

/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
                struct __kernel_itimerspec __user *, setting)
{
        struct itimerspec64 cur_setting;

        int ret = do_timer_gettime(timer_id, &cur_setting);
        if (!ret) {
                if (put_itimerspec64(&cur_setting, setting))
                        ret = -EFAULT;
        }
        return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME

SYSCALL_DEFINE2(timer_gettime32, timer_t, timer_id,
                struct old_itimerspec32 __user *, setting)
{
        struct itimerspec64 cur_setting;

        int ret = do_timer_gettime(timer_id, &cur_setting);
        if (!ret) {
                if (put_old_itimerspec32(&cur_setting, setting))
                        ret = -EFAULT;
        }
        return ret;
}

#endif

/*
 * Get the number of overruns of a POSIX.1b interval timer. This is to
 * be the overrun of the timer last delivered. At the same time we are
 * accumulating overruns on the next timer. The overrun is frozen when
 * the signal is delivered, either at the notify time (if the info block
 * is not queued) or at the actual delivery time (as we are informed by
 * the callback to posixtimer_rearm()). So all we need to do is
 * to pick up the frozen overrun.
 */
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
        struct k_itimer *timr;
        int overrun;
        unsigned long flags;

        timr = lock_timer(timer_id, &flags);
        if (!timr)
                return -EINVAL;

        overrun = timer_overrun_to_int(timr, 0);
        unlock_timer(timr, flags);

        return overrun;
}
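
/*
 * Illustrative sketch (timings assumed): overruns accumulate while a
 * periodic timer's signal is pending or blocked, and are frozen per
 * delivery. With a 10 ms period and the signal blocked for ~35 ms, the
 * delivery after unblocking reports about three missed expiries:
 *
 *      int n = timer_getoverrun(tid);  // e.g. n == 3
 *
 * The same (clamped) value is delivered as si_overrun in the handler's
 * siginfo_t.
 */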
static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
                               bool absolute, bool sigev_none)
{
        struct hrtimer *timer = &timr->it.real.timer;
        enum hrtimer_mode mode;

        mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
        /*
         * Posix magic: Relative CLOCK_REALTIME timers are not affected by
         * clock modifications, so they become CLOCK_MONOTONIC based under the
         * hood. See hrtimer_init(). Update timr->kclock, so the generic
         * functions which use timr->kclock->clock_get_*() work.
         *
         * Note: it_clock stays unmodified, because the next timer_set() might
         * use ABSTIME, so it needs to switch back.
         */
        if (timr->it_clock == CLOCK_REALTIME)
                timr->kclock = absolute ? &clock_realtime : &clock_monotonic;

        hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
        timr->it.real.timer.function = posix_timer_fn;

        if (!absolute)
                expires = ktime_add_safe(expires, timer->base->get_time());
        hrtimer_set_expires(timer, expires);

        if (!sigev_none)
                hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
{
        return hrtimer_try_to_cancel(&timr->it.real.timer);
}

static void common_timer_wait_running(struct k_itimer *timer)
{
        hrtimer_cancel_wait_running(&timer->it.real.timer);
}

/*
 * On PREEMPT_RT this prevents priority inversion against the softirq
 * kthread in case it gets preempted while executing a timer callback.
 * See comments in hrtimer_cancel_wait_running(). For PREEMPT_RT=n this
 * just results in a cpu_relax().
 */
static struct k_itimer *timer_wait_running(struct k_itimer *timer,
                                           unsigned long *flags)
{
        const struct k_clock *kc = READ_ONCE(timer->kclock);
        timer_t timer_id = READ_ONCE(timer->it_id);

        /* Prevent kfree(timer) after dropping the lock */
        rcu_read_lock();
        unlock_timer(timer, *flags);

        if (!WARN_ON_ONCE(!kc->timer_wait_running))
                kc->timer_wait_running(timer);

        rcu_read_unlock();
        /* Relock the timer. It might no longer be hashed. */
        return lock_timer(timer_id, flags);
}

/* Set a POSIX.1b interval timer. */
int common_timer_set(struct k_itimer *timr, int flags,
                     struct itimerspec64 *new_setting,
                     struct itimerspec64 *old_setting)
{
        const struct k_clock *kc = timr->kclock;
        bool sigev_none;
        ktime_t expires;

        if (old_setting)
                common_timer_get(timr, old_setting);

        /* Prevent rearming by clearing the interval */
        timr->it_interval = 0;
        /*
         * Careful here. On SMP systems the timer expiry function could be
         * active and spinning on timr->it_lock.
         */
        if (kc->timer_try_to_cancel(timr) < 0)
                return TIMER_RETRY;

        timr->it_active = 0;
        timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timr->it_overrun_last = 0;

        /* Switch off the timer when it_value is zero */
        if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
                return 0;

        timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
        expires = timespec64_to_ktime(new_setting->it_value);
        if (flags & TIMER_ABSTIME)
                expires = timens_ktime_to_host(timr->it_clock, expires);
        sigev_none = timr->it_sigev_notify == SIGEV_NONE;

        kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
        timr->it_active = !sigev_none;
        return 0;
}
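
/*
 * Illustrative consequence of the CLOCK_REALTIME rebasing done in
 * common_hrtimer_arm() (userspace sketch, standard POSIX API): a relative
 * CLOCK_REALTIME timer is immune to clock_settime() jumps, an absolute
 * one is not:
 *
 *      struct itimerspec its = { .it_value.tv_sec = 5 };
 *
 *      timer_settime(tid, 0, &its, NULL);
 *      // relative: fires after 5 s of elapsed time even if the wall
 *      // clock is stepped meanwhile (monotonic under the hood)
 *
 *      clock_gettime(CLOCK_REALTIME, &its.it_value);
 *      its.it_value.tv_sec += 5;
 *      timer_settime(tid, TIMER_ABSTIME, &its, NULL);
 *      // absolute: stepping the wall clock past the deadline makes
 *      // the timer fire immediately
 */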
static int do_timer_settime(timer_t timer_id, int tmr_flags,
                            struct itimerspec64 *new_spec64,
                            struct itimerspec64 *old_spec64)
{
        const struct k_clock *kc;
        struct k_itimer *timr;
        unsigned long flags;
        int error = 0;

        if (!timespec64_valid(&new_spec64->it_interval) ||
            !timespec64_valid(&new_spec64->it_value))
                return -EINVAL;

        if (old_spec64)
                memset(old_spec64, 0, sizeof(*old_spec64));

        timr = lock_timer(timer_id, &flags);
retry:
        if (!timr)
                return -EINVAL;

        kc = timr->kclock;
        if (WARN_ON_ONCE(!kc || !kc->timer_set))
                error = -EINVAL;
        else
                error = kc->timer_set(timr, tmr_flags, new_spec64, old_spec64);

        if (error == TIMER_RETRY) {
                /* We already got the old time... */
                old_spec64 = NULL;
                /* Unlocks and relocks the timer if it still exists */
                timr = timer_wait_running(timr, &flags);
                goto retry;
        }
        unlock_timer(timr, flags);

        return error;
}

/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
                const struct __kernel_itimerspec __user *, new_setting,
                struct __kernel_itimerspec __user *, old_setting)
{
        struct itimerspec64 new_spec, old_spec;
        struct itimerspec64 *rtn = old_setting ? &old_spec : NULL;
        int error = 0;

        if (!new_setting)
                return -EINVAL;

        if (get_itimerspec64(&new_spec, new_setting))
                return -EFAULT;

        error = do_timer_settime(timer_id, flags, &new_spec, rtn);
        if (!error && old_setting) {
                if (put_itimerspec64(&old_spec, old_setting))
                        error = -EFAULT;
        }
        return error;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE4(timer_settime32, timer_t, timer_id, int, flags,
                struct old_itimerspec32 __user *, new,
                struct old_itimerspec32 __user *, old)
{
        struct itimerspec64 new_spec, old_spec;
        struct itimerspec64 *rtn = old ? &old_spec : NULL;
        int error = 0;

        if (!new)
                return -EINVAL;
        if (get_old_itimerspec32(&new_spec, new))
                return -EFAULT;

        error = do_timer_settime(timer_id, flags, &new_spec, rtn);
        if (!error && old) {
                if (put_old_itimerspec32(&old_spec, old))
                        error = -EFAULT;
        }
        return error;
}
#endif

int common_timer_del(struct k_itimer *timer)
{
        const struct k_clock *kc = timer->kclock;

        timer->it_interval = 0;
        if (kc->timer_try_to_cancel(timer) < 0)
                return TIMER_RETRY;
        timer->it_active = 0;
        return 0;
}

static inline int timer_delete_hook(struct k_itimer *timer)
{
        const struct k_clock *kc = timer->kclock;

        if (WARN_ON_ONCE(!kc || !kc->timer_del))
                return -EINVAL;
        return kc->timer_del(timer);
}

/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
        struct k_itimer *timer;
        unsigned long flags;

        timer = lock_timer(timer_id, &flags);

retry_delete:
        if (!timer)
                return -EINVAL;

        if (unlikely(timer_delete_hook(timer) == TIMER_RETRY)) {
                /* Unlocks and relocks the timer if it still exists */
                timer = timer_wait_running(timer, &flags);
                goto retry_delete;
        }

        spin_lock(&current->sighand->siglock);
        list_del(&timer->list);
        spin_unlock(&current->sighand->siglock);
        /*
         * This keeps any tasks waiting on the spin lock from thinking
         * they got something (see the lock code above).
         */
        timer->it_signal = NULL;

        unlock_timer(timer, flags);
        release_posix_timer(timer, IT_ID_SET);
        return 0;
}

/*
 * Delete a timer owned by the exiting process. Used by exit_itimers().
 */
static void itimer_delete(struct k_itimer *timer)
{
retry_delete:
        spin_lock_irq(&timer->it_lock);

        if (timer_delete_hook(timer) == TIMER_RETRY) {
                spin_unlock_irq(&timer->it_lock);
                goto retry_delete;
        }
        list_del(&timer->list);

        spin_unlock_irq(&timer->it_lock);
        release_posix_timer(timer, IT_ID_SET);
}
/*
 * This is called by do_exit() or de_thread(), only when there are no more
 * references to the shared signal_struct.
 */
void exit_itimers(struct signal_struct *sig)
{
        struct k_itimer *tmr;

        while (!list_empty(&sig->posix_timers)) {
                tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
                itimer_delete(tmr);
        }
}

SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
                const struct __kernel_timespec __user *, tp)
{
        const struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec64 new_tp;

        if (!kc || !kc->clock_set)
                return -EINVAL;

        if (get_timespec64(&new_tp, tp))
                return -EFAULT;

        return kc->clock_set(which_clock, &new_tp);
}

SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
                struct __kernel_timespec __user *, tp)
{
        const struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec64 kernel_tp;
        int error;

        if (!kc)
                return -EINVAL;

        error = kc->clock_get_timespec(which_clock, &kernel_tp);

        if (!error && put_timespec64(&kernel_tp, tp))
                error = -EFAULT;

        return error;
}

int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex *ktx)
{
        const struct k_clock *kc = clockid_to_kclock(which_clock);

        if (!kc)
                return -EINVAL;
        if (!kc->clock_adj)
                return -EOPNOTSUPP;

        return kc->clock_adj(which_clock, ktx);
}

SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
                struct __kernel_timex __user *, utx)
{
        struct __kernel_timex ktx;
        int err;

        if (copy_from_user(&ktx, utx, sizeof(ktx)))
                return -EFAULT;

        err = do_clock_adjtime(which_clock, &ktx);

        if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
                return -EFAULT;

        return err;
}

SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
                struct __kernel_timespec __user *, tp)
{
        const struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec64 rtn_tp;
        int error;

        if (!kc)
                return -EINVAL;

        error = kc->clock_getres(which_clock, &rtn_tp);

        if (!error && tp && put_timespec64(&rtn_tp, tp))
                error = -EFAULT;

        return error;
}

#ifdef CONFIG_COMPAT_32BIT_TIME

SYSCALL_DEFINE2(clock_settime32, clockid_t, which_clock,
                struct old_timespec32 __user *, tp)
{
        const struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec64 ts;

        if (!kc || !kc->clock_set)
                return -EINVAL;

        if (get_old_timespec32(&ts, tp))
                return -EFAULT;

        return kc->clock_set(which_clock, &ts);
}

SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock,
                struct old_timespec32 __user *, tp)
{
        const struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec64 ts;
        int err;

        if (!kc)
                return -EINVAL;

        err = kc->clock_get_timespec(which_clock, &ts);

        if (!err && put_old_timespec32(&ts, tp))
                err = -EFAULT;

        return err;
}

SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
                struct old_timex32 __user *, utp)
{
        struct __kernel_timex ktx;
        int err;

        err = get_old_timex32(&ktx, utp);
        if (err)
                return err;

        err = do_clock_adjtime(which_clock, &ktx);

        if (err >= 0)
                err = put_old_timex32(utp, &ktx);

        return err;
}
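
/*
 * Illustrative userspace sketch (assumes glibc's <sys/timex.h>):
 * clock_adjtime(2) both adjusts and reports. A read-only query passes
 * modes == 0; the non-negative return value is the clock state:
 *
 *      struct timex tx = { .modes = 0 };
 *      int state = clock_adjtime(CLOCK_REALTIME, &tx); // e.g. TIME_OK
 *      // tx.offset, tx.freq etc. now reflect the kernel NTP state
 */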
SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
                struct old_timespec32 __user *, tp)
{
        const struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec64 ts;
        int err;

        if (!kc)
                return -EINVAL;

        err = kc->clock_getres(which_clock, &ts);
        if (!err && tp && put_old_timespec32(&ts, tp))
                return -EFAULT;

        return err;
}

#endif

/*
 * nanosleep for monotonic and realtime clocks
 */
static int common_nsleep(const clockid_t which_clock, int flags,
                         const struct timespec64 *rqtp)
{
        ktime_t texp = timespec64_to_ktime(*rqtp);

        return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
                                 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
                                 which_clock);
}

static int common_nsleep_timens(const clockid_t which_clock, int flags,
                                const struct timespec64 *rqtp)
{
        ktime_t texp = timespec64_to_ktime(*rqtp);

        if (flags & TIMER_ABSTIME)
                texp = timens_ktime_to_host(which_clock, texp);

        return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
                                 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
                                 which_clock);
}

SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
                const struct __kernel_timespec __user *, rqtp,
                struct __kernel_timespec __user *, rmtp)
{
        const struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec64 t;

        if (!kc)
                return -EINVAL;
        if (!kc->nsleep)
                return -EOPNOTSUPP;

        if (get_timespec64(&t, rqtp))
                return -EFAULT;

        if (!timespec64_valid(&t))
                return -EINVAL;
        if (flags & TIMER_ABSTIME)
                rmtp = NULL;
        current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
        current->restart_block.nanosleep.rmtp = rmtp;

        return kc->nsleep(which_clock, flags, &t);
}
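
/*
 * Illustrative sketch of why rmtp is ignored for TIMER_ABSTIME (see
 * above): an absolute sleep can simply be restarted with the same
 * deadline, which also gives drift-free periodic loops
 * (do_periodic_work() is hypothetical):
 *
 *      struct timespec t;
 *
 *      clock_gettime(CLOCK_MONOTONIC, &t);
 *      for (;;) {
 *              t.tv_sec += 1;          // next absolute deadline
 *              clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &t, NULL);
 *              do_periodic_work();
 *      }
 */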
#ifdef CONFIG_COMPAT_32BIT_TIME

SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
                struct old_timespec32 __user *, rqtp,
                struct old_timespec32 __user *, rmtp)
{
        const struct k_clock *kc = clockid_to_kclock(which_clock);
        struct timespec64 t;

        if (!kc)
                return -EINVAL;
        if (!kc->nsleep)
                return -EOPNOTSUPP;

        if (get_old_timespec32(&t, rqtp))
                return -EFAULT;

        if (!timespec64_valid(&t))
                return -EINVAL;
        if (flags & TIMER_ABSTIME)
                rmtp = NULL;
        current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
        current->restart_block.nanosleep.compat_rmtp = rmtp;

        return kc->nsleep(which_clock, flags, &t);
}

#endif

static const struct k_clock clock_realtime = {
        .clock_getres           = posix_get_hrtimer_res,
        .clock_get_timespec     = posix_get_realtime_timespec,
        .clock_get_ktime        = posix_get_realtime_ktime,
        .clock_set              = posix_clock_realtime_set,
        .clock_adj              = posix_clock_realtime_adj,
        .nsleep                 = common_nsleep,
        .timer_create           = common_timer_create,
        .timer_set              = common_timer_set,
        .timer_get              = common_timer_get,
        .timer_del              = common_timer_del,
        .timer_rearm            = common_hrtimer_rearm,
        .timer_forward          = common_hrtimer_forward,
        .timer_remaining        = common_hrtimer_remaining,
        .timer_try_to_cancel    = common_hrtimer_try_to_cancel,
        .timer_wait_running     = common_timer_wait_running,
        .timer_arm              = common_hrtimer_arm,
};

static const struct k_clock clock_monotonic = {
        .clock_getres           = posix_get_hrtimer_res,
        .clock_get_timespec     = posix_get_monotonic_timespec,
        .clock_get_ktime        = posix_get_monotonic_ktime,
        .nsleep                 = common_nsleep_timens,
        .timer_create           = common_timer_create,
        .timer_set              = common_timer_set,
        .timer_get              = common_timer_get,
        .timer_del              = common_timer_del,
        .timer_rearm            = common_hrtimer_rearm,
        .timer_forward          = common_hrtimer_forward,
        .timer_remaining        = common_hrtimer_remaining,
        .timer_try_to_cancel    = common_hrtimer_try_to_cancel,
        .timer_wait_running     = common_timer_wait_running,
        .timer_arm              = common_hrtimer_arm,
};

static const struct k_clock clock_monotonic_raw = {
        .clock_getres           = posix_get_hrtimer_res,
        .clock_get_timespec     = posix_get_monotonic_raw,
};

static const struct k_clock clock_realtime_coarse = {
        .clock_getres           = posix_get_coarse_res,
        .clock_get_timespec     = posix_get_realtime_coarse,
};

static const struct k_clock clock_monotonic_coarse = {
        .clock_getres           = posix_get_coarse_res,
        .clock_get_timespec     = posix_get_monotonic_coarse,
};

static const struct k_clock clock_tai = {
        .clock_getres           = posix_get_hrtimer_res,
        .clock_get_ktime        = posix_get_tai_ktime,
        .clock_get_timespec     = posix_get_tai_timespec,
        .nsleep                 = common_nsleep,
        .timer_create           = common_timer_create,
        .timer_set              = common_timer_set,
        .timer_get              = common_timer_get,
        .timer_del              = common_timer_del,
        .timer_rearm            = common_hrtimer_rearm,
        .timer_forward          = common_hrtimer_forward,
        .timer_remaining        = common_hrtimer_remaining,
        .timer_try_to_cancel    = common_hrtimer_try_to_cancel,
        .timer_wait_running     = common_timer_wait_running,
        .timer_arm              = common_hrtimer_arm,
};

static const struct k_clock clock_boottime = {
        .clock_getres           = posix_get_hrtimer_res,
        .clock_get_ktime        = posix_get_boottime_ktime,
        .clock_get_timespec     = posix_get_boottime_timespec,
        .nsleep                 = common_nsleep_timens,
        .timer_create           = common_timer_create,
        .timer_set              = common_timer_set,
        .timer_get              = common_timer_get,
        .timer_del              = common_timer_del,
        .timer_rearm            = common_hrtimer_rearm,
        .timer_forward          = common_hrtimer_forward,
        .timer_remaining        = common_hrtimer_remaining,
        .timer_try_to_cancel    = common_hrtimer_try_to_cancel,
        .timer_wait_running     = common_timer_wait_running,
        .timer_arm              = common_hrtimer_arm,
};
static const struct k_clock * const posix_clocks[] = {
        [CLOCK_REALTIME]                = &clock_realtime,
        [CLOCK_MONOTONIC]               = &clock_monotonic,
        [CLOCK_PROCESS_CPUTIME_ID]      = &clock_process,
        [CLOCK_THREAD_CPUTIME_ID]       = &clock_thread,
        [CLOCK_MONOTONIC_RAW]           = &clock_monotonic_raw,
        [CLOCK_REALTIME_COARSE]         = &clock_realtime_coarse,
        [CLOCK_MONOTONIC_COARSE]        = &clock_monotonic_coarse,
        [CLOCK_BOOTTIME]                = &clock_boottime,
        [CLOCK_REALTIME_ALARM]          = &alarm_clock,
        [CLOCK_BOOTTIME_ALARM]          = &alarm_clock,
        [CLOCK_TAI]                     = &clock_tai,
};

static const struct k_clock *clockid_to_kclock(const clockid_t id)
{
        clockid_t idx = id;

        if (id < 0) {
                return (id & CLOCKFD_MASK) == CLOCKFD ?
                        &clock_posix_dynamic : &clock_posix_cpu;
        }

        if (id >= ARRAY_SIZE(posix_clocks))
                return NULL;

        return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))];
}
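
/*
 * Illustrative sketch of the negative-clockid encoding handled above:
 * dynamic posix clocks (e.g. PTP hardware clocks) encode an open file
 * descriptor into the clockid, which is what routes id < 0 to
 * clock_posix_dynamic:
 *
 *      #define CLOCKFD 3
 *      static clockid_t fd_to_clockid(int fd)
 *      {
 *              return (~(clockid_t)fd << 3) | CLOCKFD;
 *      }
 *
 *      int fd = open("/dev/ptp0", O_RDWR);
 *      struct timespec ts;
 *      clock_gettime(fd_to_clockid(fd), &ts);
 */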