/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <trace/events/timer.h>

#include "tick-internal.h"

/*
 * The timer bases:
 *
 * There are more clockids than hrtimer bases. Thus, we index
 * into the timer bases by the hrtimer_base_type enum. When trying
 * to reach a base using a clockid, hrtimer_clockid_to_base()
 * is used to convert from clockid to the proper hrtimer_base_type.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
	.seq = SEQCNT_ZERO(hrtimer_bases.seq),
	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
	}
};
static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	/* Make sure we catch unsupported clockids */
	[0 ... MAX_CLOCKS - 1]	= HRTIMER_MAX_CLOCK_BASES,

	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
};

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
 * such that hrtimer_callback_running() can unconditionally dereference
 * timer->base->cpu_base
 */
static struct hrtimer_cpu_base migration_cpu_base = {
	.seq = SEQCNT_ZERO(migration_cpu_base),
	.clock_base = { { .cpu_base = &migration_cpu_base, }, },
};

#define migration_base	migration_cpu_base.clock_base[0]

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = &migration_base and drop the lock: the timer
 * remains locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != &migration_base)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires <= new_base->cpu_base->expires_next;
#else
	return 0;
#endif
}

#ifdef CONFIG_NO_HZ_COMMON
static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
					 int pinned)
{
	if (pinned || !base->migration_enabled)
		return base;
	return &per_cpu(hrtimer_bases, get_nohz_timer_target());
}
#else
static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
					 int pinned)
{
	return base;
}
#endif
/*
 * We switch the timer base to a power-optimized selected CPU target,
 * if:
 *	- NO_HZ_COMMON is enabled
 *	- timer migration is enabled
 *	- the timer callback is not running
 *	- the timer is not the first expiring timer on the new target
 *
 * If one of the above requirements is not fulfilled we move the timer
 * to the current CPU or leave it on the previously assigned CPU if
 * the timer callback is currently running.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
	struct hrtimer_clock_base *new_base;
	int basenum = base->index;

	this_cpu_base = this_cpu_ptr(&hrtimer_bases);
	new_cpu_base = get_target_base(this_cpu_base, pinned);
again:
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_hrtimer_base() */
		timer->base = &migration_base;
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			new_cpu_base = this_cpu_base;
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	} else {
		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			new_cpu_base = this_cpu_base;
			goto again;
		}
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
/*
 * Divide a ktime value by a nanosecond value
 */
s64 __ktime_divns(const ktime_t kt, s64 div)
{
	int sft = 0;
	s64 dclc;
	u64 tmp;

	dclc = ktime_to_ns(kt);
	tmp = dclc < 0 ? -dclc : dclc;

	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	tmp >>= sft;
	do_div(tmp, (unsigned long) div);
	return dclc < 0 ? -tmp : tmp;
}
EXPORT_SYMBOL_GPL(__ktime_divns);
#endif /* BITS_PER_LONG < 64 */
/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add_unsafe(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res < 0 || res < lhs || res < rhs)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

static void *hrtimer_debug_hint(void *addr)
{
	return ((struct hrtimer *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.debug_hint	= hrtimer_debug_hint,
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif
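/*
 * Note: hrtimer_init_on_stack() and destroy_hrtimer_on_stack() must be
 * paired for stack based timers so that the debugobjects tracking above
 * stays consistent. A minimal usage sketch (illustrative only; the
 * surrounding code is an assumption, not part of this file):
 *
 *	struct hrtimer_sleeper t;
 *
 *	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	...
 *	destroy_hrtimer_on_stack(&t.timer);
 *
 * See do_nanosleep() / hrtimer_nanosleep() below for in-tree users of
 * this pattern.
 */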
static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer)
{
	debug_hrtimer_activate(timer);
	trace_hrtimer_start(timer);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
					     struct hrtimer *timer)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	cpu_base->next_timer = timer;
#endif
}

static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
{
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	unsigned int active = cpu_base->active_bases;
	ktime_t expires, expires_next = KTIME_MAX;

	hrtimer_update_next_timer(cpu_base, NULL);
	for (; active; base++, active >>= 1) {
		struct timerqueue_node *next;
		struct hrtimer *timer;

		if (!(active & 0x01))
			continue;

		next = timerqueue_getnext(&base->active);
		timer = container_of(next, struct hrtimer, node);
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		if (expires < expires_next) {
			expires_next = expires;
			hrtimer_update_next_timer(cpu_base, timer);
		}
	}
	/*
	 * clock_was_set() might have changed base->offset of any of
	 * the clock bases so the result might be negative. Fix it up
	 * to prevent a false positive in clockevents_program_event().
	 */
	if (expires_next < 0)
		expires_next = 0;
	return expires_next;
}
#endif

static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;

	return ktime_get_update_offsets_now(&base->clock_was_set_seq,
					    offs_real, offs_boot, offs_tai);
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static bool hrtimer_hres_enabled __read_mostly = true;
unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
EXPORT_SYMBOL_GPL(hrtimer_resolution);

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
}

__setup("highres=", setup_hrtimer_hres);
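/*
 * The "highres=" early parameter above accepts the boolean strings
 * understood by kstrtobool(). For example, booting with
 *
 *	highres=off
 *
 * keeps the kernel in low resolution mode even when the hardware could
 * support high resolution timers (illustrative note; see the
 * kernel-parameters documentation for the authoritative description).
 */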
/*
 * hrtimer_is_hres_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
	return cpu_base->hres_active;
}

static inline int hrtimer_hres_active(void)
{
	return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
}

/*
 * Reprogram the event source, checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	ktime_t expires_next;

	if (!cpu_base->hres_active)
		return;

	expires_next = __hrtimer_get_next_event(cpu_base);

	if (skip_equal && expires_next == cpu_base->expires_next)
		return;

	cpu_base->expires_next = expires_next;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * leave the hang delay active in the hardware. We want the
	 * system to make progress. That also prevents the following
	 * scenario:
	 * T1 expires 50ms from now
	 * T2 expires 5s from now
	 *
	 * T1 is removed, so this code is called and would reprogram
	 * the hardware to 5s from now. Any hrtimer_start after that
	 * will not reprogram the hardware due to hang_detected being
	 * set. So we'd effectively block all timers until the T2 event
	 * fires.
	 */
	if (cpu_base->hang_detected)
		return;

	tick_program_event(cpu_base->expires_next, 1);
}

/*
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static void hrtimer_reprogram(struct hrtimer *timer,
			      struct hrtimer_clock_base *base)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * If the timer is not on the current cpu, we cannot reprogram
	 * the other cpus clock event device.
	 */
	if (base->cpu_base != cpu_base)
		return;

	/*
	 * If the hrtimer interrupt is running, then it will
	 * reevaluate the clock bases and reprogram the clock event
	 * device. The callbacks are always executed in hard interrupt
	 * context so we don't need an extra check for a running
	 * callback.
	 */
	if (cpu_base->in_hrtirq)
		return;

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Set it to 0.
	 */
	if (expires < 0)
		expires = 0;

	if (expires >= cpu_base->expires_next)
		return;

	/* Update the pointer to the next expiring timer */
	cpu_base->next_timer = timer;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * do not schedule a timer which is earlier than the expiry
	 * which we enforced in the hang detection. We want the system
	 * to make progress.
	 */
	if (cpu_base->hang_detected)
		return;

	/*
	 * Program the timer hardware. We enforce the expiry for
	 * events which are already in the past.
	 */
	cpu_base->expires_next = expires;
	tick_program_event(expires, 1);
}
/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (!base->hres_active)
		return;

	raw_spin_lock(&base->lock);
	hrtimer_update_base(base);
	hrtimer_force_reprogram(base, 0);
	raw_spin_unlock(&base->lock);
}

/*
 * Switch to high resolution mode
 */
static void hrtimer_switch_to_hres(void)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (tick_init_highres()) {
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", base->cpu);
		return;
	}
	base->hres_active = 1;
	hrtimer_resolution = HIGH_RES_NSEC;

	tick_setup_sched_timer();
	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
}

static void clock_was_set_work(struct work_struct *work)
{
	clock_was_set();
}

static DECLARE_WORK(hrtimer_work, clock_was_set_work);

/*
 * Called from timekeeping and resume code to reprogram the hrtimer
 * interrupt device on all cpus.
 */
void clock_was_set_delayed(void)
{
	schedule_work(&hrtimer_work);
}

#else

static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *b) { return 0; }
static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_reprogram(struct hrtimer *timer,
				    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
#endif
	timerfd_clock_was_set();
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt on all online CPUs. However, all other CPUs will be
 * stopped with interrupts disabled so the clock_was_set() call
 * must be deferred.
 */
void hrtimers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");

	/* Retrigger on the local CPU */
	retrigger_next_event(NULL);
	/* And schedule a retrigger for all others */
	clock_was_set_delayed();
}

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta < 0)
		return 0;

	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
		return 0;

	if (interval < hrtimer_resolution)
		interval = hrtimer_resolution;

	if (unlikely(delta >= interval)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
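/*
 * The usual way to build a periodic timer on top of hrtimer_forward() is
 * to forward and requeue from the callback. A minimal sketch (illustrative
 * only; my_period and do_periodic_work() are assumed names, not part of
 * this file):
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
 *	{
 *		do_periodic_work();
 *		hrtimer_forward_now(t, my_period);
 *		return HRTIMER_RESTART;
 *	}
 *
 * hrtimer_forward_now() (see <linux/hrtimer.h>) forwards relative to the
 * current time of the timer's clock base; returning HRTIMER_RESTART makes
 * __run_hrtimer() below requeue the timer at the new expiry.
 */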
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base)
{
	debug_activate(timer);

	base->cpu_base->active_bases |= 1 << base->index;

	timer->state = HRTIMER_STATE_ENQUEUED;

	return timerqueue_add(&base->active, &timer->node);
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     u8 newstate, int reprogram)
{
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	u8 state = timer->state;

	timer->state = newstate;
	if (!(state & HRTIMER_STATE_ENQUEUED))
		return;

	if (!timerqueue_del(&base->active, &timer->node))
		cpu_base->active_bases &= ~(1 << base->index);

#ifdef CONFIG_HIGH_RES_TIMERS
	/*
	 * Note: If reprogram is false we do not update
	 * cpu_base->next_timer. This happens when we remove the first
	 * timer on a remote cpu. No harm as we never dereference
	 * cpu_base->next_timer. So the worst that can happen is
	 * a superfluous call to hrtimer_force_reprogram() on the
	 * remote cpu later on if the same timer gets enqueued again.
	 */
	if (reprogram && timer == cpu_base->next_timer)
		hrtimer_force_reprogram(cpu_base, 1);
#endif
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
	if (hrtimer_is_queued(timer)) {
		u8 state = timer->state;
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_deactivate(timer);
		reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);

		if (!restart)
			state = HRTIMER_STATE_INACTIVE;

		__remove_hrtimer(timer, base, state, reprogram);
		return 1;
	}
	return 0;
}

static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
					    const enum hrtimer_mode mode)
{
#ifdef CONFIG_TIME_LOW_RES
	/*
	 * CONFIG_TIME_LOW_RES indicates that the system has no way to return
	 * granular time values. For relative timers we add hrtimer_resolution
	 * (i.e. one jiffy) to prevent short timeouts.
	 */
	timer->is_rel = mode & HRTIMER_MODE_REL;
	if (timer->is_rel)
		tim = ktime_add_safe(tim, hrtimer_resolution);
#endif
	return tim;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 */
void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			    u64 delta_ns, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	remove_hrtimer(timer, base, true);

	if (mode & HRTIMER_MODE_REL)
		tim = ktime_add_safe(tim, base->get_time());

	tim = hrtimer_update_lowres(timer, tim, mode);

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	leftmost = enqueue_hrtimer(timer, new_base);
	if (!leftmost)
		goto unlock;

	if (!hrtimer_is_hres_active(timer)) {
		/*
		 * Kick to reschedule the next tick to handle the new timer
		 * on dynticks target.
		 */
		if (new_base->cpu_base->nohz_active)
			wake_up_nohz_cpu(new_base->cpu_base->cpu);
	} else {
		hrtimer_reprogram(timer, new_base);
	}
unlock:
	unlock_hrtimer_base(timer, &flags);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
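/*
 * Putting the pieces together, the common pattern for arming a timer from
 * kernel code looks roughly like this (a minimal sketch; my_timer_fn and
 * the 100ms period are assumptions for illustration, not part of this
 * file):
 *
 *	static struct hrtimer my_timer;
 *
 *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_timer.function = my_timer_fn;
 *	hrtimer_start(&my_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
 *
 * hrtimer_start() is the <linux/hrtimer.h> wrapper which calls
 * hrtimer_start_range_ns() above with a zero slack range.
 */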
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	/*
	 * Check lockless first. If the timer is not active (neither
	 * enqueued nor running the callback), nothing to do here. The
	 * base lock does not serialize against a concurrent enqueue,
	 * so we can avoid taking it.
	 */
	if (!hrtimer_active(timer))
		return 0;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base, false);

	unlock_hrtimer_base(timer, &flags);

	return ret;

}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
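/*
 * Owners of dynamically allocated structures embedding an hrtimer must make
 * sure the callback can no longer run before freeing the memory, i.e. use
 * the blocking variant (sketch only; my_obj and the kfree() usage are an
 * assumed example, not taken from this file):
 *
 *	hrtimer_cancel(&my_obj->timer);
 *	kfree(my_obj);
 *
 * hrtimer_try_to_cancel() alone is not sufficient for that purpose because
 * it returns -1 while the callback is still executing.
 */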
/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 * @adjust:	adjust relative timers when CONFIG_TIME_LOW_RES=y
 */
ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
{
	unsigned long flags;
	ktime_t rem;

	lock_hrtimer_base(timer, &flags);
	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
		rem = hrtimer_expires_remaining_adjusted(timer);
	else
		rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ_COMMON
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the next expiry time or KTIME_MAX if no timer is pending.
 */
u64 hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (!__hrtimer_hres_active(cpu_base))
		expires = __hrtimer_get_next_event(cpu_base);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	return expires;
}
#endif

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
	if (likely(clock_id < MAX_CLOCKS)) {
		int base = hrtimer_clock_to_base_table[clock_id];

		if (likely(base != HRTIMER_MAX_CLOCK_BASES))
			return base;
	}
	WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
	return HRTIMER_BASE_MONOTONIC;
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;
	int base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = raw_cpu_ptr(&hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	base = hrtimer_clockid_to_base(clock_id);
	timer->base = &cpu_base->clock_base[base];
	timerqueue_init(&timer->node);
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);

/*
 * A timer is active, when it is enqueued into the rbtree or the
 * callback function is running or it's in the state of being migrated
 * to another cpu.
 *
 * It is important for this function to not return a false negative.
 */
bool hrtimer_active(const struct hrtimer *timer)
{
	struct hrtimer_cpu_base *cpu_base;
	unsigned int seq;

	do {
		cpu_base = READ_ONCE(timer->base->cpu_base);
		seq = raw_read_seqcount_begin(&cpu_base->seq);

		if (timer->state != HRTIMER_STATE_INACTIVE ||
		    cpu_base->running == timer)
			return true;

	} while (read_seqcount_retry(&cpu_base->seq, seq) ||
		 cpu_base != READ_ONCE(timer->base->cpu_base));

	return false;
}
EXPORT_SYMBOL_GPL(hrtimer_active);

/*
 * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
 * distinct sections:
 *
 *  - queued:	the timer is queued
 *  - callback:	the timer is being run
 *  - post:	the timer is inactive or (re)queued
 *
 * On the read side we ensure we observe timer->state and cpu_base->running
 * from the same section, if anything changed while we looked at it, we retry.
 * This includes timer->base changing because sequence numbers alone are
 * insufficient for that.
 *
 * The sequence numbers are required because otherwise we could still observe
 * a false negative if the read side got smeared over multiple consecutive
 * __run_hrtimer() invocations.
 */

static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
			  struct hrtimer_clock_base *base,
			  struct hrtimer *timer, ktime_t *now)
{
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	lockdep_assert_held(&cpu_base->lock);

	debug_deactivate(timer);
	cpu_base->running = timer;

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe cpu_base->running == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&cpu_base->seq);

	__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
	fn = timer->function;

	/*
	 * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
	 * timer is restarted with a period then it becomes an absolute
	 * timer. If it's not restarted it does not matter.
	 */
	if (IS_ENABLED(CONFIG_TIME_LOW_RES))
		timer->is_rel = false;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore it's safe to unlock
	 * the timer base.
	 */
	raw_spin_unlock(&cpu_base->lock);
	trace_hrtimer_expire_entry(timer, now);
	restart = fn(timer);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the running state after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 *
	 * Note: Because we dropped the cpu_base->lock above,
	 * hrtimer_start_range_ns() can have popped in and enqueued the timer
	 * for us already.
	 */
	if (restart != HRTIMER_NORESTART &&
	    !(timer->state & HRTIMER_STATE_ENQUEUED))
		enqueue_hrtimer(timer, base);

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe cpu_base->running == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&cpu_base->seq);

	WARN_ON_ONCE(cpu_base->running != timer);
	cpu_base->running = NULL;
}

static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
{
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	unsigned int active = cpu_base->active_bases;

	for (; active; base++, active >>= 1) {
		struct timerqueue_node *node;
		ktime_t basenow;

		if (!(active & 0x01))
			continue;

		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */
			if (basenow < hrtimer_get_softexpires_tv64(timer))
				break;

			__run_hrtimer(cpu_base, base, timer, &basenow);
		}
	}
}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	int retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	cpu_base->in_hrtirq = 1;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next = KTIME_MAX;

	__hrtimer_run_queues(cpu_base, now);

	/* Reevaluate the clock bases for the next expiry */
	expires_next = __hrtimer_get_next_event(cpu_base);
	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	cpu_base->in_hrtirq = 0;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (!tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock(&cpu_base->lock);
	delta = ktime_sub(now, entry_time);
	if ((unsigned int)delta > cpu_base->max_hang_time)
		cpu_base->max_hang_time = (unsigned int) delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}

/* called with interrupts disabled */
static inline void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = this_cpu_ptr(&tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif /* !CONFIG_HIGH_RES_TIMERS */

/*
 * Called from run_local_timers in hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t now;

	if (__hrtimer_hres_active(cpu_base))
		return;

	/*
	 * This _is_ ugly: We have to check periodically, whether we
	 * can switch to highres and / or nohz mode. The clocksource
	 * switch happens with xtime_lock held. Notification from
	 * there only sets the check bit in the tick_oneshot code,
	 * otherwise we might deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
		hrtimer_switch_to_hres();
		return;
	}

	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	__hrtimer_run_queues(cpu_base, now);
	raw_spin_unlock(&cpu_base->lock);
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);

int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
{
	switch(restart->nanosleep.type) {
#ifdef CONFIG_COMPAT
	case TT_COMPAT:
		if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp))
			return -EFAULT;
		break;
#endif
	case TT_NATIVE:
		if (put_timespec64(ts, restart->nanosleep.rmtp))
			return -EFAULT;
		break;
	default:
		BUG();
	}
	return -ERESTART_RESTARTBLOCK;
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	struct restart_block *restart;

	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);

		if (likely(t->task))
			freezable_schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	if (!t->task)
		return 0;

	restart = &current->restart_block;
	if (restart->nanosleep.type != TT_NONE) {
		ktime_t rem = hrtimer_expires_remaining(&t->timer);
		struct timespec64 rmt;

		if (rem <= 0)
			return 0;
		rmt = ktime_to_timespec64(rem);

		return nanosleep_copyout(restart, &rmt);
	}
	return -ERESTART_RESTARTBLOCK;
}

static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	int ret;

	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
			      HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);

	ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

long hrtimer_nanosleep(const struct timespec64 *rqtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	u64 slack;

	slack = current->timer_slack_ns;
	if (dl_task(current) || rt_task(current))
		slack = 0;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack);
	ret = do_nanosleep(&t, mode);
	if (ret != -ERESTART_RESTARTBLOCK)
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	restart = &current->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->nanosleep.clockid = t.timer.base->clockid;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec64 tu;

	if (get_timespec64(&tu, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&tu))
		return -EINVAL;

	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	current->restart_block.nanosleep.rmtp = rmtp;
	return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
		       struct compat_timespec __user *, rmtp)
{
	struct timespec64 tu;

	if (compat_get_timespec64(&tu, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&tu))
		return -EINVAL;

	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
	current->restart_block.nanosleep.compat_rmtp = rmtp;
	return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
#endif

/*
 * Functions related to boot-time initialization:
 */
int hrtimers_prepare_cpu(unsigned int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		cpu_base->clock_base[i].cpu_base = cpu_base;
		timerqueue_init_head(&cpu_base->clock_base[i].active);
	}

	cpu_base->cpu = cpu;
	hrtimer_init_hres(cpu_base);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct timerqueue_node *node;

	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Mark it as ENQUEUED not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest on this CPU, but we run
		 * hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base);
	}
}

int hrtimers_dead_cpu(unsigned int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = this_cpu_ptr(&hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	raw_spin_lock(&new_base->lock);
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	raw_spin_unlock(&old_base->lock);
	raw_spin_unlock(&new_base->lock);

	/* Check, if we got expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
	return 0;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init hrtimers_init(void)
{
	hrtimers_prepare_cpu(smp_processor_id());
}

/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 * @clock:	timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
 */
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
			       const enum hrtimer_mode mode, int clock)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && *expires == 0) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, clock, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}

/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though no earlier than @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up, (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired. If the task was woken before the
 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
 * by an explicit wakeup, it returns -EINTR.
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
				     const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range_clock(expires, delta, mode,
					      CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);

/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up, (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired. If the task was woken before the
 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
 * by an explicit wakeup, it returns -EINTR.
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);
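/*
 * As the kernel-doc above notes, the caller must set the task state before
 * calling into the schedule_hrtimeout*() family. A relative sleep of roughly
 * ten milliseconds therefore looks like this (illustrative sketch only, not
 * taken from this file):
 *
 *	ktime_t timeout = ms_to_ktime(10);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	schedule_hrtimeout(&timeout, HRTIMER_MODE_REL);
 *
 * A return value of 0 means the full timeout elapsed; -EINTR means the task
 * was woken early by a signal or an explicit wakeup.
 */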