/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>
#include <linux/freezer.h>

#include <asm/uaccess.h>

#include <trace/events/timer.h>

#include "tick-internal.h"
/*
 * The timer bases:
 *
 * There are more clockids than hrtimer bases. Thus, we index
 * into the timer bases by the hrtimer_base_type enum. When trying
 * to reach a base using a clockid, hrtimer_clockid_to_base()
 * is used to convert from clockid to the proper hrtimer_base_type.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
	.seq = SEQCNT_ZERO(hrtimer_bases.seq),
	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
	}
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
};

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
	return hrtimer_clock_to_base_table[clock_id];
}
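/*
 * Example sketch (illustrative only): how a clockid ends up selecting one of
 * the per-cpu clock bases above. The helper name is made up for illustration;
 * note that a timer's base can still change later if it is migrated.
 */
static inline struct hrtimer_clock_base * __maybe_unused
example_clock_base_for(clockid_t clock_id)
{
	struct hrtimer_cpu_base *cpu_base = raw_cpu_ptr(&hrtimer_bases);

	/* e.g. CLOCK_BOOTTIME -> HRTIMER_BASE_BOOTTIME -> clock_base[2] */
	return &cpu_base->clock_base[hrtimer_clockid_to_base(clock_id)];
}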
/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
 * such that hrtimer_callback_running() can unconditionally dereference
 * timer->base->cpu_base
 */
static struct hrtimer_cpu_base migration_cpu_base = {
	.seq = SEQCNT_ZERO(migration_cpu_base),
	.clock_base = { { .cpu_base = &migration_cpu_base, }, },
};

#define migration_base	migration_cpu_base.clock_base[0]

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = &migration_base and drop the lock: the timer
 * remains locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != &migration_base)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
#else
	return 0;
#endif
}

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
					 int pinned)
{
	if (pinned || !base->migration_enabled)
		return base;
	return &per_cpu(hrtimer_bases, get_nohz_timer_target());
}
#else
static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
					 int pinned)
{
	return base;
}
#endif
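/*
 * Example sketch (illustrative only): arming a timer which must stay on the
 * CPU it is started on. The HRTIMER_MODE_*_PINNED bit is what
 * get_target_base() above (and switch_hrtimer_base() below) consult before
 * considering a power-optimized target CPU. Names are made up for
 * illustration.
 */
static void __maybe_unused example_start_pinned(struct hrtimer *timer,
						ktime_t timeout)
{
	/* relative timeout, never migrated off the current CPU */
	hrtimer_start(timer, timeout, HRTIMER_MODE_REL_PINNED);
}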
/*
 * We switch the timer base to a power-optimized selected CPU target,
 * if:
 *	- NO_HZ_COMMON is enabled
 *	- timer migration is enabled
 *	- the timer callback is not running
 *	- the timer is not the first expiring timer on the new target
 *
 * If one of the above requirements is not fulfilled we move the timer
 * to the current CPU or leave it on the previously assigned CPU if
 * the timer callback is currently running.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
	struct hrtimer_clock_base *new_base;
	int basenum = base->index;

	this_cpu_base = this_cpu_ptr(&hrtimer_bases);
	new_cpu_base = get_target_base(this_cpu_base, pinned);
again:
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_hrtimer_base() */
		timer->base = &migration_base;
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			new_cpu_base = this_cpu_base;
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	} else {
		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			new_cpu_base = this_cpu_base;
			goto again;
		}
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
/*
 * Divide a ktime value by a nanosecond value
 */
s64 __ktime_divns(const ktime_t kt, s64 div)
{
	int sft = 0;
	s64 dclc;
	u64 tmp;

	dclc = ktime_to_ns(kt);
	tmp = dclc < 0 ? -dclc : dclc;

	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	tmp >>= sft;
	do_div(tmp, (unsigned long) div);
	return dclc < 0 ? -tmp : tmp;
}
EXPORT_SYMBOL_GPL(__ktime_divns);
#endif /* BITS_PER_LONG < 64 */

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);
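/*
 * Example sketch (illustrative only): turning a relative timeout into an
 * absolute expiry with ktime_add_safe(). A huge (or hostile) relative value
 * clamps to KTIME_SEC_MAX instead of wrapping into the past. The function
 * name is made up for illustration.
 */
static ktime_t __maybe_unused example_abs_deadline(ktime_t timeout)
{
	/* CLOCK_MONOTONIC "now" plus the requested timeout, overflow-safe */
	return ktime_add_safe(ktime_get(), timeout);
}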
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

static void *hrtimer_debug_hint(void *addr)
{
	return ((struct hrtimer *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.debug_hint	= hrtimer_debug_hint,
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif
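/*
 * Example sketch (illustrative only): the lifecycle the debug-objects code
 * above expects from an on-stack timer. The sleeper helpers further down
 * (do_nanosleep(), schedule_hrtimeout_range_clock()) follow the same pattern.
 * Names are made up for illustration.
 */
static enum hrtimer_restart example_stack_timer_fn(struct hrtimer *timer)
{
	return HRTIMER_NORESTART;
}

static void __maybe_unused example_on_stack_timer(ktime_t timeout)
{
	struct hrtimer timer;

	hrtimer_init_on_stack(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer.function = example_stack_timer_fn;
	hrtimer_start(&timer, timeout, HRTIMER_MODE_REL);

	/* ... do the work the timeout guards ... */

	hrtimer_cancel(&timer);
	destroy_hrtimer_on_stack(&timer);
}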
static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer)
{
	debug_hrtimer_activate(timer);
	trace_hrtimer_start(timer);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
					     struct hrtimer *timer)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	cpu_base->next_timer = timer;
#endif
}

static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
{
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
	unsigned int active = cpu_base->active_bases;

	hrtimer_update_next_timer(cpu_base, NULL);
	for (; active; base++, active >>= 1) {
		struct timerqueue_node *next;
		struct hrtimer *timer;

		if (!(active & 0x01))
			continue;

		next = timerqueue_getnext(&base->active);
		timer = container_of(next, struct hrtimer, node);
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		if (expires.tv64 < expires_next.tv64) {
			expires_next = expires;
			hrtimer_update_next_timer(cpu_base, timer);
		}
	}
	/*
	 * clock_was_set() might have changed base->offset of any of
	 * the clock bases so the result might be negative. Fix it up
	 * to prevent a false positive in clockevents_program_event().
	 */
	if (expires_next.tv64 < 0)
		expires_next.tv64 = 0;
	return expires_next;
}
#endif
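/*
 * Example sketch (illustrative only): how the active_bases bitfield walked
 * above maps to the clock bases. Bit N set means clock_base[N] has at least
 * one enqueued timer, so scans can skip empty bases without touching their
 * rbtrees. The helper name is made up for illustration.
 */
static unsigned int __maybe_unused
example_count_active_bases(struct hrtimer_cpu_base *cpu_base)
{
	unsigned int active = cpu_base->active_bases;
	unsigned int count = 0;

	for (; active; active >>= 1) {
		if (active & 0x01)
			count++;
	}
	return count;
}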
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;

	return ktime_get_update_offsets_now(&base->clock_was_set_seq,
					    offs_real, offs_boot, offs_tai);
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static bool hrtimer_hres_enabled __read_mostly = true;
unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
EXPORT_SYMBOL_GPL(hrtimer_resolution);

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
}

__setup("highres=", setup_hrtimer_hres);

/*
 * hrtimer_is_hres_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
	return cpu_base->hres_active;
}

static inline int hrtimer_hres_active(void)
{
	return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
}

/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	ktime_t expires_next;

	if (!cpu_base->hres_active)
		return;

	expires_next = __hrtimer_get_next_event(cpu_base);

	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
		return;

	cpu_base->expires_next.tv64 = expires_next.tv64;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * leave the hang delay active in the hardware. We want the
	 * system to make progress. That also prevents the following
	 * scenario:
	 * T1 expires 50ms from now
	 * T2 expires 5s from now
	 *
	 * T1 is removed, so this code is called and would reprogram
	 * the hardware to 5s from now. Any hrtimer_start after that
	 * will not reprogram the hardware due to hang_detected being
	 * set. So we'd effectively block all timers until the T2 event
	 * fires.
	 */
	if (cpu_base->hang_detected)
		return;

	tick_program_event(cpu_base->expires_next, 1);
}

/*
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static void hrtimer_reprogram(struct hrtimer *timer,
			      struct hrtimer_clock_base *base)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * If the timer is not on the current cpu, we cannot reprogram
	 * the other cpus clock event device.
	 */
	if (base->cpu_base != cpu_base)
		return;

	/*
	 * If the hrtimer interrupt is running, then it will
	 * reevaluate the clock bases and reprogram the clock event
	 * device. The callbacks are always executed in hard interrupt
	 * context so we don't need an extra check for a running
	 * callback.
	 */
	if (cpu_base->in_hrtirq)
		return;

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Set it to 0.
	 */
	if (expires.tv64 < 0)
		expires.tv64 = 0;

	if (expires.tv64 >= cpu_base->expires_next.tv64)
		return;

	/* Update the pointer to the next expiring timer */
	cpu_base->next_timer = timer;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * do not schedule a timer which is earlier than the expiry
	 * which we enforced in the hang detection. We want the system
	 * to make progress.
	 */
	if (cpu_base->hang_detected)
		return;

	/*
	 * Program the timer hardware. We enforce the expiry for
	 * events which are already in the past.
	 */
	cpu_base->expires_next = expires;
	tick_program_event(expires, 1);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (!base->hres_active)
		return;

	raw_spin_lock(&base->lock);
	hrtimer_update_base(base);
	hrtimer_force_reprogram(base, 0);
	raw_spin_unlock(&base->lock);
}

/*
 * Switch to high resolution mode
 */
static void hrtimer_switch_to_hres(void)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (tick_init_highres()) {
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", base->cpu);
		return;
	}
	base->hres_active = 1;
	hrtimer_resolution = HIGH_RES_NSEC;

	tick_setup_sched_timer();
	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
}

static void clock_was_set_work(struct work_struct *work)
{
	clock_was_set();
}

static DECLARE_WORK(hrtimer_work, clock_was_set_work);
/*
 * Called from timekeeping and resume code to reprogram the hrtimer
 * interrupt device on all cpus.
 */
void clock_was_set_delayed(void)
{
	schedule_work(&hrtimer_work);
}

#else

static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *b) { return 0; }
static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_reprogram(struct hrtimer *timer,
				    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
#endif
	timerfd_clock_was_set();
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt on all online CPUs. However, all other CPUs will be
 * stopped with interrupts disabled so the clock_was_set() call
 * must be deferred.
 */
void hrtimers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");

	/* Retrigger on the local CPU */
	retrigger_next_event(NULL);
	/* And schedule a retrigger for all others */
	clock_was_set_delayed();
}

static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	if (timer->start_site)
		return;
	timer->start_site = __builtin_return_address(0);
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
#endif
}

static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
#endif
}

static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	if (likely(!timer_stats_active))
		return;
	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, 0);
#endif
}

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
		return 0;

	if (interval.tv64 < hrtimer_resolution)
		interval.tv64 = hrtimer_resolution;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
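/*
 * Example sketch (illustrative only): a periodic timer re-armed from its own
 * callback with hrtimer_forward_now(), which wraps hrtimer_forward() above
 * with now = base->get_time(). Names and the 100ms period are made up for
 * illustration.
 */
static enum hrtimer_restart __maybe_unused example_periodic_fn(struct hrtimer *timer)
{
	/* push the expiry forward by whole 100ms periods past "now" ... */
	hrtimer_forward_now(timer, ms_to_ktime(100));

	/* ... and keep the timer queued */
	return HRTIMER_RESTART;
}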
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base)
{
	debug_activate(timer);

	base->cpu_base->active_bases |= 1 << base->index;

	timer->state = HRTIMER_STATE_ENQUEUED;

	return timerqueue_add(&base->active, &timer->node);
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     u8 newstate, int reprogram)
{
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	u8 state = timer->state;

	timer->state = newstate;
	if (!(state & HRTIMER_STATE_ENQUEUED))
		return;

	if (!timerqueue_del(&base->active, &timer->node))
		cpu_base->active_bases &= ~(1 << base->index);

#ifdef CONFIG_HIGH_RES_TIMERS
	/*
	 * Note: If reprogram is false we do not update
	 * cpu_base->next_timer. This happens when we remove the first
	 * timer on a remote cpu. No harm as we never dereference
	 * cpu_base->next_timer. So the worst that can happen is
	 * a superfluous call to hrtimer_force_reprogram() on the
	 * remote cpu later on if the same timer gets enqueued again.
	 */
	if (reprogram && timer == cpu_base->next_timer)
		hrtimer_force_reprogram(cpu_base, 1);
#endif
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
	if (hrtimer_is_queued(timer)) {
		u8 state = timer->state;
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);

		if (!restart)
			state = HRTIMER_STATE_INACTIVE;

		__remove_hrtimer(timer, base, state, reprogram);
		return 1;
	}
	return 0;
}

static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
					    const enum hrtimer_mode mode)
{
#ifdef CONFIG_TIME_LOW_RES
	/*
	 * CONFIG_TIME_LOW_RES indicates that the system has no way to return
	 * granular time values. For relative timers we add hrtimer_resolution
	 * (i.e. one jiffy) to prevent short timeouts.
	 */
	timer->is_rel = mode & HRTIMER_MODE_REL;
	if (timer->is_rel)
		tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
#endif
	return tim;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 */
void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			    u64 delta_ns, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	remove_hrtimer(timer, base, true);

	if (mode & HRTIMER_MODE_REL)
		tim = ktime_add_safe(tim, base->get_time());

	tim = hrtimer_update_lowres(timer, tim, mode);

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	timer_stats_hrtimer_set_start_info(timer);

	leftmost = enqueue_hrtimer(timer, new_base);
	if (!leftmost)
		goto unlock;

	if (!hrtimer_is_hres_active(timer)) {
		/*
		 * Kick to reschedule the next tick to handle the new timer
		 * on dynticks target.
		 */
		if (new_base->cpu_base->nohz_active)
			wake_up_nohz_cpu(new_base->cpu_base->cpu);
	} else {
		hrtimer_reprogram(timer, new_base);
	}
unlock:
	unlock_hrtimer_base(timer, &flags);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	/*
	 * Check lockless first. If the timer is not active (neither
	 * enqueued nor running the callback), nothing to do here. The
	 * base lock does not serialize against a concurrent enqueue,
	 * so we can avoid taking it.
	 */
	if (!hrtimer_active(timer))
		return 0;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base, false);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
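/*
 * Example sketch (illustrative only): using the delta_ns "slack" argument of
 * hrtimer_start_range_ns() above. The timer may fire anywhere in the window
 * [deadline, deadline + slack], which allows the expiry to be batched with
 * other events. Names and the 1ms slack value are made up for illustration.
 */
static void __maybe_unused example_start_with_slack(struct hrtimer *timer,
						    ktime_t deadline)
{
	/* absolute deadline, with 1ms of permissible lateness */
	hrtimer_start_range_ns(timer, deadline, NSEC_PER_MSEC, HRTIMER_MODE_ABS);
}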
/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 * @adjust:	adjust relative timers when CONFIG_TIME_LOW_RES=y
 */
ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
{
	unsigned long flags;
	ktime_t rem;

	lock_hrtimer_base(timer, &flags);
	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
		rem = hrtimer_expires_remaining_adjusted(timer);
	else
		rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ_COMMON
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the next expiry time or KTIME_MAX if no timer is pending.
 */
u64 hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (!__hrtimer_hres_active(cpu_base))
		expires = __hrtimer_get_next_event(cpu_base).tv64;

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	return expires;
}
#endif

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;
	int base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = raw_cpu_ptr(&hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	base = hrtimer_clockid_to_base(clock_id);
	timer->base = &cpu_base->clock_base[base];
	timerqueue_init(&timer->node);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);
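/*
 * Example sketch (illustrative only): the minimal init/start/callback cycle
 * for an embedded hrtimer, e.g. one that lives in a driver private structure.
 * Names and the 10ms timeout are made up for illustration.
 */
static enum hrtimer_restart example_oneshot_fn(struct hrtimer *timer)
{
	/* runs in hard interrupt context, must not sleep */
	return HRTIMER_NORESTART;
}

static void __maybe_unused example_oneshot_setup(struct hrtimer *timer)
{
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = example_oneshot_fn;
	hrtimer_start(timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}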
/*
 * A timer is active, when it is enqueued into the rbtree or the
 * callback function is running or it's in the state of being migrated
 * to another cpu.
 *
 * It is important for this function to not return a false negative.
 */
bool hrtimer_active(const struct hrtimer *timer)
{
	struct hrtimer_cpu_base *cpu_base;
	unsigned int seq;

	do {
		cpu_base = READ_ONCE(timer->base->cpu_base);
		seq = raw_read_seqcount_begin(&cpu_base->seq);

		if (timer->state != HRTIMER_STATE_INACTIVE ||
		    cpu_base->running == timer)
			return true;

	} while (read_seqcount_retry(&cpu_base->seq, seq) ||
		 cpu_base != READ_ONCE(timer->base->cpu_base));

	return false;
}
EXPORT_SYMBOL_GPL(hrtimer_active);

/*
 * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
 * distinct sections:
 *
 *  - queued:	the timer is queued
 *  - callback:	the timer is being run
 *  - post:	the timer is inactive or (re)queued
 *
 * On the read side we ensure we observe timer->state and cpu_base->running
 * from the same section, if anything changed while we looked at it, we retry.
 * This includes timer->base changing because sequence numbers alone are
 * insufficient for that.
 *
 * The sequence numbers are required because otherwise we could still observe
 * a false negative if the read side got smeared over multiple consecutive
 * __run_hrtimer() invocations.
 */

static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
			  struct hrtimer_clock_base *base,
			  struct hrtimer *timer, ktime_t *now)
{
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	lockdep_assert_held(&cpu_base->lock);

	debug_deactivate(timer);
	cpu_base->running = timer;

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe cpu_base->running == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&cpu_base->seq);

	__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
	timer_stats_account_hrtimer(timer);
	fn = timer->function;

	/*
	 * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
	 * timer is restarted with a period then it becomes an absolute
	 * timer. If it's not restarted it does not matter.
	 */
	if (IS_ENABLED(CONFIG_TIME_LOW_RES))
		timer->is_rel = false;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore it's safe to unlock
	 * the timer base.
	 */
	raw_spin_unlock(&cpu_base->lock);
	trace_hrtimer_expire_entry(timer, now);
	restart = fn(timer);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the running state after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 *
	 * Note: Because we dropped the cpu_base->lock above,
	 * hrtimer_start_range_ns() can have popped in and enqueued the timer
	 * for us already.
	 */
	if (restart != HRTIMER_NORESTART &&
	    !(timer->state & HRTIMER_STATE_ENQUEUED))
		enqueue_hrtimer(timer, base);
	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe cpu_base->running == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&cpu_base->seq);

	WARN_ON_ONCE(cpu_base->running != timer);
	cpu_base->running = NULL;
}

static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
{
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	unsigned int active = cpu_base->active_bases;

	for (; active; base++, active >>= 1) {
		struct timerqueue_node *node;
		ktime_t basenow;

		if (!(active & 0x01))
			continue;

		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */
			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
				break;

			__run_hrtimer(cpu_base, base, timer, &basenow);
		}
	}
}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	int retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	cpu_base->in_hrtirq = 1;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	__hrtimer_run_queues(cpu_base, now);

	/* Reevaluate the clock bases for the next expiry */
	expires_next = __hrtimer_get_next_event(cpu_base);
	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	cpu_base->in_hrtirq = 0;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (!tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock(&cpu_base->lock);
	delta = ktime_sub(now, entry_time);
	if ((unsigned int)delta.tv64 > cpu_base->max_hang_time)
		cpu_base->max_hang_time = (unsigned int) delta.tv64;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta.tv64 > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}

/*
 * local version of hrtimer_peek_ahead_timers() called with interrupts
 * disabled.
 */
static inline void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = this_cpu_ptr(&tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif /* !CONFIG_HIGH_RES_TIMERS */
/*
 * Called from run_local_timers in hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t now;

	if (__hrtimer_hres_active(cpu_base))
		return;

	/*
	 * This _is_ ugly: We have to check periodically, whether we
	 * can switch to highres and / or nohz mode. The clocksource
	 * switch happens with xtime_lock held. Notification from
	 * there only sets the check bit in the tick_oneshot code,
	 * otherwise we might deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
		hrtimer_switch_to_hres();
		return;
	}

	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	__hrtimer_run_queues(cpu_base, now);
	raw_spin_unlock(&cpu_base->lock);
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);

		if (likely(t->task))
			freezable_schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}

static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
{
	struct timespec rmt;
	ktime_t rem;

	rem = hrtimer_expires_remaining(timer);
	if (rem.tv64 <= 0)
		return 0;
	rmt = ktime_to_timespec(rem);

	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
		return -EFAULT;

	return 1;
}

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	int ret = 0;

	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
			      HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		goto out;

	rmtp = restart->nanosleep.rmtp;
	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	/* The other values in restart are already filled in */
	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}
long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	u64 slack;

	slack = current->timer_slack_ns;
	if (dl_task(current) || rt_task(current))
		slack = 0;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
	if (do_nanosleep(&t, mode))
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	restart = &current->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->nanosleep.clockid = t.timer.base->clockid;
	restart->nanosleep.rmtp = rmtp;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);

	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		cpu_base->clock_base[i].cpu_base = cpu_base;
		timerqueue_init_head(&cpu_base->clock_base[i].active);
	}

	cpu_base->cpu = cpu;
	hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct timerqueue_node *node;

	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Mark it as ENQUEUED not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest on this CPU, but we run
		 * hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base);
	}
}
static void migrate_hrtimers(int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = this_cpu_ptr(&hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	raw_spin_lock(&new_base->lock);
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	raw_spin_unlock(&old_base->lock);
	raw_spin_unlock(&new_base->lock);

	/* Check, if we got expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
}

#endif /* CONFIG_HOTPLUG_CPU */

static int hrtimer_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	int scpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(scpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_hrtimers(scpu);
		break;
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
}

/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 * @clock:	timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
 */
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
			       const enum hrtimer_mode mode, int clock)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && !expires->tv64) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, clock, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}
/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, but no earlier than @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired otherwise -EINTR
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
				     const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range_clock(expires, delta, mode,
					      CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);

/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired otherwise -EINTR
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);
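/*
 * Example sketch (illustrative only): sleeping with the helpers above. The
 * task state must be set before the call, exactly as the kernel-doc
 * describes. The function name and the 1ms slack value are made up for
 * illustration.
 */
static int __maybe_unused example_interruptible_sleep(ktime_t timeout)
{
	set_current_state(TASK_INTERRUPTIBLE);

	/* returns 0 on timer expiry, -EINTR when woken by a signal */
	return schedule_hrtimeout_range(&timeout, NSEC_PER_MSEC,
					HRTIMER_MODE_REL);
}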