/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et. al.
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <trace/events/timer.h>

#include "tick-internal.h"

/*
 * Masks for selecting the soft and hard context timers from
 * cpu_base->active
 */
#define MASK_SHIFT		(HRTIMER_BASE_MONOTONIC_SOFT)
#define HRTIMER_ACTIVE_HARD	((1U << MASK_SHIFT) - 1)
#define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
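
/*
 * Illustrative sketch (not compiled, helper name made up): with four hard
 * and four soft clock bases, HRTIMER_BASE_MONOTONIC_SOFT is 4, so the
 * masks above evaluate to
 *
 *	HRTIMER_ACTIVE_HARD == 0x0f	(bits for bases 0-3)
 *	HRTIMER_ACTIVE_SOFT == 0xf0	(bits for bases 4-7)
 *	HRTIMER_ACTIVE_ALL  == 0xff
 *
 * and are applied to cpu_base->active_bases like this:
 */
#if 0	/* illustration only */
static unsigned int example_active_soft_bases(struct hrtimer_cpu_base *cpu_base)
{
	/* Bit n is set when clock_base[n] has at least one enqueued timer */
	return cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
}
#endif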

/*
 * The timer bases:
 *
 * There are more clockids than hrtimer bases. Thus, we index
 * into the timer bases by the hrtimer_base_type enum. When trying
 * to reach a base using a clockid, hrtimer_clockid_to_base()
 * is used to convert from clockid to the proper hrtimer_base_type.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
		{
			.index = HRTIMER_BASE_MONOTONIC_SOFT,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME_SOFT,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME_SOFT,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI_SOFT,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
	}
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	/* Make sure we catch unsupported clockids */
	[0 ... MAX_CLOCKS - 1]	= HRTIMER_MAX_CLOCK_BASES,

	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
};
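
/*
 * Illustrative sketch (not compiled, helper name made up): the table above
 * maps a supported clockid to its hard clock base and every other clockid
 * to HRTIMER_MAX_CLOCK_BASES so callers can catch it. The conversion which
 * the comment above the timer bases attributes to hrtimer_clockid_to_base()
 * boils down to a lookup of this shape, assuming clock_id < MAX_CLOCKS:
 */
#if 0	/* illustration only */
static int example_clockid_to_base(clockid_t clock_id)
{
	int base = hrtimer_clock_to_base_table[clock_id];

	WARN_ON_ONCE(base == HRTIMER_MAX_CLOCK_BASES);
	return base;
}
#endif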

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
 * such that hrtimer_callback_running() can unconditionally dereference
 * timer->base->cpu_base
 */
static struct hrtimer_cpu_base migration_cpu_base = {
	.clock_base = { { .cpu_base = &migration_cpu_base, }, },
};

#define migration_base	migration_cpu_base.clock_base[0]

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = &migration_base and drop the lock: the timer
 * remains locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != &migration_base)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * We do not migrate the timer when it is expiring before the next
 * event on the target cpu. When high resolution is enabled, we cannot
 * reprogram the target cpu hardware and we would cause it to fire
 * late. To keep it simple, we handle the high resolution enabled and
 * disabled cases the same way.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
	ktime_t expires;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires < new_base->cpu_base->expires_next;
}

static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
					 int pinned)
{
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	if (static_branch_likely(&timers_migration_enabled) && !pinned)
		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
#endif
	return base;
}

/*
 * We switch the timer base to a power-optimized selected CPU target,
 * if:
 *	- NO_HZ_COMMON is enabled
 *	- timer migration is enabled
 *	- the timer callback is not running
 *	- the timer is not the first expiring timer on the new target
 *
 * If one of the above requirements is not fulfilled we move the timer
 * to the current CPU or leave it on the previously assigned CPU if
 * the timer callback is currently running.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
	struct hrtimer_clock_base *new_base;
	int basenum = base->index;

	this_cpu_base = this_cpu_ptr(&hrtimer_bases);
	new_cpu_base = get_target_base(this_cpu_base, pinned);
again:
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_hrtimer_base() */
		timer->base = &migration_base;
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			new_cpu_base = this_cpu_base;
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	} else {
		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			new_cpu_base = this_cpu_base;
			goto again;
		}
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif	/* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
/*
 * Divide a ktime value by a nanosecond value
 */
s64 __ktime_divns(const ktime_t kt, s64 div)
{
	int sft = 0;
	s64 dclc;
	u64 tmp;

	dclc = ktime_to_ns(kt);
	tmp = dclc < 0 ? -dclc : dclc;

	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	tmp >>= sft;
	do_div(tmp, (unsigned long) div);
	return dclc < 0 ? -tmp : tmp;
}
EXPORT_SYMBOL_GPL(__ktime_divns);
#endif /* BITS_PER_LONG < 64 */
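
/*
 * Illustrative sketch (not compiled, function name made up): ktime_divns()
 * wraps the helper above on 32-bit systems and a plain 64-bit division
 * elsewhere. Counting how many 250us periods fit into a 10ms delta:
 */
#if 0	/* illustration only */
static void example_ktime_divns(void)
{
	ktime_t delta = ms_to_ktime(10);
	s64 periods = ktime_divns(delta, 250 * NSEC_PER_USEC);

	WARN_ON(periods != 40);
}
#endif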

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add_unsafe(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res < 0 || res < lhs || res < rhs)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);
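
/*
 * Illustrative sketch (not compiled, function name made up): an addition
 * that would overflow is clamped to KTIME_SEC_MAX seconds instead of
 * wrapping around into a negative value:
 */
#if 0	/* illustration only */
static void example_ktime_add_safe(void)
{
	/* A plain addition would wrap into a negative ktime_t here */
	ktime_t sum = ktime_add_safe(KTIME_MAX, ms_to_ktime(5));

	WARN_ON(sum < 0);
}
#endif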

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

static void *hrtimer_debug_hint(void *addr)
{
	return ((struct hrtimer *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.debug_hint	= hrtimer_debug_hint,
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer,
					  enum hrtimer_mode mode)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);

#else

static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer,
					  enum hrtimer_mode mode) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif
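
/*
 * Illustrative sketch (not compiled, names made up): the *_on_stack variants
 * above pair with destroy_hrtimer_on_stack() so that the debug object
 * tracking stays consistent for a timer living on a task's stack:
 */
#if 0	/* illustration only */
static enum hrtimer_restart example_stack_timer_cb(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void example_stack_timer(u64 delay_ns)
{
	struct hrtimer t;

	hrtimer_init_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t.function = example_stack_timer_cb;
	hrtimer_start(&t, ns_to_ktime(delay_ns), HRTIMER_MODE_REL);

	/* ... wait for the timer or give up ... */

	hrtimer_cancel(&t);
	destroy_hrtimer_on_stack(&t);
}
#endif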

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer,
				  enum hrtimer_mode mode)
{
	debug_hrtimer_activate(timer, mode);
	trace_hrtimer_start(timer, mode);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}

static struct hrtimer_clock_base *
__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
{
	unsigned int idx;

	if (!*active)
		return NULL;

	idx = __ffs(*active);
	*active &= ~(1U << idx);

	return &cpu_base->clock_base[idx];
}

#define for_each_active_base(base, cpu_base, active)	\
	while ((base = __next_base((cpu_base), &(active))))
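
/*
 * Illustrative sketch (not compiled, function name made up): walking the
 * currently active bases of a cpu_base with the iterator above. The mask is
 * consumed by __next_base(), so a local copy of the active bits is passed in:
 */
#if 0	/* illustration only */
static void example_walk_active_bases(struct hrtimer_cpu_base *cpu_base)
{
	unsigned int active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
	struct hrtimer_clock_base *base;

	for_each_active_base(base, cpu_base, active)
		pr_info("clock base %u has enqueued timers\n", base->index);
}
#endif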

static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
					 const struct hrtimer *exclude,
					 unsigned int active,
					 ktime_t expires_next)
{
	struct hrtimer_clock_base *base;
	ktime_t expires;

	for_each_active_base(base, cpu_base, active) {
		struct timerqueue_node *next;
		struct hrtimer *timer;

		next = timerqueue_getnext(&base->active);
		timer = container_of(next, struct hrtimer, node);
		if (timer == exclude) {
			/* Get to the next timer in the queue. */
			struct rb_node *rbn = rb_next(&next->node);

			next = rb_entry_safe(rbn, struct timerqueue_node, node);
			if (!next)
				continue;

			timer = container_of(next, struct hrtimer, node);
		}
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		if (expires < expires_next) {
			expires_next = expires;

			/* Skip cpu_base update if a timer is being excluded. */
			if (exclude)
				continue;

			if (timer->is_soft)
				cpu_base->softirq_next_timer = timer;
			else
				cpu_base->next_timer = timer;
		}
	}
	/*
	 * clock_was_set() might have changed base->offset of any of
	 * the clock bases so the result might be negative. Fix it up
	 * to prevent a false positive in clockevents_program_event().
	 */
	if (expires_next < 0)
		expires_next = 0;
	return expires_next;
}

/*
 * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
 * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
 *
 * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
 * those timers will get run whenever the softirq gets handled, at the end of
 * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
 *
 * Therefore softirq values are those from the HRTIMER_ACTIVE_SOFT clock bases.
 * The !softirq values are the minima across HRTIMER_ACTIVE_ALL, unless an actual
 * softirq is pending, in which case they're the minima of HRTIMER_ACTIVE_HARD.
 *
 * @active_mask must be one of:
 *  - HRTIMER_ACTIVE_ALL,
 *  - HRTIMER_ACTIVE_SOFT, or
 *  - HRTIMER_ACTIVE_HARD.
 */
static ktime_t
__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
{
	unsigned int active;
	struct hrtimer *next_timer = NULL;
	ktime_t expires_next = KTIME_MAX;

	if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
		active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
		cpu_base->softirq_next_timer = NULL;
		expires_next = __hrtimer_next_event_base(cpu_base, NULL,
							 active, KTIME_MAX);

		next_timer = cpu_base->softirq_next_timer;
	}

	if (active_mask & HRTIMER_ACTIVE_HARD) {
		active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
		cpu_base->next_timer = next_timer;
		expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
							 expires_next);
	}

	return expires_next;
}

static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;

	ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
						   offs_real, offs_boot, offs_tai);

	base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
	base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
	base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;

	return now;
}

/*
 * Is the high resolution mode active ?
 */
static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
	return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
		cpu_base->hres_active : 0;
}

static inline int hrtimer_hres_active(void)
{
	return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
}

/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	ktime_t expires_next;

	/*
	 * Find the current next expiration time.
	 */
	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);

	if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
		/*
		 * When the softirq is activated, hrtimer has to be
		 * programmed with the first hard hrtimer because soft
		 * timer interrupt could occur too late.
		 */
		if (cpu_base->softirq_activated)
			expires_next = __hrtimer_get_next_event(cpu_base,
								HRTIMER_ACTIVE_HARD);
		else
			cpu_base->softirq_expires_next = expires_next;
	}

	if (skip_equal && expires_next == cpu_base->expires_next)
		return;

	cpu_base->expires_next = expires_next;

	/*
	 * If hres is not active, hardware does not have to be
	 * reprogrammed yet.
	 *
	 * If a hang was detected in the last timer interrupt then we
	 * leave the hang delay active in the hardware. We want the
	 * system to make progress. That also prevents the following
	 * scenario:
	 * T1 expires 50ms from now
	 * T2 expires 5s from now
	 *
	 * T1 is removed, so this code is called and would reprogram
	 * the hardware to 5s from now. Any hrtimer_start after that
	 * will not reprogram the hardware due to hang_detected being
	 * set. So we'd effectively block all timers until the T2 event
	 * fires.
	 */
	if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
		return;

	tick_program_event(cpu_base->expires_next, 1);
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static bool hrtimer_hres_enabled __read_mostly = true;
unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
EXPORT_SYMBOL_GPL(hrtimer_resolution);

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
}

__setup("highres=", setup_hrtimer_hres);

/*
 * hrtimer_high_res_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (!__hrtimer_hres_active(base))
		return;

	raw_spin_lock(&base->lock);
	hrtimer_update_base(base);
	hrtimer_force_reprogram(base, 0);
	raw_spin_unlock(&base->lock);
}

/*
 * Switch to high resolution mode
 */
static void hrtimer_switch_to_hres(void)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (tick_init_highres()) {
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", base->cpu);
		return;
	}
	base->hres_active = 1;
	hrtimer_resolution = HIGH_RES_NSEC;

	tick_setup_sched_timer();
	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
}

static void clock_was_set_work(struct work_struct *work)
{
	clock_was_set();
}

static DECLARE_WORK(hrtimer_work, clock_was_set_work);

/*
 * Called from timekeeping and resume code to reprogram the hrtimer
 * interrupt device on all cpus.
 */
void clock_was_set_delayed(void)
{
	schedule_work(&hrtimer_work);
}

#else

static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }
static inline void retrigger_next_event(void *arg) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

/*
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	struct hrtimer_clock_base *base = timer->base;
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Set it to 0.
	 */
	if (expires < 0)
		expires = 0;

	if (timer->is_soft) {
		/*
		 * soft hrtimer could be started on a remote CPU. In this
		 * case softirq_expires_next needs to be updated on the
		 * remote CPU. The soft hrtimer will not expire before the
		 * first hard hrtimer on the remote CPU -
		 * hrtimer_check_target() prevents this case.
		 */
		struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;

		if (timer_cpu_base->softirq_activated)
			return;

		if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
			return;

		timer_cpu_base->softirq_next_timer = timer;
		timer_cpu_base->softirq_expires_next = expires;

		if (!ktime_before(expires, timer_cpu_base->expires_next) ||
		    !reprogram)
			return;
	}

	/*
	 * If the timer is not on the current cpu, we cannot reprogram
	 * the other cpus clock event device.
	 */
	if (base->cpu_base != cpu_base)
		return;

	/*
	 * If the hrtimer interrupt is running, then it will
	 * reevaluate the clock bases and reprogram the clock event
	 * device. The callbacks are always executed in hard interrupt
	 * context so we don't need an extra check for a running
	 * callback.
	 */
	if (cpu_base->in_hrtirq)
		return;

	if (expires >= cpu_base->expires_next)
		return;

	/* Update the pointer to the next expiring timer */
	cpu_base->next_timer = timer;
	cpu_base->expires_next = expires;

	/*
	 * If hres is not active, hardware does not have to be
	 * programmed yet.
	 *
	 * If a hang was detected in the last timer interrupt then we
	 * do not schedule a timer which is earlier than the expiry
	 * which we enforced in the hang detection. We want the system
	 * to make progress.
	 */
	if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
		return;

	/*
	 * Program the timer hardware. We enforce the expiry for
	 * events which are already in the past.
	 */
	tick_program_event(expires, 1);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
#endif
	timerfd_clock_was_set();
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt on all online CPUs. However, all other CPUs will be
 * stopped with interrupts disabled so the clock_was_set() call
 * must be deferred.
 */
void hrtimers_resume(void)
{
	lockdep_assert_irqs_disabled();
	/* Retrigger on the local CPU */
	retrigger_next_event(NULL);
	/* And schedule a retrigger for all others */
	clock_was_set_delayed();
}

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta < 0)
		return 0;

	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
		return 0;

	if (interval < hrtimer_resolution)
		interval = hrtimer_resolution;

	if (unlikely(delta >= interval)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
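
/*
 * Illustrative sketch (not compiled, names and period made up): the typical
 * pattern for a periodic timer is to forward the expiry from within the
 * callback and ask for the timer to be restarted:
 */
#if 0	/* illustration only */
static enum hrtimer_restart example_periodic_cb(struct hrtimer *timer)
{
	/* Push the expiry forward by whole periods past the current time */
	hrtimer_forward_now(timer, ms_to_ktime(10));

	return HRTIMER_RESTART;
}

static void example_start_periodic(struct hrtimer *timer)
{
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = example_periodic_cb;
	hrtimer_start(timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}
#endif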

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base,
			   enum hrtimer_mode mode)
{
	debug_activate(timer, mode);

	base->cpu_base->active_bases |= 1 << base->index;

	timer->state = HRTIMER_STATE_ENQUEUED;

	return timerqueue_add(&base->active, &timer->node);
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     u8 newstate, int reprogram)
{
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	u8 state = timer->state;

	timer->state = newstate;
	if (!(state & HRTIMER_STATE_ENQUEUED))
		return;

	if (!timerqueue_del(&base->active, &timer->node))
		cpu_base->active_bases &= ~(1 << base->index);

	/*
	 * Note: If reprogram is false we do not update
	 * cpu_base->next_timer. This happens when we remove the first
	 * timer on a remote cpu. No harm as we never dereference
	 * cpu_base->next_timer. So the worst thing that can happen is
	 * a superfluous call to hrtimer_force_reprogram() on the
	 * remote cpu later on if the same timer gets enqueued again.
	 */
	if (reprogram && timer == cpu_base->next_timer)
		hrtimer_force_reprogram(cpu_base, 1);
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
	if (hrtimer_is_queued(timer)) {
		u8 state = timer->state;
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_deactivate(timer);
		reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);

		if (!restart)
			state = HRTIMER_STATE_INACTIVE;

		__remove_hrtimer(timer, base, state, reprogram);
		return 1;
	}
	return 0;
}

static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
					    const enum hrtimer_mode mode)
{
#ifdef CONFIG_TIME_LOW_RES
	/*
	 * CONFIG_TIME_LOW_RES indicates that the system has no way to return
	 * granular time values. For relative timers we add hrtimer_resolution
	 * (i.e. one jiffie) to prevent short timeouts.
	 */
	timer->is_rel = mode & HRTIMER_MODE_REL;
	if (timer->is_rel)
		tim = ktime_add_safe(tim, hrtimer_resolution);
#endif
	return tim;
}

static void
hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
{
	ktime_t expires;

	/*
	 * Find the next SOFT expiration.
	 */
	expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);

	/*
	 * reprogramming needs to be triggered, even if the next soft
	 * hrtimer expires at the same time as the next hard
	 * hrtimer. cpu_base->softirq_expires_next needs to be updated!
10615da70160SAnna-Maria Gleixner */ 10625da70160SAnna-Maria Gleixner if (expires == KTIME_MAX) 10635da70160SAnna-Maria Gleixner return; 10645da70160SAnna-Maria Gleixner 10655da70160SAnna-Maria Gleixner /* 10665da70160SAnna-Maria Gleixner * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event() 10675da70160SAnna-Maria Gleixner * cpu_base->*expires_next is only set by hrtimer_reprogram() 10685da70160SAnna-Maria Gleixner */ 10695da70160SAnna-Maria Gleixner hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram); 10705da70160SAnna-Maria Gleixner } 10715da70160SAnna-Maria Gleixner 1072138a6b7aSAnna-Maria Gleixner static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, 1073138a6b7aSAnna-Maria Gleixner u64 delta_ns, const enum hrtimer_mode mode, 1074138a6b7aSAnna-Maria Gleixner struct hrtimer_clock_base *base) 10755cee9645SThomas Gleixner { 1076138a6b7aSAnna-Maria Gleixner struct hrtimer_clock_base *new_base; 10775cee9645SThomas Gleixner 10785cee9645SThomas Gleixner /* Remove an active timer from the queue: */ 10798edfb036SPeter Zijlstra remove_hrtimer(timer, base, true); 10805cee9645SThomas Gleixner 1081203cbf77SThomas Gleixner if (mode & HRTIMER_MODE_REL) 10825cee9645SThomas Gleixner tim = ktime_add_safe(tim, base->get_time()); 1083203cbf77SThomas Gleixner 1084203cbf77SThomas Gleixner tim = hrtimer_update_lowres(timer, tim, mode); 10855cee9645SThomas Gleixner 10865cee9645SThomas Gleixner hrtimer_set_expires_range_ns(timer, tim, delta_ns); 10875cee9645SThomas Gleixner 10885cee9645SThomas Gleixner /* Switch the timer base, if necessary: */ 10895cee9645SThomas Gleixner new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); 10905cee9645SThomas Gleixner 1091138a6b7aSAnna-Maria Gleixner return enqueue_hrtimer(timer, new_base, mode); 1092138a6b7aSAnna-Maria Gleixner } 10935da70160SAnna-Maria Gleixner 1094138a6b7aSAnna-Maria Gleixner /** 1095138a6b7aSAnna-Maria Gleixner * hrtimer_start_range_ns - (re)start an hrtimer 1096138a6b7aSAnna-Maria Gleixner * @timer: the timer to be added 1097138a6b7aSAnna-Maria Gleixner * @tim: expiry time 1098138a6b7aSAnna-Maria Gleixner * @delta_ns: "slack" range for the timer 1099138a6b7aSAnna-Maria Gleixner * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or 11005da70160SAnna-Maria Gleixner * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED); 11015da70160SAnna-Maria Gleixner * softirq based mode is considered for debug purpose only! 1102138a6b7aSAnna-Maria Gleixner */ 1103138a6b7aSAnna-Maria Gleixner void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, 1104138a6b7aSAnna-Maria Gleixner u64 delta_ns, const enum hrtimer_mode mode) 1105138a6b7aSAnna-Maria Gleixner { 1106138a6b7aSAnna-Maria Gleixner struct hrtimer_clock_base *base; 1107138a6b7aSAnna-Maria Gleixner unsigned long flags; 110849a2a075SViresh Kumar 11095da70160SAnna-Maria Gleixner /* 11105da70160SAnna-Maria Gleixner * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft 11115da70160SAnna-Maria Gleixner * match. 
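 *
 * Illustrative sketch (not part of this file; my_callback is a
 * placeholder): the mode used to start the timer must belong to the
 * same mode family the timer was initialized with, e.g.
 *
 *	hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
 *	t.function = my_callback;
 *	hrtimer_start(&t, ms_to_ktime(10), HRTIMER_MODE_REL_SOFT);
 *
 * Initializing with a _SOFT mode and starting with a hard mode (or the
 * other way round) trips the warning below.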
11125da70160SAnna-Maria Gleixner */ 11135da70160SAnna-Maria Gleixner WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft); 11145da70160SAnna-Maria Gleixner 1115138a6b7aSAnna-Maria Gleixner base = lock_hrtimer_base(timer, &flags); 1116138a6b7aSAnna-Maria Gleixner 1117138a6b7aSAnna-Maria Gleixner if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base)) 11185da70160SAnna-Maria Gleixner hrtimer_reprogram(timer, true); 1119138a6b7aSAnna-Maria Gleixner 11205cee9645SThomas Gleixner unlock_hrtimer_base(timer, &flags); 11215cee9645SThomas Gleixner } 11225cee9645SThomas Gleixner EXPORT_SYMBOL_GPL(hrtimer_start_range_ns); 11235cee9645SThomas Gleixner 11245cee9645SThomas Gleixner /** 11255cee9645SThomas Gleixner * hrtimer_try_to_cancel - try to deactivate a timer 11265cee9645SThomas Gleixner * @timer: hrtimer to stop 11275cee9645SThomas Gleixner * 11285cee9645SThomas Gleixner * Returns: 11295cee9645SThomas Gleixner * 0 when the timer was not active 11305cee9645SThomas Gleixner * 1 when the timer was active 11310ba42a59SMasanari Iida * -1 when the timer is currently executing the callback function and 11325cee9645SThomas Gleixner * cannot be stopped 11335cee9645SThomas Gleixner */ 11345cee9645SThomas Gleixner int hrtimer_try_to_cancel(struct hrtimer *timer) 11355cee9645SThomas Gleixner { 11365cee9645SThomas Gleixner struct hrtimer_clock_base *base; 11375cee9645SThomas Gleixner unsigned long flags; 11385cee9645SThomas Gleixner int ret = -1; 11395cee9645SThomas Gleixner 114019d9f422SThomas Gleixner /* 114119d9f422SThomas Gleixner * Check lockless first. If the timer is not active (neither 114219d9f422SThomas Gleixner * enqueued nor running the callback), nothing to do here. The 114319d9f422SThomas Gleixner * base lock does not serialize against a concurrent enqueue, 114419d9f422SThomas Gleixner * so we can avoid taking it. 114519d9f422SThomas Gleixner */ 114619d9f422SThomas Gleixner if (!hrtimer_active(timer)) 114719d9f422SThomas Gleixner return 0; 114819d9f422SThomas Gleixner 11495cee9645SThomas Gleixner base = lock_hrtimer_base(timer, &flags); 11505cee9645SThomas Gleixner 11515cee9645SThomas Gleixner if (!hrtimer_callback_running(timer)) 11528edfb036SPeter Zijlstra ret = remove_hrtimer(timer, base, false); 11535cee9645SThomas Gleixner 11545cee9645SThomas Gleixner unlock_hrtimer_base(timer, &flags); 11555cee9645SThomas Gleixner 11565cee9645SThomas Gleixner return ret; 11575cee9645SThomas Gleixner 11585cee9645SThomas Gleixner } 11595cee9645SThomas Gleixner EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel); 11605cee9645SThomas Gleixner 11615cee9645SThomas Gleixner /** 11625cee9645SThomas Gleixner * hrtimer_cancel - cancel a timer and wait for the handler to finish.
11635cee9645SThomas Gleixner * @timer: the timer to be cancelled 11645cee9645SThomas Gleixner * 11655cee9645SThomas Gleixner * Returns: 11665cee9645SThomas Gleixner * 0 when the timer was not active 11675cee9645SThomas Gleixner * 1 when the timer was active 11685cee9645SThomas Gleixner */ 11695cee9645SThomas Gleixner int hrtimer_cancel(struct hrtimer *timer) 11705cee9645SThomas Gleixner { 11715cee9645SThomas Gleixner for (;;) { 11725cee9645SThomas Gleixner int ret = hrtimer_try_to_cancel(timer); 11735cee9645SThomas Gleixner 11745cee9645SThomas Gleixner if (ret >= 0) 11755cee9645SThomas Gleixner return ret; 11765cee9645SThomas Gleixner cpu_relax(); 11775cee9645SThomas Gleixner } 11785cee9645SThomas Gleixner } 11795cee9645SThomas Gleixner EXPORT_SYMBOL_GPL(hrtimer_cancel); 11805cee9645SThomas Gleixner 11815cee9645SThomas Gleixner /** 11825cee9645SThomas Gleixner * hrtimer_get_remaining - get remaining time for the timer 11835cee9645SThomas Gleixner * @timer: the timer to read 1184203cbf77SThomas Gleixner * @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y 11855cee9645SThomas Gleixner */ 1186203cbf77SThomas Gleixner ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust) 11875cee9645SThomas Gleixner { 11885cee9645SThomas Gleixner unsigned long flags; 11895cee9645SThomas Gleixner ktime_t rem; 11905cee9645SThomas Gleixner 11915cee9645SThomas Gleixner lock_hrtimer_base(timer, &flags); 1192203cbf77SThomas Gleixner if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust) 1193203cbf77SThomas Gleixner rem = hrtimer_expires_remaining_adjusted(timer); 1194203cbf77SThomas Gleixner else 11955cee9645SThomas Gleixner rem = hrtimer_expires_remaining(timer); 11965cee9645SThomas Gleixner unlock_hrtimer_base(timer, &flags); 11975cee9645SThomas Gleixner 11985cee9645SThomas Gleixner return rem; 11995cee9645SThomas Gleixner } 1200203cbf77SThomas Gleixner EXPORT_SYMBOL_GPL(__hrtimer_get_remaining); 12015cee9645SThomas Gleixner 12025cee9645SThomas Gleixner #ifdef CONFIG_NO_HZ_COMMON 12035cee9645SThomas Gleixner /** 12045cee9645SThomas Gleixner * hrtimer_get_next_event - get the time until next expiry event 12055cee9645SThomas Gleixner * 1206c1ad348bSThomas Gleixner * Returns the next expiry time or KTIME_MAX if no timer is pending. 12075cee9645SThomas Gleixner */ 1208c1ad348bSThomas Gleixner u64 hrtimer_get_next_event(void) 12095cee9645SThomas Gleixner { 1210dc5df73bSChristoph Lameter struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); 1211c1ad348bSThomas Gleixner u64 expires = KTIME_MAX; 12125cee9645SThomas Gleixner unsigned long flags; 12135cee9645SThomas Gleixner 12145cee9645SThomas Gleixner raw_spin_lock_irqsave(&cpu_base->lock, flags); 12155cee9645SThomas Gleixner 1216e19ffe8bSThomas Gleixner if (!__hrtimer_hres_active(cpu_base)) 12175da70160SAnna-Maria Gleixner expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL); 12185cee9645SThomas Gleixner 12195cee9645SThomas Gleixner raw_spin_unlock_irqrestore(&cpu_base->lock, flags); 12205cee9645SThomas Gleixner 1221c1ad348bSThomas Gleixner return expires; 12225cee9645SThomas Gleixner } 1223*a59855cdSRafael J. Wysocki 1224*a59855cdSRafael J. Wysocki /** 1225*a59855cdSRafael J. Wysocki * hrtimer_next_event_without - time until next expiry event w/o one timer 1226*a59855cdSRafael J. Wysocki * @exclude: timer to exclude 1227*a59855cdSRafael J. Wysocki * 1228*a59855cdSRafael J. Wysocki * Returns the next expiry time over all timers except for the @exclude one or 1229*a59855cdSRafael J. 
Wysocki * KTIME_MAX if none of them is pending. 1230*a59855cdSRafael J. Wysocki */ 1231*a59855cdSRafael J. Wysocki u64 hrtimer_next_event_without(const struct hrtimer *exclude) 1232*a59855cdSRafael J. Wysocki { 1233*a59855cdSRafael J. Wysocki struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); 1234*a59855cdSRafael J. Wysocki u64 expires = KTIME_MAX; 1235*a59855cdSRafael J. Wysocki unsigned long flags; 1236*a59855cdSRafael J. Wysocki 1237*a59855cdSRafael J. Wysocki raw_spin_lock_irqsave(&cpu_base->lock, flags); 1238*a59855cdSRafael J. Wysocki 1239*a59855cdSRafael J. Wysocki if (__hrtimer_hres_active(cpu_base)) { 1240*a59855cdSRafael J. Wysocki unsigned int active; 1241*a59855cdSRafael J. Wysocki 1242*a59855cdSRafael J. Wysocki if (!cpu_base->softirq_activated) { 1243*a59855cdSRafael J. Wysocki active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT; 1244*a59855cdSRafael J. Wysocki expires = __hrtimer_next_event_base(cpu_base, exclude, 1245*a59855cdSRafael J. Wysocki active, KTIME_MAX); 1246*a59855cdSRafael J. Wysocki } 1247*a59855cdSRafael J. Wysocki active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD; 1248*a59855cdSRafael J. Wysocki expires = __hrtimer_next_event_base(cpu_base, exclude, active, 1249*a59855cdSRafael J. Wysocki expires); 1250*a59855cdSRafael J. Wysocki } 1251*a59855cdSRafael J. Wysocki 1252*a59855cdSRafael J. Wysocki raw_spin_unlock_irqrestore(&cpu_base->lock, flags); 1253*a59855cdSRafael J. Wysocki 1254*a59855cdSRafael J. Wysocki return expires; 1255*a59855cdSRafael J. Wysocki } 12565cee9645SThomas Gleixner #endif 12575cee9645SThomas Gleixner 1258336a9cdeSMarc Zyngier static inline int hrtimer_clockid_to_base(clockid_t clock_id) 1259336a9cdeSMarc Zyngier { 1260336a9cdeSMarc Zyngier if (likely(clock_id < MAX_CLOCKS)) { 1261336a9cdeSMarc Zyngier int base = hrtimer_clock_to_base_table[clock_id]; 1262336a9cdeSMarc Zyngier 1263336a9cdeSMarc Zyngier if (likely(base != HRTIMER_MAX_CLOCK_BASES)) 1264336a9cdeSMarc Zyngier return base; 1265336a9cdeSMarc Zyngier } 1266336a9cdeSMarc Zyngier WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id); 1267336a9cdeSMarc Zyngier return HRTIMER_BASE_MONOTONIC; 1268336a9cdeSMarc Zyngier } 1269336a9cdeSMarc Zyngier 12705cee9645SThomas Gleixner static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, 12715cee9645SThomas Gleixner enum hrtimer_mode mode) 12725cee9645SThomas Gleixner { 127342f42da4SAnna-Maria Gleixner bool softtimer = !!(mode & HRTIMER_MODE_SOFT); 127342f42da4SAnna-Maria Gleixner int base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0; 12755cee9645SThomas Gleixner struct hrtimer_cpu_base *cpu_base; 12765cee9645SThomas Gleixner 12775cee9645SThomas Gleixner memset(timer, 0, sizeof(struct hrtimer)); 12785cee9645SThomas Gleixner 127922127e93SChristoph Lameter cpu_base = raw_cpu_ptr(&hrtimer_bases); 12805cee9645SThomas Gleixner 128148d0c9beSAnna-Maria Gleixner /* 128248d0c9beSAnna-Maria Gleixner * POSIX magic: Relative CLOCK_REALTIME timers are not affected by 128348d0c9beSAnna-Maria Gleixner * clock modifications, so they need to become CLOCK_MONOTONIC to 128448d0c9beSAnna-Maria Gleixner * ensure POSIX compliance.
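 *
 * Illustrative example (not taken from this file): a timer set up with
 *
 *	hrtimer_init(&t, CLOCK_REALTIME, HRTIMER_MODE_REL);
 *
 * therefore behaves exactly like a CLOCK_MONOTONIC timer, so a later
 * clock_settime() jump on CLOCK_REALTIME does not move its expiry.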
128548d0c9beSAnna-Maria Gleixner */ 128648d0c9beSAnna-Maria Gleixner if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL) 12875cee9645SThomas Gleixner clock_id = CLOCK_MONOTONIC; 12885cee9645SThomas Gleixner 128942f42da4SAnna-Maria Gleixner base += hrtimer_clockid_to_base(clock_id); 129042f42da4SAnna-Maria Gleixner timer->is_soft = softtimer; 12915cee9645SThomas Gleixner timer->base = &cpu_base->clock_base[base]; 12925cee9645SThomas Gleixner timerqueue_init(&timer->node); 12935cee9645SThomas Gleixner } 12945cee9645SThomas Gleixner 12955cee9645SThomas Gleixner /** 12965cee9645SThomas Gleixner * hrtimer_init - initialize a timer to the given clock 12975cee9645SThomas Gleixner * @timer: the timer to be initialized 12985cee9645SThomas Gleixner * @clock_id: the clock to be used 129942f42da4SAnna-Maria Gleixner * @mode: The modes which are relevant for initialization: 130042f42da4SAnna-Maria Gleixner * HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT, 130142f42da4SAnna-Maria Gleixner * HRTIMER_MODE_REL_SOFT 130242f42da4SAnna-Maria Gleixner * 130342f42da4SAnna-Maria Gleixner * The PINNED variants of the above can be handed in, 130442f42da4SAnna-Maria Gleixner * but the PINNED bit is ignored as pinning happens 130542f42da4SAnna-Maria Gleixner * when the hrtimer is started. 13065cee9645SThomas Gleixner */ 13075cee9645SThomas Gleixner void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, 13085cee9645SThomas Gleixner enum hrtimer_mode mode) 13095cee9645SThomas Gleixner { 13105cee9645SThomas Gleixner debug_init(timer, clock_id, mode); 13115cee9645SThomas Gleixner __hrtimer_init(timer, clock_id, mode); 13125cee9645SThomas Gleixner } 13135cee9645SThomas Gleixner EXPORT_SYMBOL_GPL(hrtimer_init); 13145cee9645SThomas Gleixner 1315887d9dc9SPeter Zijlstra /* 1316887d9dc9SPeter Zijlstra * A timer is active when it is enqueued into the rbtree or the 1317887d9dc9SPeter Zijlstra * callback function is running or it's in the state of being migrated 1318887d9dc9SPeter Zijlstra * to another cpu. 13195cee9645SThomas Gleixner * 1320887d9dc9SPeter Zijlstra * It is important for this function to not return a false negative.
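 *
 * A typical lockless use (illustrative only; rearm_my_timer() is a
 * placeholder for caller code) is:
 *
 *	if (!hrtimer_active(&t))
 *		rearm_my_timer(&t);
 *
 * which is only safe because a timer that is queued, running its
 * callback or migrating is never reported as inactive.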
13215cee9645SThomas Gleixner */ 1322887d9dc9SPeter Zijlstra bool hrtimer_active(const struct hrtimer *timer) 13235cee9645SThomas Gleixner { 13243f0b9e8eSAnna-Maria Gleixner struct hrtimer_clock_base *base; 1325887d9dc9SPeter Zijlstra unsigned int seq; 13265cee9645SThomas Gleixner 1327887d9dc9SPeter Zijlstra do { 13283f0b9e8eSAnna-Maria Gleixner base = READ_ONCE(timer->base); 13293f0b9e8eSAnna-Maria Gleixner seq = raw_read_seqcount_begin(&base->seq); 13305cee9645SThomas Gleixner 1331887d9dc9SPeter Zijlstra if (timer->state != HRTIMER_STATE_INACTIVE || 13323f0b9e8eSAnna-Maria Gleixner base->running == timer) 1333887d9dc9SPeter Zijlstra return true; 1334887d9dc9SPeter Zijlstra 13353f0b9e8eSAnna-Maria Gleixner } while (read_seqcount_retry(&base->seq, seq) || 13363f0b9e8eSAnna-Maria Gleixner base != READ_ONCE(timer->base)); 1337887d9dc9SPeter Zijlstra 1338887d9dc9SPeter Zijlstra return false; 13395cee9645SThomas Gleixner } 1340887d9dc9SPeter Zijlstra EXPORT_SYMBOL_GPL(hrtimer_active); 13415cee9645SThomas Gleixner 1342887d9dc9SPeter Zijlstra /* 1343887d9dc9SPeter Zijlstra * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3 1344887d9dc9SPeter Zijlstra * distinct sections: 1345887d9dc9SPeter Zijlstra * 1346887d9dc9SPeter Zijlstra * - queued: the timer is queued 1347887d9dc9SPeter Zijlstra * - callback: the timer is being run 1348887d9dc9SPeter Zijlstra * - post: the timer is inactive or (re)queued 1349887d9dc9SPeter Zijlstra * 1350887d9dc9SPeter Zijlstra * On the read side we ensure we observe timer->state and cpu_base->running 1351887d9dc9SPeter Zijlstra * from the same section; if anything changed while we looked at it, we retry. 1352887d9dc9SPeter Zijlstra * This includes timer->base changing because sequence numbers alone are 1353887d9dc9SPeter Zijlstra * insufficient for that. 1354887d9dc9SPeter Zijlstra * 1355887d9dc9SPeter Zijlstra * The sequence numbers are required because otherwise we could still observe 1356887d9dc9SPeter Zijlstra * a false negative if the read side got smeared over multiple consecutive 1357887d9dc9SPeter Zijlstra * __run_hrtimer() invocations. 1358887d9dc9SPeter Zijlstra */ 1359887d9dc9SPeter Zijlstra 136021d6d52aSThomas Gleixner static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base, 136121d6d52aSThomas Gleixner struct hrtimer_clock_base *base, 1362dd934aa8SAnna-Maria Gleixner struct hrtimer *timer, ktime_t *now, 1363dd934aa8SAnna-Maria Gleixner unsigned long flags) 13645cee9645SThomas Gleixner { 13655cee9645SThomas Gleixner enum hrtimer_restart (*fn)(struct hrtimer *); 13665cee9645SThomas Gleixner int restart; 13675cee9645SThomas Gleixner 1368887d9dc9SPeter Zijlstra lockdep_assert_held(&cpu_base->lock); 13695cee9645SThomas Gleixner 13705cee9645SThomas Gleixner debug_deactivate(timer); 13713f0b9e8eSAnna-Maria Gleixner base->running = timer; 1372887d9dc9SPeter Zijlstra 1373887d9dc9SPeter Zijlstra /* 1374887d9dc9SPeter Zijlstra * Separate the ->running assignment from the ->state assignment. 1375887d9dc9SPeter Zijlstra * 1376887d9dc9SPeter Zijlstra * As with a regular write barrier, this ensures the read side in 13773f0b9e8eSAnna-Maria Gleixner * hrtimer_active() cannot observe base->running == NULL && 1378887d9dc9SPeter Zijlstra * timer->state == INACTIVE.
1379887d9dc9SPeter Zijlstra */ 13803f0b9e8eSAnna-Maria Gleixner raw_write_seqcount_barrier(&base->seq); 1381887d9dc9SPeter Zijlstra 1382887d9dc9SPeter Zijlstra __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0); 13835cee9645SThomas Gleixner fn = timer->function; 13845cee9645SThomas Gleixner 13855cee9645SThomas Gleixner /* 1386203cbf77SThomas Gleixner * Clear the 'is relative' flag for the TIME_LOW_RES case. If the 1387203cbf77SThomas Gleixner * timer is restarted with a period then it becomes an absolute 1388203cbf77SThomas Gleixner * timer. If it's not restarted it does not matter. 1389203cbf77SThomas Gleixner */ 1390203cbf77SThomas Gleixner if (IS_ENABLED(CONFIG_TIME_LOW_RES)) 1391203cbf77SThomas Gleixner timer->is_rel = false; 1392203cbf77SThomas Gleixner 1393203cbf77SThomas Gleixner /* 1394d05ca13bSThomas Gleixner * The timer is marked as running in the CPU base, so it is 1395d05ca13bSThomas Gleixner * protected against migration to a different CPU even if the lock 1396d05ca13bSThomas Gleixner * is dropped. 13975cee9645SThomas Gleixner */ 1398dd934aa8SAnna-Maria Gleixner raw_spin_unlock_irqrestore(&cpu_base->lock, flags); 13995cee9645SThomas Gleixner trace_hrtimer_expire_entry(timer, now); 14005cee9645SThomas Gleixner restart = fn(timer); 14015cee9645SThomas Gleixner trace_hrtimer_expire_exit(timer); 1402dd934aa8SAnna-Maria Gleixner raw_spin_lock_irq(&cpu_base->lock); 14035cee9645SThomas Gleixner 14045cee9645SThomas Gleixner /* 1405887d9dc9SPeter Zijlstra * Note: We clear the running state after enqueue_hrtimer and 1406b4d90e9fSPratyush Patel * we do not reprogram the event hardware. Happens either in 14075cee9645SThomas Gleixner * hrtimer_start_range_ns() or in hrtimer_interrupt() 14085de2755cSPeter Zijlstra * 14095de2755cSPeter Zijlstra * Note: Because we dropped the cpu_base->lock above, 14105de2755cSPeter Zijlstra * hrtimer_start_range_ns() can have popped in and enqueued the timer 14115de2755cSPeter Zijlstra * for us already. 14125cee9645SThomas Gleixner */ 14135de2755cSPeter Zijlstra if (restart != HRTIMER_NORESTART && 14145de2755cSPeter Zijlstra !(timer->state & HRTIMER_STATE_ENQUEUED)) 141563e2ed36SAnna-Maria Gleixner enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS); 14165cee9645SThomas Gleixner 14175cee9645SThomas Gleixner /* 1418887d9dc9SPeter Zijlstra * Separate the ->running assignment from the ->state assignment. 1419887d9dc9SPeter Zijlstra * 1420887d9dc9SPeter Zijlstra * As with a regular write barrier, this ensures the read side in 14213f0b9e8eSAnna-Maria Gleixner * hrtimer_active() cannot observe base->running.timer == NULL && 1422887d9dc9SPeter Zijlstra * timer->state == INACTIVE.
14235cee9645SThomas Gleixner */ 14243f0b9e8eSAnna-Maria Gleixner raw_write_seqcount_barrier(&base->seq); 14255cee9645SThomas Gleixner 14263f0b9e8eSAnna-Maria Gleixner WARN_ON_ONCE(base->running != timer); 14273f0b9e8eSAnna-Maria Gleixner base->running = NULL; 14285cee9645SThomas Gleixner } 14295cee9645SThomas Gleixner 1430dd934aa8SAnna-Maria Gleixner static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now, 1431c458b1d1SAnna-Maria Gleixner unsigned long flags, unsigned int active_mask) 14325cee9645SThomas Gleixner { 1433c272ca58SAnna-Maria Gleixner struct hrtimer_clock_base *base; 1434c458b1d1SAnna-Maria Gleixner unsigned int active = cpu_base->active_bases & active_mask; 14355cee9645SThomas Gleixner 1436c272ca58SAnna-Maria Gleixner for_each_active_base(base, cpu_base, active) { 14375cee9645SThomas Gleixner struct timerqueue_node *node; 14385cee9645SThomas Gleixner ktime_t basenow; 14395cee9645SThomas Gleixner 14405cee9645SThomas Gleixner basenow = ktime_add(now, base->offset); 14415cee9645SThomas Gleixner 14425cee9645SThomas Gleixner while ((node = timerqueue_getnext(&base->active))) { 14435cee9645SThomas Gleixner struct hrtimer *timer; 14445cee9645SThomas Gleixner 14455cee9645SThomas Gleixner timer = container_of(node, struct hrtimer, node); 14465cee9645SThomas Gleixner 14475cee9645SThomas Gleixner /* 14485cee9645SThomas Gleixner * The immediate goal for using the softexpires is 14495cee9645SThomas Gleixner * minimizing wakeups, not running timers at the 14505cee9645SThomas Gleixner * earliest interrupt after their soft expiration. 14515cee9645SThomas Gleixner * This allows us to avoid using a Priority Search 14525cee9645SThomas Gleixner * Tree, which can answer a stabbing query for 14535cee9645SThomas Gleixner * overlapping intervals and instead use the simple 14545cee9645SThomas Gleixner * BST we already have. 14555cee9645SThomas Gleixner * We don't add extra wakeups by delaying timers that 14565cee9645SThomas Gleixner * are right-of a not yet expired timer, because that 14575cee9645SThomas Gleixner * timer will have to trigger a wakeup anyway.
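 *
 * Illustrative sketch (values are made up): a timer armed with
 *
 *	hrtimer_set_expires_range_ns(&t, abs_time, 500 * NSEC_PER_USEC);
 *
 * has its soft expiry at abs_time and its hard expiry 500us later;
 * the loop below stops at the first timer whose soft expiry still
 * lies in the future.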
14585cee9645SThomas Gleixner */ 14592456e855SThomas Gleixner if (basenow < hrtimer_get_softexpires_tv64(timer)) 14605cee9645SThomas Gleixner break; 14615cee9645SThomas Gleixner 1462dd934aa8SAnna-Maria Gleixner __run_hrtimer(cpu_base, base, timer, &basenow, flags); 14635cee9645SThomas Gleixner } 14645cee9645SThomas Gleixner } 146521d6d52aSThomas Gleixner } 146621d6d52aSThomas Gleixner 14675da70160SAnna-Maria Gleixner static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h) 14685da70160SAnna-Maria Gleixner { 14695da70160SAnna-Maria Gleixner struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); 14705da70160SAnna-Maria Gleixner unsigned long flags; 14715da70160SAnna-Maria Gleixner ktime_t now; 14725da70160SAnna-Maria Gleixner 14735da70160SAnna-Maria Gleixner raw_spin_lock_irqsave(&cpu_base->lock, flags); 14745da70160SAnna-Maria Gleixner 14755da70160SAnna-Maria Gleixner now = hrtimer_update_base(cpu_base); 14765da70160SAnna-Maria Gleixner __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT); 14775da70160SAnna-Maria Gleixner 14785da70160SAnna-Maria Gleixner cpu_base->softirq_activated = 0; 14795da70160SAnna-Maria Gleixner hrtimer_update_softirq_timer(cpu_base, true); 14805da70160SAnna-Maria Gleixner 14815da70160SAnna-Maria Gleixner raw_spin_unlock_irqrestore(&cpu_base->lock, flags); 14825da70160SAnna-Maria Gleixner } 14835da70160SAnna-Maria Gleixner 148421d6d52aSThomas Gleixner #ifdef CONFIG_HIGH_RES_TIMERS 148521d6d52aSThomas Gleixner 148621d6d52aSThomas Gleixner /* 148721d6d52aSThomas Gleixner * High resolution timer interrupt 148821d6d52aSThomas Gleixner * Called with interrupts disabled 148921d6d52aSThomas Gleixner */ 149021d6d52aSThomas Gleixner void hrtimer_interrupt(struct clock_event_device *dev) 149121d6d52aSThomas Gleixner { 149221d6d52aSThomas Gleixner struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); 149321d6d52aSThomas Gleixner ktime_t expires_next, now, entry_time, delta; 1494dd934aa8SAnna-Maria Gleixner unsigned long flags; 149521d6d52aSThomas Gleixner int retries = 0; 149621d6d52aSThomas Gleixner 149721d6d52aSThomas Gleixner BUG_ON(!cpu_base->hres_active); 149821d6d52aSThomas Gleixner cpu_base->nr_events++; 14992456e855SThomas Gleixner dev->next_event = KTIME_MAX; 150021d6d52aSThomas Gleixner 1501dd934aa8SAnna-Maria Gleixner raw_spin_lock_irqsave(&cpu_base->lock, flags); 150221d6d52aSThomas Gleixner entry_time = now = hrtimer_update_base(cpu_base); 150321d6d52aSThomas Gleixner retry: 150421d6d52aSThomas Gleixner cpu_base->in_hrtirq = 1; 150521d6d52aSThomas Gleixner /* 150621d6d52aSThomas Gleixner * We set expires_next to KTIME_MAX here with cpu_base->lock 150721d6d52aSThomas Gleixner * held to prevent a timer from being enqueued in our queue via 150821d6d52aSThomas Gleixner * the migration code. This does not affect enqueueing of 150921d6d52aSThomas Gleixner * timers which run their callback and need to be requeued on 151021d6d52aSThomas Gleixner * this CPU.
151121d6d52aSThomas Gleixner */ 15122456e855SThomas Gleixner cpu_base->expires_next = KTIME_MAX; 151321d6d52aSThomas Gleixner 15145da70160SAnna-Maria Gleixner if (!ktime_before(now, cpu_base->softirq_expires_next)) { 15155da70160SAnna-Maria Gleixner cpu_base->softirq_expires_next = KTIME_MAX; 15165da70160SAnna-Maria Gleixner cpu_base->softirq_activated = 1; 15175da70160SAnna-Maria Gleixner raise_softirq_irqoff(HRTIMER_SOFTIRQ); 15185da70160SAnna-Maria Gleixner } 15195da70160SAnna-Maria Gleixner 1520c458b1d1SAnna-Maria Gleixner __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); 152121d6d52aSThomas Gleixner 15229bc74919SThomas Gleixner /* Reevaluate the clock bases for the next expiry */ 15235da70160SAnna-Maria Gleixner expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL); 15245cee9645SThomas Gleixner /* 15255cee9645SThomas Gleixner * Store the new expiry value so the migration code can verify 15265cee9645SThomas Gleixner * against it. 15275cee9645SThomas Gleixner */ 15285cee9645SThomas Gleixner cpu_base->expires_next = expires_next; 15299bc74919SThomas Gleixner cpu_base->in_hrtirq = 0; 1530dd934aa8SAnna-Maria Gleixner raw_spin_unlock_irqrestore(&cpu_base->lock, flags); 15315cee9645SThomas Gleixner 15325cee9645SThomas Gleixner /* Reprogramming necessary ? */ 1533d2540875SViresh Kumar if (!tick_program_event(expires_next, 0)) { 15345cee9645SThomas Gleixner cpu_base->hang_detected = 0; 15355cee9645SThomas Gleixner return; 15365cee9645SThomas Gleixner } 15375cee9645SThomas Gleixner 15385cee9645SThomas Gleixner /* 15395cee9645SThomas Gleixner * The next timer has already expired due to: 15405cee9645SThomas Gleixner * - tracing 15415cee9645SThomas Gleixner * - long lasting callbacks 15425cee9645SThomas Gleixner * - being scheduled away when running in a VM 15435cee9645SThomas Gleixner * 15445cee9645SThomas Gleixner * We need to prevent looping forever in the hrtimer 15455cee9645SThomas Gleixner * interrupt routine. We give it 3 attempts to avoid 15465cee9645SThomas Gleixner * overreacting to some spurious event. 15475cee9645SThomas Gleixner * 15485cee9645SThomas Gleixner * Acquire base lock for updating the offsets and retrieving 15495cee9645SThomas Gleixner * the current time. 15505cee9645SThomas Gleixner */ 1551dd934aa8SAnna-Maria Gleixner raw_spin_lock_irqsave(&cpu_base->lock, flags); 15525cee9645SThomas Gleixner now = hrtimer_update_base(cpu_base); 15535cee9645SThomas Gleixner cpu_base->nr_retries++; 15545cee9645SThomas Gleixner if (++retries < 3) 15555cee9645SThomas Gleixner goto retry; 15565cee9645SThomas Gleixner /* 15575cee9645SThomas Gleixner * Give the system a chance to do something other than looping 15585cee9645SThomas Gleixner * here. We stored the entry time, so we know exactly how long 15595cee9645SThomas Gleixner * we spent here. We schedule the next event this amount of 15605cee9645SThomas Gleixner * time away. 15615cee9645SThomas Gleixner */ 15625cee9645SThomas Gleixner cpu_base->nr_hangs++; 15635cee9645SThomas Gleixner cpu_base->hang_detected = 1; 1564dd934aa8SAnna-Maria Gleixner raw_spin_unlock_irqrestore(&cpu_base->lock, flags); 1565dd934aa8SAnna-Maria Gleixner 15665cee9645SThomas Gleixner delta = ktime_sub(now, entry_time); 15672456e855SThomas Gleixner if ((unsigned int)delta > cpu_base->max_hang_time) 15682456e855SThomas Gleixner cpu_base->max_hang_time = (unsigned int) delta; 15695cee9645SThomas Gleixner /* 15705cee9645SThomas Gleixner * Limit it to a sensible value as we enforce a longer 15715cee9645SThomas Gleixner * delay.
Give the CPU at least 100ms to catch up. 15725cee9645SThomas Gleixner */ 15732456e855SThomas Gleixner if (delta > 100 * NSEC_PER_MSEC) 15745cee9645SThomas Gleixner expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC); 15755cee9645SThomas Gleixner else 15765cee9645SThomas Gleixner expires_next = ktime_add(now, delta); 15775cee9645SThomas Gleixner tick_program_event(expires_next, 1); 15785cee9645SThomas Gleixner printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n", 15795cee9645SThomas Gleixner ktime_to_ns(delta)); 15805cee9645SThomas Gleixner } 15815cee9645SThomas Gleixner 1582016da201SStephen Boyd /* called with interrupts disabled */ 1583c6eb3f70SThomas Gleixner static inline void __hrtimer_peek_ahead_timers(void) 15845cee9645SThomas Gleixner { 15855cee9645SThomas Gleixner struct tick_device *td; 15865cee9645SThomas Gleixner 15875cee9645SThomas Gleixner if (!hrtimer_hres_active()) 15885cee9645SThomas Gleixner return; 15895cee9645SThomas Gleixner 159022127e93SChristoph Lameter td = this_cpu_ptr(&tick_cpu_device); 15915cee9645SThomas Gleixner if (td && td->evtdev) 15925cee9645SThomas Gleixner hrtimer_interrupt(td->evtdev); 15935cee9645SThomas Gleixner } 15945cee9645SThomas Gleixner 15955cee9645SThomas Gleixner #else /* CONFIG_HIGH_RES_TIMERS */ 15965cee9645SThomas Gleixner 15975cee9645SThomas Gleixner static inline void __hrtimer_peek_ahead_timers(void) { } 15985cee9645SThomas Gleixner 15995cee9645SThomas Gleixner #endif /* !CONFIG_HIGH_RES_TIMERS */ 16005cee9645SThomas Gleixner 16015cee9645SThomas Gleixner /* 1602c6eb3f70SThomas Gleixner * Called from run_local_timers in hardirq context every jiffy 16035cee9645SThomas Gleixner */ 16045cee9645SThomas Gleixner void hrtimer_run_queues(void) 16055cee9645SThomas Gleixner { 1606dc5df73bSChristoph Lameter struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); 1607dd934aa8SAnna-Maria Gleixner unsigned long flags; 160821d6d52aSThomas Gleixner ktime_t now; 16095cee9645SThomas Gleixner 1610e19ffe8bSThomas Gleixner if (__hrtimer_hres_active(cpu_base)) 16115cee9645SThomas Gleixner return; 16125cee9645SThomas Gleixner 1613c6eb3f70SThomas Gleixner /* 1614c6eb3f70SThomas Gleixner * This _is_ ugly: We have to check periodically, whether we 1615c6eb3f70SThomas Gleixner * can switch to highres and / or nohz mode. The clocksource 1616c6eb3f70SThomas Gleixner * switch happens with xtime_lock held. Notification from 1617c6eb3f70SThomas Gleixner * there only sets the check bit in the tick_oneshot code, 1618c6eb3f70SThomas Gleixner * otherwise we might deadlock vs. xtime_lock. 
1619c6eb3f70SThomas Gleixner */ 1620c6eb3f70SThomas Gleixner if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) { 1621c6eb3f70SThomas Gleixner hrtimer_switch_to_hres(); 1622c6eb3f70SThomas Gleixner return; 16235cee9645SThomas Gleixner } 16245cee9645SThomas Gleixner 1625dd934aa8SAnna-Maria Gleixner raw_spin_lock_irqsave(&cpu_base->lock, flags); 162621d6d52aSThomas Gleixner now = hrtimer_update_base(cpu_base); 16275da70160SAnna-Maria Gleixner 16285da70160SAnna-Maria Gleixner if (!ktime_before(now, cpu_base->softirq_expires_next)) { 16295da70160SAnna-Maria Gleixner cpu_base->softirq_expires_next = KTIME_MAX; 16305da70160SAnna-Maria Gleixner cpu_base->softirq_activated = 1; 16315da70160SAnna-Maria Gleixner raise_softirq_irqoff(HRTIMER_SOFTIRQ); 16325da70160SAnna-Maria Gleixner } 16335da70160SAnna-Maria Gleixner 1634c458b1d1SAnna-Maria Gleixner __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); 1635dd934aa8SAnna-Maria Gleixner raw_spin_unlock_irqrestore(&cpu_base->lock, flags); 16365cee9645SThomas Gleixner } 16375cee9645SThomas Gleixner 16385cee9645SThomas Gleixner /* 16395cee9645SThomas Gleixner * Sleep related functions: 16405cee9645SThomas Gleixner */ 16415cee9645SThomas Gleixner static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) 16425cee9645SThomas Gleixner { 16435cee9645SThomas Gleixner struct hrtimer_sleeper *t = 16445cee9645SThomas Gleixner container_of(timer, struct hrtimer_sleeper, timer); 16455cee9645SThomas Gleixner struct task_struct *task = t->task; 16465cee9645SThomas Gleixner 16475cee9645SThomas Gleixner t->task = NULL; 16485cee9645SThomas Gleixner if (task) 16495cee9645SThomas Gleixner wake_up_process(task); 16505cee9645SThomas Gleixner 16515cee9645SThomas Gleixner return HRTIMER_NORESTART; 16525cee9645SThomas Gleixner } 16535cee9645SThomas Gleixner 16545cee9645SThomas Gleixner void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) 16555cee9645SThomas Gleixner { 16565cee9645SThomas Gleixner sl->timer.function = hrtimer_wakeup; 16575cee9645SThomas Gleixner sl->task = task; 16585cee9645SThomas Gleixner } 16595cee9645SThomas Gleixner EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); 16605cee9645SThomas Gleixner 1661c0edd7c9SDeepa Dinamani int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts) 1662ce41aaf4SAl Viro { 1663ce41aaf4SAl Viro switch(restart->nanosleep.type) { 1664ce41aaf4SAl Viro #ifdef CONFIG_COMPAT 1665ce41aaf4SAl Viro case TT_COMPAT: 1666c0edd7c9SDeepa Dinamani if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp)) 1667ce41aaf4SAl Viro return -EFAULT; 1668ce41aaf4SAl Viro break; 1669ce41aaf4SAl Viro #endif 1670ce41aaf4SAl Viro case TT_NATIVE: 1671c0edd7c9SDeepa Dinamani if (put_timespec64(ts, restart->nanosleep.rmtp)) 1672ce41aaf4SAl Viro return -EFAULT; 1673ce41aaf4SAl Viro break; 1674ce41aaf4SAl Viro default: 1675ce41aaf4SAl Viro BUG(); 1676ce41aaf4SAl Viro } 1677ce41aaf4SAl Viro return -ERESTART_RESTARTBLOCK; 1678ce41aaf4SAl Viro } 1679ce41aaf4SAl Viro 16805cee9645SThomas Gleixner static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) 16815cee9645SThomas Gleixner { 1682edbeda46SAl Viro struct restart_block *restart; 1683edbeda46SAl Viro 16845cee9645SThomas Gleixner hrtimer_init_sleeper(t, current); 16855cee9645SThomas Gleixner 16865cee9645SThomas Gleixner do { 16875cee9645SThomas Gleixner set_current_state(TASK_INTERRUPTIBLE); 16885cee9645SThomas Gleixner hrtimer_start_expires(&t->timer, mode); 16895cee9645SThomas Gleixner 16905cee9645SThomas Gleixner if 
(likely(t->task)) 16915cee9645SThomas Gleixner freezable_schedule(); 16925cee9645SThomas Gleixner 16935cee9645SThomas Gleixner hrtimer_cancel(&t->timer); 16945cee9645SThomas Gleixner mode = HRTIMER_MODE_ABS; 16955cee9645SThomas Gleixner 16965cee9645SThomas Gleixner } while (t->task && !signal_pending(current)); 16975cee9645SThomas Gleixner 16985cee9645SThomas Gleixner __set_current_state(TASK_RUNNING); 16995cee9645SThomas Gleixner 1700a7602681SAl Viro if (!t->task) 1701a7602681SAl Viro return 0; 17025cee9645SThomas Gleixner 1703edbeda46SAl Viro restart = &current->restart_block; 1704edbeda46SAl Viro if (restart->nanosleep.type != TT_NONE) { 1705a7602681SAl Viro ktime_t rem = hrtimer_expires_remaining(&t->timer); 1706c0edd7c9SDeepa Dinamani struct timespec64 rmt; 1707edbeda46SAl Viro 17082456e855SThomas Gleixner if (rem <= 0) 17095cee9645SThomas Gleixner return 0; 1710c0edd7c9SDeepa Dinamani rmt = ktime_to_timespec64(rem); 17115cee9645SThomas Gleixner 1712ce41aaf4SAl Viro return nanosleep_copyout(restart, &rmt); 1713a7602681SAl Viro } 1714a7602681SAl Viro return -ERESTART_RESTARTBLOCK; 17155cee9645SThomas Gleixner } 17165cee9645SThomas Gleixner 1717fb923c4aSAl Viro static long __sched hrtimer_nanosleep_restart(struct restart_block *restart) 17185cee9645SThomas Gleixner { 17195cee9645SThomas Gleixner struct hrtimer_sleeper t; 1720a7602681SAl Viro int ret; 17215cee9645SThomas Gleixner 17225cee9645SThomas Gleixner hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid, 17235cee9645SThomas Gleixner HRTIMER_MODE_ABS); 17245cee9645SThomas Gleixner hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); 17255cee9645SThomas Gleixner 1726a7602681SAl Viro ret = do_nanosleep(&t, HRTIMER_MODE_ABS); 17275cee9645SThomas Gleixner destroy_hrtimer_on_stack(&t.timer); 17285cee9645SThomas Gleixner return ret; 17295cee9645SThomas Gleixner } 17305cee9645SThomas Gleixner 1731938e7cf2SThomas Gleixner long hrtimer_nanosleep(const struct timespec64 *rqtp, 17325cee9645SThomas Gleixner const enum hrtimer_mode mode, const clockid_t clockid) 17335cee9645SThomas Gleixner { 1734a7602681SAl Viro struct restart_block *restart; 17355cee9645SThomas Gleixner struct hrtimer_sleeper t; 17365cee9645SThomas Gleixner int ret = 0; 1737da8b44d5SJohn Stultz u64 slack; 17385cee9645SThomas Gleixner 17395cee9645SThomas Gleixner slack = current->timer_slack_ns; 17405cee9645SThomas Gleixner if (dl_task(current) || rt_task(current)) 17415cee9645SThomas Gleixner slack = 0; 17425cee9645SThomas Gleixner 17435cee9645SThomas Gleixner hrtimer_init_on_stack(&t.timer, clockid, mode); 1744ad196384SDeepa Dinamani hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack); 1745a7602681SAl Viro ret = do_nanosleep(&t, mode); 1746a7602681SAl Viro if (ret != -ERESTART_RESTARTBLOCK) 17475cee9645SThomas Gleixner goto out; 17485cee9645SThomas Gleixner 17495cee9645SThomas Gleixner /* Absolute timers do not update the rmtp value and restart: */ 17505cee9645SThomas Gleixner if (mode == HRTIMER_MODE_ABS) { 17515cee9645SThomas Gleixner ret = -ERESTARTNOHAND; 17525cee9645SThomas Gleixner goto out; 17535cee9645SThomas Gleixner } 17545cee9645SThomas Gleixner 1755a7602681SAl Viro restart = &current->restart_block; 17565cee9645SThomas Gleixner restart->fn = hrtimer_nanosleep_restart; 17575cee9645SThomas Gleixner restart->nanosleep.clockid = t.timer.base->clockid; 17585cee9645SThomas Gleixner restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); 17595cee9645SThomas Gleixner out: 17605cee9645SThomas Gleixner destroy_hrtimer_on_stack(&t.timer);
17615cee9645SThomas Gleixner return ret; 17625cee9645SThomas Gleixner } 17635cee9645SThomas Gleixner 17645cee9645SThomas Gleixner SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, 17655cee9645SThomas Gleixner struct timespec __user *, rmtp) 17665cee9645SThomas Gleixner { 1767c0edd7c9SDeepa Dinamani struct timespec64 tu; 17685cee9645SThomas Gleixner 1769c0edd7c9SDeepa Dinamani if (get_timespec64(&tu, rqtp)) 17705cee9645SThomas Gleixner return -EFAULT; 17715cee9645SThomas Gleixner 1772c0edd7c9SDeepa Dinamani if (!timespec64_valid(&tu)) 17735cee9645SThomas Gleixner return -EINVAL; 17745cee9645SThomas Gleixner 1775edbeda46SAl Viro current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE; 1776192a82f9SAl Viro current->restart_block.nanosleep.rmtp = rmtp; 1777c0edd7c9SDeepa Dinamani return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC); 17785cee9645SThomas Gleixner } 17795cee9645SThomas Gleixner 1780edbeda46SAl Viro #ifdef CONFIG_COMPAT 1781edbeda46SAl Viro 1782edbeda46SAl Viro COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, 1783edbeda46SAl Viro struct compat_timespec __user *, rmtp) 1784edbeda46SAl Viro { 1785c0edd7c9SDeepa Dinamani struct timespec64 tu; 1786edbeda46SAl Viro 1787c0edd7c9SDeepa Dinamani if (compat_get_timespec64(&tu, rqtp)) 1788edbeda46SAl Viro return -EFAULT; 1789edbeda46SAl Viro 1790c0edd7c9SDeepa Dinamani if (!timespec64_valid(&tu)) 1791edbeda46SAl Viro return -EINVAL; 1792edbeda46SAl Viro 1793edbeda46SAl Viro current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; 1794edbeda46SAl Viro current->restart_block.nanosleep.compat_rmtp = rmtp; 1795c0edd7c9SDeepa Dinamani return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC); 1796edbeda46SAl Viro } 1797edbeda46SAl Viro #endif 1798edbeda46SAl Viro 17995cee9645SThomas Gleixner /* 18005cee9645SThomas Gleixner * Functions related to boot-time initialization: 18015cee9645SThomas Gleixner */ 180227590dc1SThomas Gleixner int hrtimers_prepare_cpu(unsigned int cpu) 18035cee9645SThomas Gleixner { 18045cee9645SThomas Gleixner struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); 18055cee9645SThomas Gleixner int i; 18065cee9645SThomas Gleixner 18075cee9645SThomas Gleixner for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { 18085cee9645SThomas Gleixner cpu_base->clock_base[i].cpu_base = cpu_base; 18095cee9645SThomas Gleixner timerqueue_init_head(&cpu_base->clock_base[i].active); 18105cee9645SThomas Gleixner } 18115cee9645SThomas Gleixner 1812cddd0248SViresh Kumar cpu_base->cpu = cpu; 1813303c146dSThomas Gleixner cpu_base->active_bases = 0; 181428bfd18bSAnna-Maria Gleixner cpu_base->hres_active = 0; 1815303c146dSThomas Gleixner cpu_base->hang_detected = 0; 1816303c146dSThomas Gleixner cpu_base->next_timer = NULL; 1817303c146dSThomas Gleixner cpu_base->softirq_next_timer = NULL; 181807a9a7eaSAnna-Maria Gleixner cpu_base->expires_next = KTIME_MAX; 18195da70160SAnna-Maria Gleixner cpu_base->softirq_expires_next = KTIME_MAX; 182027590dc1SThomas Gleixner return 0; 18215cee9645SThomas Gleixner } 18225cee9645SThomas Gleixner 18235cee9645SThomas Gleixner #ifdef CONFIG_HOTPLUG_CPU 18245cee9645SThomas Gleixner 18255cee9645SThomas Gleixner static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, 18265cee9645SThomas Gleixner struct hrtimer_clock_base *new_base) 18275cee9645SThomas Gleixner { 18285cee9645SThomas Gleixner struct hrtimer *timer; 18295cee9645SThomas Gleixner struct timerqueue_node *node; 18305cee9645SThomas Gleixner 18315cee9645SThomas Gleixner 
while ((node = timerqueue_getnext(&old_base->active))) { 18325cee9645SThomas Gleixner timer = container_of(node, struct hrtimer, node); 18335cee9645SThomas Gleixner BUG_ON(hrtimer_callback_running(timer)); 18345cee9645SThomas Gleixner debug_deactivate(timer); 18355cee9645SThomas Gleixner 18365cee9645SThomas Gleixner /* 1837c04dca02SOleg Nesterov * Mark it as ENQUEUED not INACTIVE otherwise the 18385cee9645SThomas Gleixner * timer could be seen as !active and just vanish away 18395cee9645SThomas Gleixner * under us on another CPU 18405cee9645SThomas Gleixner */ 1841c04dca02SOleg Nesterov __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0); 18425cee9645SThomas Gleixner timer->base = new_base; 18435cee9645SThomas Gleixner /* 18445cee9645SThomas Gleixner * Enqueue the timers on the new cpu. This does not 18455cee9645SThomas Gleixner * reprogram the event device in case the timer 18465cee9645SThomas Gleixner * expires before the earliest on this CPU, but we run 18475cee9645SThomas Gleixner * hrtimer_interrupt after we migrated everything to 18485cee9645SThomas Gleixner * sort out already expired timers and reprogram the 18495cee9645SThomas Gleixner * event device. 18505cee9645SThomas Gleixner */ 185163e2ed36SAnna-Maria Gleixner enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS); 18525cee9645SThomas Gleixner } 18535cee9645SThomas Gleixner } 18545cee9645SThomas Gleixner 185527590dc1SThomas Gleixner int hrtimers_dead_cpu(unsigned int scpu) 18565cee9645SThomas Gleixner { 18575cee9645SThomas Gleixner struct hrtimer_cpu_base *old_base, *new_base; 18585cee9645SThomas Gleixner int i; 18595cee9645SThomas Gleixner 18605cee9645SThomas Gleixner BUG_ON(cpu_online(scpu)); 18615cee9645SThomas Gleixner tick_cancel_sched_timer(scpu); 18625cee9645SThomas Gleixner 18635da70160SAnna-Maria Gleixner /* 18645da70160SAnna-Maria Gleixner * this BH disable ensures that raise_softirq_irqoff() does 18655da70160SAnna-Maria Gleixner * not wakeup ksoftirqd (and acquire the pi-lock) while 18665da70160SAnna-Maria Gleixner * holding the cpu_base lock 18675da70160SAnna-Maria Gleixner */ 18685da70160SAnna-Maria Gleixner local_bh_disable(); 18695cee9645SThomas Gleixner local_irq_disable(); 18705cee9645SThomas Gleixner old_base = &per_cpu(hrtimer_bases, scpu); 1871dc5df73bSChristoph Lameter new_base = this_cpu_ptr(&hrtimer_bases); 18725cee9645SThomas Gleixner /* 18735cee9645SThomas Gleixner * The caller is globally serialized and nobody else 18745cee9645SThomas Gleixner * takes two locks at once, deadlock is not possible. 18755cee9645SThomas Gleixner */ 18765cee9645SThomas Gleixner raw_spin_lock(&new_base->lock); 18775cee9645SThomas Gleixner raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); 18785cee9645SThomas Gleixner 18795cee9645SThomas Gleixner for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { 18805cee9645SThomas Gleixner migrate_hrtimer_list(&old_base->clock_base[i], 18815cee9645SThomas Gleixner &new_base->clock_base[i]); 18825cee9645SThomas Gleixner } 18835cee9645SThomas Gleixner 18845da70160SAnna-Maria Gleixner /* 18855da70160SAnna-Maria Gleixner * The migration might have changed the first expiring softirq 18865da70160SAnna-Maria Gleixner * timer on this CPU. Update it. 
18875da70160SAnna-Maria Gleixner */ 18885da70160SAnna-Maria Gleixner hrtimer_update_softirq_timer(new_base, false); 18895da70160SAnna-Maria Gleixner 18905cee9645SThomas Gleixner raw_spin_unlock(&old_base->lock); 18915cee9645SThomas Gleixner raw_spin_unlock(&new_base->lock); 18925cee9645SThomas Gleixner 18935cee9645SThomas Gleixner /* Check, if we got expired work to do */ 18945cee9645SThomas Gleixner __hrtimer_peek_ahead_timers(); 18955cee9645SThomas Gleixner local_irq_enable(); 18965da70160SAnna-Maria Gleixner local_bh_enable(); 189727590dc1SThomas Gleixner return 0; 18985cee9645SThomas Gleixner } 18995cee9645SThomas Gleixner 19005cee9645SThomas Gleixner #endif /* CONFIG_HOTPLUG_CPU */ 19015cee9645SThomas Gleixner 19025cee9645SThomas Gleixner void __init hrtimers_init(void) 19035cee9645SThomas Gleixner { 190427590dc1SThomas Gleixner hrtimers_prepare_cpu(smp_processor_id()); 19055da70160SAnna-Maria Gleixner open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq); 19065cee9645SThomas Gleixner } 19075cee9645SThomas Gleixner 19085cee9645SThomas Gleixner /** 19095cee9645SThomas Gleixner * schedule_hrtimeout_range_clock - sleep until timeout 19105cee9645SThomas Gleixner * @expires: timeout value (ktime_t) 19115cee9645SThomas Gleixner * @delta: slack in expires timeout (ktime_t) 191290777713SAnna-Maria Gleixner * @mode: timer mode 191390777713SAnna-Maria Gleixner * @clock_id: timer clock to be used 19145cee9645SThomas Gleixner */ 19155cee9645SThomas Gleixner int __sched 1916da8b44d5SJohn Stultz schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, 191790777713SAnna-Maria Gleixner const enum hrtimer_mode mode, clockid_t clock_id) 19185cee9645SThomas Gleixner { 19195cee9645SThomas Gleixner struct hrtimer_sleeper t; 19205cee9645SThomas Gleixner 19215cee9645SThomas Gleixner /* 19225cee9645SThomas Gleixner * Optimize when a zero timeout value is given. It does not 19235cee9645SThomas Gleixner * matter whether this is an absolute or a relative time. 19245cee9645SThomas Gleixner */ 19252456e855SThomas Gleixner if (expires && *expires == 0) { 19265cee9645SThomas Gleixner __set_current_state(TASK_RUNNING); 19275cee9645SThomas Gleixner return 0; 19285cee9645SThomas Gleixner } 19295cee9645SThomas Gleixner 19305cee9645SThomas Gleixner /* 19315cee9645SThomas Gleixner * A NULL parameter means "infinite" 19325cee9645SThomas Gleixner */ 19335cee9645SThomas Gleixner if (!expires) { 19345cee9645SThomas Gleixner schedule(); 19355cee9645SThomas Gleixner return -EINTR; 19365cee9645SThomas Gleixner } 19375cee9645SThomas Gleixner 193890777713SAnna-Maria Gleixner hrtimer_init_on_stack(&t.timer, clock_id, mode); 19395cee9645SThomas Gleixner hrtimer_set_expires_range_ns(&t.timer, *expires, delta); 19405cee9645SThomas Gleixner 19415cee9645SThomas Gleixner hrtimer_init_sleeper(&t, current); 19425cee9645SThomas Gleixner 19435cee9645SThomas Gleixner hrtimer_start_expires(&t.timer, mode); 19445cee9645SThomas Gleixner 19455cee9645SThomas Gleixner if (likely(t.task)) 19465cee9645SThomas Gleixner schedule(); 19475cee9645SThomas Gleixner 19485cee9645SThomas Gleixner hrtimer_cancel(&t.timer); 19495cee9645SThomas Gleixner destroy_hrtimer_on_stack(&t.timer); 19505cee9645SThomas Gleixner 19515cee9645SThomas Gleixner __set_current_state(TASK_RUNNING); 19525cee9645SThomas Gleixner 19535cee9645SThomas Gleixner return !t.task ? 
0 : -EINTR; 19545cee9645SThomas Gleixner } 19555cee9645SThomas Gleixner 19565cee9645SThomas Gleixner /** 19575cee9645SThomas Gleixner * schedule_hrtimeout_range - sleep until timeout 19585cee9645SThomas Gleixner * @expires: timeout value (ktime_t) 19595cee9645SThomas Gleixner * @delta: slack in expires timeout (ktime_t) 196090777713SAnna-Maria Gleixner * @mode: timer mode 19615cee9645SThomas Gleixner * 19625cee9645SThomas Gleixner * Make the current task sleep until the given expiry time has 19635cee9645SThomas Gleixner * elapsed. The routine will return immediately unless 19645cee9645SThomas Gleixner * the current task state has been set (see set_current_state()). 19655cee9645SThomas Gleixner * 19665cee9645SThomas Gleixner * The @delta argument gives the kernel the freedom to schedule the 19675cee9645SThomas Gleixner * actual wakeup to a time that is both power and performance friendly. 19685cee9645SThomas Gleixner * The kernel gives the normal best effort behavior for "@expires+@delta", 19695cee9645SThomas Gleixner * but may decide to fire the timer earlier, though no earlier than @expires. 19705cee9645SThomas Gleixner * 19715cee9645SThomas Gleixner * You can set the task state as follows - 19725cee9645SThomas Gleixner * 19735cee9645SThomas Gleixner * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to 19744b7e9cf9SDouglas Anderson * pass before the routine returns unless the current task is explicitly 19754b7e9cf9SDouglas Anderson * woken up, (e.g. by wake_up_process()). 19765cee9645SThomas Gleixner * 19775cee9645SThomas Gleixner * %TASK_INTERRUPTIBLE - the routine may return early if a signal is 19784b7e9cf9SDouglas Anderson * delivered to the current task or the current task is explicitly woken 19794b7e9cf9SDouglas Anderson * up. 19805cee9645SThomas Gleixner * 19815cee9645SThomas Gleixner * The current task state is guaranteed to be TASK_RUNNING when this 19825cee9645SThomas Gleixner * routine returns. 19835cee9645SThomas Gleixner * 19844b7e9cf9SDouglas Anderson * Returns 0 when the timer has expired. If the task was woken before the 19854b7e9cf9SDouglas Anderson * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or 19864b7e9cf9SDouglas Anderson * by an explicit wakeup, it returns -EINTR. 19875cee9645SThomas Gleixner */ 1988da8b44d5SJohn Stultz int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta, 19895cee9645SThomas Gleixner const enum hrtimer_mode mode) 19905cee9645SThomas Gleixner { 19915cee9645SThomas Gleixner return schedule_hrtimeout_range_clock(expires, delta, mode, 19925cee9645SThomas Gleixner CLOCK_MONOTONIC); 19935cee9645SThomas Gleixner } 19945cee9645SThomas Gleixner EXPORT_SYMBOL_GPL(schedule_hrtimeout_range); 19955cee9645SThomas Gleixner 19965cee9645SThomas Gleixner /** 19975cee9645SThomas Gleixner * schedule_hrtimeout - sleep until timeout 19985cee9645SThomas Gleixner * @expires: timeout value (ktime_t) 199990777713SAnna-Maria Gleixner * @mode: timer mode 20005cee9645SThomas Gleixner * 20015cee9645SThomas Gleixner * Make the current task sleep until the given expiry time has 20025cee9645SThomas Gleixner * elapsed. The routine will return immediately unless 20035cee9645SThomas Gleixner * the current task state has been set (see set_current_state()).
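 *
 * A minimal (illustrative) caller, sleeping for roughly five
 * milliseconds (the value is only an example), could look like:
 *
 *	ktime_t to = ktime_add_ms(ktime_get(), 5);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	schedule_hrtimeout(&to, HRTIMER_MODE_ABS);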
20045cee9645SThomas Gleixner * 20055cee9645SThomas Gleixner * You can set the task state as follows - 20065cee9645SThomas Gleixner * 20075cee9645SThomas Gleixner * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to 20084b7e9cf9SDouglas Anderson * pass before the routine returns unless the current task is explicitly 20094b7e9cf9SDouglas Anderson * woken up, (e.g. by wake_up_process()). 20105cee9645SThomas Gleixner * 20115cee9645SThomas Gleixner * %TASK_INTERRUPTIBLE - the routine may return early if a signal is 20124b7e9cf9SDouglas Anderson * delivered to the current task or the current task is explicitly woken 20134b7e9cf9SDouglas Anderson * up. 20145cee9645SThomas Gleixner * 20155cee9645SThomas Gleixner * The current task state is guaranteed to be TASK_RUNNING when this 20165cee9645SThomas Gleixner * routine returns. 20175cee9645SThomas Gleixner * 20184b7e9cf9SDouglas Anderson * Returns 0 when the timer has expired. If the task was woken before the 20194b7e9cf9SDouglas Anderson * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or 20204b7e9cf9SDouglas Anderson * by an explicit wakeup, it returns -EINTR. 20215cee9645SThomas Gleixner */ 20225cee9645SThomas Gleixner int __sched schedule_hrtimeout(ktime_t *expires, 20235cee9645SThomas Gleixner const enum hrtimer_mode mode) 20245cee9645SThomas Gleixner { 20255cee9645SThomas Gleixner return schedule_hrtimeout_range(expires, 0, mode); 20265cee9645SThomas Gleixner } 20275cee9645SThomas Gleixner EXPORT_SYMBOL_GPL(schedule_hrtimeout); 2028