Lines Matching +full:timer +full:-

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
4 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
5 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
7 * High-resolution kernel timers
9 * In contrast to the low-resolution timeout API, aka timer wheel,
16 * Based on the original timer wheel code
42 #include <linux/timer.h>
48 #include <trace/events/timer.h>
50 #include "tick-internal.h"
54 * cpu_base->active
57 #define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
62 * The timer bases:
65 * into the timer bases by the hrtimer_base_type enum. When trying
119 [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
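The range initializer at source line 119 belongs to the clockid-to-base lookup table. A hedged sketch of the full pattern it comes from (table name and the specific entries are illustrative; only the default-to-invalid range line is taken from the fragment above):

/*
 * Sketch: every slot defaults to an "invalid" marker via the range
 * designator, then the supported clock ids override their own slots.
 */
static const int clock_to_base_sketch[MAX_CLOCKS] = {
        [0 ... MAX_CLOCKS - 1]  = HRTIMER_MAX_CLOCK_BASES,     /* invalid by default */
        [CLOCK_REALTIME]        = HRTIMER_BASE_REALTIME,
        [CLOCK_MONOTONIC]       = HRTIMER_BASE_MONOTONIC,
        [CLOCK_BOOTTIME]        = HRTIMER_BASE_BOOTTIME,
        [CLOCK_TAI]             = HRTIMER_BASE_TAI,
};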
136 * timer->base->cpu_base
150 * means that all timers which are tied to this base via timer->base are
156 * When the timer's base is locked, and the timer removed from list, it is
157 * possible to set timer->base = &migration_base and drop the lock: the timer
161 struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
163 __acquires(&timer->base->lock)
168 base = READ_ONCE(timer->base);
170 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
171 if (likely(base == timer->base))
173 /* The timer has migrated to another CPU: */
174 raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
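The fragments at source lines 161-174 belong to the lock-and-recheck loop that pins a timer's base against concurrent migration. A hedged sketch of the whole loop, with the elided framing filled in as an assumption (the locking and recheck lines match the fragments):

/*
 * Sketch: sample timer->base without the lock, lock that base's
 * cpu_base, and retry if the timer migrated in between. The loop
 * framing and the migration_base comparison are assumptions fitted
 * around the fragments above.
 */
static struct hrtimer_clock_base *lock_base_sketch(const struct hrtimer *timer,
                                                   unsigned long *flags)
{
        struct hrtimer_clock_base *base;

        for (;;) {
                base = READ_ONCE(timer->base);
                if (likely(base != &migration_base)) {
                        raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;    /* stable while the lock is held */
                        /* The timer has migrated to another CPU: */
                        raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
                }
                cpu_relax();
        }
}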
181 * We do not migrate the timer when it is expiring before the next
187 * Called with cpu_base->lock of target cpu held.
190 hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
194 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
195 return expires < new_base->cpu_base->expires_next;
210 * We switch the timer base to a power-optimized CPU target when:
212 * - NO_HZ_COMMON is enabled
213 * - timer migration is enabled
214 * - the timer callback is not running
215 * - the timer is not the first expiring timer on the new target
217 * If one of the above requirements is not fulfilled we move the timer
219 * the timer callback is currently running.
222 switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
227 int basenum = base->index;
232 new_base = &new_cpu_base->clock_base[basenum];
236 * We are trying to move the timer to new_base.
237 * However we can't change timer's base while it is running,
240 * code will take care of this when the timer function has
242 * the timer is enqueued.
244 if (unlikely(hrtimer_callback_running(timer)))
248 WRITE_ONCE(timer->base, &migration_base);
249 raw_spin_unlock(&base->cpu_base->lock);
250 raw_spin_lock(&new_base->cpu_base->lock);
253 hrtimer_check_target(timer, new_base)) {
254 raw_spin_unlock(&new_base->cpu_base->lock);
255 raw_spin_lock(&base->cpu_base->lock);
257 WRITE_ONCE(timer->base, base);
260 WRITE_ONCE(timer->base, new_base);
263 hrtimer_check_target(timer, new_base)) {
274 lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
275 __acquires(&timer->base->cpu_base->lock)
277 struct hrtimer_clock_base *base = timer->base;
279 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
303 tmp = dclc < 0 ? -dclc : dclc;
312 return dclc < 0 ? -tmp : tmp;
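Source lines 303 and 312 are the sign handling of the ktime division helper: divide the absolute value, then restore the sign. A minimal sketch of that idea, assuming a div64_u64()-style unsigned division (the helper name is illustrative):

/*
 * Sketch: signed ktime division done on |value| with an unsigned
 * 64-bit divide, sign restored afterwards.
 */
static s64 ktime_divns_sketch(ktime_t kt, s64 div)
{
        s64 dclc = ktime_to_ns(kt);
        u64 tmp = dclc < 0 ? -dclc : dclc;      /* work on the absolute value */

        tmp = div64_u64(tmp, div);
        return dclc < 0 ? -(s64)tmp : (s64)tmp; /* restore the sign */
}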
342 return ((struct hrtimer *) addr)->function;
347 * - an active object is initialized
351 struct hrtimer *timer = addr;
355 hrtimer_cancel(timer);
356 debug_object_init(timer, &hrtimer_debug_descr);
365 * - an active object is activated
366 * - an unknown non-static object is activated
381 * - an active object is freed
385 struct hrtimer *timer = addr;
389 hrtimer_cancel(timer);
390 debug_object_free(timer, &hrtimer_debug_descr);
405 static inline void debug_hrtimer_init(struct hrtimer *timer)
407 debug_object_init(timer, &hrtimer_debug_descr);
410 static inline void debug_hrtimer_activate(struct hrtimer *timer,
413 debug_object_activate(timer, &hrtimer_debug_descr);
416 static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
418 debug_object_deactivate(timer, &hrtimer_debug_descr);
421 static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
424 void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
427 debug_object_init_on_stack(timer, &hrtimer_debug_descr);
428 __hrtimer_init(timer, clock_id, mode);
438 debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
443 void destroy_hrtimer_on_stack(struct hrtimer *timer)
445 debug_object_free(timer, &hrtimer_debug_descr);
451 static inline void debug_hrtimer_init(struct hrtimer *timer) { }
452 static inline void debug_hrtimer_activate(struct hrtimer *timer,
454 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
458 debug_init(struct hrtimer *timer, clockid_t clockid,
461 debug_hrtimer_init(timer);
462 trace_hrtimer_init(timer, clockid, mode);
465 static inline void debug_activate(struct hrtimer *timer,
468 debug_hrtimer_activate(timer, mode);
469 trace_hrtimer_start(timer, mode);
472 static inline void debug_deactivate(struct hrtimer *timer)
474 debug_hrtimer_deactivate(timer);
475 trace_hrtimer_cancel(timer);
489 return &cpu_base->clock_base[idx];
505 struct hrtimer *timer;
507 next = timerqueue_getnext(&base->active);
508 timer = container_of(next, struct hrtimer, node);
509 if (timer == exclude) {
510 /* Get to the next timer in the queue. */
515 timer = container_of(next, struct hrtimer, node);
517 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
521 /* Skip cpu_base update if a timer is being excluded. */
525 if (timer->is_soft)
526 cpu_base->softirq_next_timer = timer;
528 cpu_base->next_timer = timer;
532 * clock_was_set() might have changed base->offset of any of
550 * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
557 * - HRTIMER_ACTIVE_ALL,
558 * - HRTIMER_ACTIVE_SOFT, or
559 * - HRTIMER_ACTIVE_HARD.
568 if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
569 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
570 cpu_base->softirq_next_timer = NULL;
574 next_timer = cpu_base->softirq_next_timer;
578 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
579 cpu_base->next_timer = next_timer;
596 if (!cpu_base->softirq_activated) {
602 cpu_base->softirq_expires_next = soft;
607 * If a softirq timer is expiring first, update cpu_base->next_timer
611 cpu_base->next_timer = cpu_base->softirq_next_timer;
620 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
621 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
622 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
624 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
627 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
628 base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
629 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
640 cpu_base->hres_active : 0;
652 cpu_base->expires_next = expires_next;
658 * If a hang was detected in the last timer interrupt then we
671 if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
680 * Called with interrupts disabled and base->lock held
689 if (skip_equal && expires_next == cpu_base->expires_next)
692 __hrtimer_reprogram(cpu_base, cpu_base->next_timer, expires_next);
695 /* High resolution timer related functions */
699 * High resolution timer enabled ?
716 * hrtimer_high_res_enabled - query whether the highres mode is enabled
734 base->cpu);
737 base->hres_active = 1;
757 * - CONFIG_HIGH_RES_TIMERS is enabled.
758 * - CONFIG_NO_HZ_COMMON is enabled.
773 * If high resolution mode is active then the next expiring timer
778 * of the next expiring timer is enough. The return from the SMP
785 raw_spin_lock(&base->lock);
791 raw_spin_unlock(&base->lock);
795 * When a timer is enqueued and expires earlier than the already enqueued
796 * timers, we have to check whether it expires earlier than the timer for
799 * Called with interrupts disabled and base->cpu_base.lock held
801 static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
804 struct hrtimer_clock_base *base = timer->base;
805 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
807 WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
810 * CLOCK_REALTIME timer might be requested with an absolute
811 * expiry time which is less than base->offset. Set it to 0.
816 if (timer->is_soft) {
821 * first hard hrtimer on the remote CPU -
824 struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;
826 if (timer_cpu_base->softirq_activated)
829 if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
832 timer_cpu_base->softirq_next_timer = timer;
833 timer_cpu_base->softirq_expires_next = expires;
835 if (!ktime_before(expires, timer_cpu_base->expires_next) ||
841 * If the timer is not on the current cpu, we cannot reprogram
844 if (base->cpu_base != cpu_base)
847 if (expires >= cpu_base->expires_next)
854 if (cpu_base->in_hrtirq)
857 cpu_base->next_timer = timer;
859 __hrtimer_reprogram(cpu_base, timer, expires);
877 * the next expiring timer.
879 seq = cpu_base->clock_was_set_seq;
886 if (seq == cpu_base->clock_was_set_seq)
891 * will reevaluate the first expiring timer of all clock bases
894 if (cpu_base->in_hrtirq)
899 * timer in a clock base is moving ahead of the first expiring timer of
903 active &= cpu_base->active_bases;
908 next = timerqueue_getnext(&base->active);
909 expires = ktime_sub(next->expires, base->offset);
910 if (expires < cpu_base->expires_next)
914 if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT)
916 if (cpu_base->softirq_activated)
918 if (expires < cpu_base->softirq_expires_next)
931 * when the change moves an affected timer ahead of the first expiring
932 * timer on that CPU. Obviously remote per CPU clock event devices cannot
958 raw_spin_lock_irqsave(&cpu_base->lock, flags);
963 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1008 void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
1009 __releases(&timer->base->cpu_base->lock)
1011 raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
1015 * hrtimer_forward - forward the timer expiry
1016 * @timer: hrtimer to forward
1020 * Forward the timer expiry so it will expire in the future.
1023 * Can be safely called from the callback function of @timer. If
1024 * called from other contexts @timer must neither be enqueued nor
1028 * Note: This only updates the timer expiry value and does not requeue
1029 * the timer.
1031 u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
1036 delta = ktime_sub(now, hrtimer_get_expires(timer));
1041 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
1051 hrtimer_add_expires_ns(timer, incr * orun);
1052 if (hrtimer_get_expires_tv64(timer) > now)
1060 hrtimer_add_expires(timer, interval);
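hrtimer_forward() only advances the expiry; a periodic callback typically calls the hrtimer_forward_now() wrapper and returns HRTIMER_RESTART so the core re-enqueues the timer. A hedged usage sketch (the callback name and the 100ms period are illustrative):

/*
 * Sketch: push this timer's expiry forward by whole 100ms periods past
 * the current time and ask the core to re-enqueue it.
 */
static enum hrtimer_restart my_period_cb(struct hrtimer *t)
{
        hrtimer_forward_now(t, ms_to_ktime(100));
        return HRTIMER_RESTART;         /* core re-enqueues at the new expiry */
}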
1067 * enqueue_hrtimer - internal function to (re)start a timer
1069 * The timer is inserted in expiry order. Insertion into the
1072 * Returns 1 when the new timer is the leftmost timer in the tree.
1074 static int enqueue_hrtimer(struct hrtimer *timer,
1078 debug_activate(timer, mode);
1079 WARN_ON_ONCE(!base->cpu_base->online);
1081 base->cpu_base->active_bases |= 1 << base->index;
1084 WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
1086 return timerqueue_add(&base->active, &timer->node);
1090 * __remove_hrtimer - internal function to remove a timer
1094 * High resolution timer mode reprograms the clock event device when the
1095 * timer is the one which expires next. The caller can disable this by setting
1097 * anyway (e.g. timer interrupt)
1099 static void __remove_hrtimer(struct hrtimer *timer,
1103 struct hrtimer_cpu_base *cpu_base = base->cpu_base;
1104 u8 state = timer->state;
1107 WRITE_ONCE(timer->state, newstate);
1111 if (!timerqueue_del(&base->active, &timer->node))
1112 cpu_base->active_bases &= ~(1 << base->index);
1116 * cpu_base->next_timer. This happens when we remove the first
1117 * timer on a remote cpu. No harm as we never dereference
1118 * cpu_base->next_timer. So the worst that can happen is
1120 * remote cpu later on if the same timer gets enqueued again.
1122 if (reprogram && timer == cpu_base->next_timer)
1130 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
1133 u8 state = timer->state;
1139 * Remove the timer and force reprogramming when high
1140 * resolution mode is active and the timer is on the current
1141 * CPU. If we remove a timer on another CPU, reprogramming is
1146 debug_deactivate(timer);
1147 reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
1150 * If the timer is not restarted then reprogramming is
1151 * required if the timer is local. If it is local and about
1160 __remove_hrtimer(timer, base, state, reprogram);
1166 static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
1175 timer->is_rel = mode & HRTIMER_MODE_REL;
1176 if (timer->is_rel)
1195 * hrtimer. cpu_base->softirq_expires_next needs to be updated!
1201 * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event()
1202 * cpu_base->*expires_next is only set by hrtimer_reprogram()
1204 hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
1207 static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1215 * If the timer is on the local cpu base and is the first expiring
1216 * timer then this might end up reprogramming the hardware twice
1218 * reprogram on removal, keep the timer local to the current CPU
1220 * it is the new first expiring timer again or not.
1222 force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
1223 force_local &= base->cpu_base->next_timer == timer;
1226 * Remove an active timer from the queue. In case it is not queued
1230 * If it's on the current CPU and the first expiring timer, then
1231 * skip reprogramming, keep the timer local and enforce
1232 * reprogramming later if it was the first expiring timer. This
1236 remove_hrtimer(timer, base, true, force_local);
1239 tim = ktime_add_safe(tim, base->get_time());
1241 tim = hrtimer_update_lowres(timer, tim, mode);
1243 hrtimer_set_expires_range_ns(timer, tim, delta_ns);
1245 /* Switch the timer base, if necessary: */
1247 new_base = switch_hrtimer_base(timer, base,
1253 first = enqueue_hrtimer(timer, new_base, mode);
1258 * Timer was forced to stay on the current CPU to avoid
1260 * hardware by evaluating the new first expiring timer.
1262 hrtimer_force_reprogram(new_base->cpu_base, 1);
1267 * hrtimer_start_range_ns - (re)start an hrtimer
1268 * @timer: the timer to be added
1270 * @delta_ns: "slack" range for the timer
1271 * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
1275 void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1281 if (WARN_ON_ONCE(!timer->function))
1289 WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
1291 WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);
1293 base = lock_hrtimer_base(timer, &flags);
1295 if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
1296 hrtimer_reprogram(timer, true);
1298 unlock_hrtimer_base(timer, &flags);
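A hedged usage sketch for hrtimer_start_range_ns(): arm a timer with expiry slack so the core may coalesce it with neighbouring expiries (my_timer, the 5ms expiry and the 1ms slack are illustrative; plain hrtimer_start() is the zero-slack wrapper):

/* Arm my_timer (initialized elsewhere) ~5ms from now with 1ms of slack. */
hrtimer_start_range_ns(&my_timer,
                       ms_to_ktime(5),          /* relative expiry */
                       NSEC_PER_MSEC,           /* allowed slack */
                       HRTIMER_MODE_REL);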
1303 * hrtimer_try_to_cancel - try to deactivate a timer
1304 * @timer: hrtimer to stop
1308 * * 0 when the timer was not active
1309 * * 1 when the timer was active
1310 * * -1 when the timer is currently executing the callback function and
1313 int hrtimer_try_to_cancel(struct hrtimer *timer)
1317 int ret = -1;
1320 * Check lockless first. If the timer is not active (neither
1325 if (!hrtimer_active(timer))
1328 base = lock_hrtimer_base(timer, &flags);
1330 if (!hrtimer_callback_running(timer))
1331 ret = remove_hrtimer(timer, base, false, false);
1333 unlock_hrtimer_base(timer, &flags);
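A hedged usage sketch for hrtimer_try_to_cancel(): unlike hrtimer_cancel() it never waits, so the caller has to handle the "callback currently running" case itself (my_timer is illustrative):

switch (hrtimer_try_to_cancel(&my_timer)) {
case 0:         /* timer was not queued */
case 1:         /* timer was queued and has been removed */
        break;
case -1:        /* callback is executing right now; nothing was removed */
        /* retry later, or use hrtimer_cancel() from sleepable context */
        break;
}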
1343 spin_lock_init(&base->softirq_expiry_lock);
1348 spin_lock(&base->softirq_expiry_lock);
1353 spin_unlock(&base->softirq_expiry_lock);
1359 * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
1360 * the timer callback to finish. Drop expiry_lock and reacquire it. That
1366 if (atomic_read(&cpu_base->timer_waiters)) {
1367 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1368 spin_unlock(&cpu_base->softirq_expiry_lock);
1369 spin_lock(&cpu_base->softirq_expiry_lock);
1370 raw_spin_lock_irq(&cpu_base->lock);
1388 * deletion of a timer failed because the timer callback function was
1392 * in the middle of a timer callback, then calling del_timer_sync() can
1395 * - If the caller is on a remote CPU then it has to spin wait for the timer
1398 * - If the caller originates from the task which preempted the timer
1399 * handler on the same CPU, then spin waiting for the timer handler to
1402 void hrtimer_cancel_wait_running(const struct hrtimer *timer)
1405 struct hrtimer_clock_base *base = READ_ONCE(timer->base);
1408 * Just relax if the timer expires in hard interrupt context or if
1411 if (!timer->is_soft || is_migration_base(base)) {
1418 * held by the softirq across the timer callback. Drop the lock
1419 * immediately so the softirq can expire the next timer. In theory
1420 * the timer could already be running again, but that's more than
1423 atomic_inc(&base->cpu_base->timer_waiters);
1424 spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
1425 atomic_dec(&base->cpu_base->timer_waiters);
1426 spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
1440 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
1441 * @timer: the timer to be cancelled
1444 * 0 when the timer was not active
1445 * 1 when the timer was active
1447 int hrtimer_cancel(struct hrtimer *timer)
1452 ret = hrtimer_try_to_cancel(timer);
1455 hrtimer_cancel_wait_running(timer);
1462 * __hrtimer_get_remaining - get remaining time for the timer
1463 * @timer: the timer to read
1466 ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
1471 lock_hrtimer_base(timer, &flags);
1473 rem = hrtimer_expires_remaining_adjusted(timer);
1475 rem = hrtimer_expires_remaining(timer);
1476 unlock_hrtimer_base(timer, &flags);
1484 * hrtimer_get_next_event - get the time until next expiry event
1486 * Returns the next expiry time or KTIME_MAX if no timer is pending.
1494 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1499 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1505 * hrtimer_next_event_without - time until next expiry event w/o one timer
1506 * @exclude: timer to exclude
1517 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1522 if (!cpu_base->softirq_activated) {
1523 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
1527 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
1532 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1550 static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1566 memset(timer, 0, sizeof(struct hrtimer));
1580 timer->is_soft = softtimer;
1581 timer->is_hard = !!(mode & HRTIMER_MODE_HARD);
1582 timer->base = &cpu_base->clock_base[base];
1583 timerqueue_init(&timer->node);
1587 * hrtimer_init - initialize a timer to the given clock
1588 * @timer: the timer to be initialized
1598 void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1601 debug_init(timer, clock_id, mode);
1602 __hrtimer_init(timer, clock_id, mode);
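A hedged usage sketch for hrtimer_init(): bind the timer to a clock and mode, set the callback, then arm it (my_timer and my_setup are illustrative; my_period_cb is the callback sketched near the hrtimer_forward() fragments above):

static struct hrtimer my_timer;

static void my_setup(void)
{
        hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        my_timer.function = my_period_cb;       /* periodic callback from earlier sketch */
        hrtimer_start(&my_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}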
1607 * A timer is active, when it is enqueued into the rbtree or the
1613 bool hrtimer_active(const struct hrtimer *timer)
1619 base = READ_ONCE(timer->base);
1620 seq = raw_read_seqcount_begin(&base->seq);
1622 if (timer->state != HRTIMER_STATE_INACTIVE ||
1623 base->running == timer)
1626 } while (read_seqcount_retry(&base->seq, seq) ||
1627 base != READ_ONCE(timer->base));
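hrtimer_active() is a lockless snapshot, so the answer is only advisory unless the caller also serializes against the timer. A hedged usage sketch (my_timer is illustrative):

/* Only re-arm if the previous instance has fully finished. */
if (!hrtimer_active(&my_timer))
        hrtimer_start(&my_timer, ms_to_ktime(100), HRTIMER_MODE_REL);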
1637 * - queued: the timer is queued
1638 * - callback: the timer is being run
1639 * - post: the timer is inactive or (re)queued
1641 * On the read side we ensure we observe timer->state and cpu_base->running
1643 * This includes timer->base changing because sequence numbers alone are
1653 struct hrtimer *timer, ktime_t *now,
1654 unsigned long flags) __must_hold(&cpu_base->lock)
1660 lockdep_assert_held(&cpu_base->lock);
1662 debug_deactivate(timer);
1663 base->running = timer;
1666 * Separate the ->running assignment from the ->state assignment.
1669 * hrtimer_active() cannot observe base->running == NULL &&
1670 * timer->state == INACTIVE.
1672 raw_write_seqcount_barrier(&base->seq);
1674 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
1675 fn = timer->function;
1679 * timer is restarted with a period then it becomes an absolute
1680 * timer. If it's not restarted it does not matter.
1683 timer->is_rel = false;
1686 * The timer is marked as running in the CPU base, so it is
1690 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1691 trace_hrtimer_expire_entry(timer, now);
1692 expires_in_hardirq = lockdep_hrtimer_enter(timer);
1694 restart = fn(timer);
1697 trace_hrtimer_expire_exit(timer);
1698 raw_spin_lock_irq(&cpu_base->lock);
1705 * Note: Because we dropped the cpu_base->lock above,
1706 * hrtimer_start_range_ns() can have popped in and enqueued the timer
1710 !(timer->state & HRTIMER_STATE_ENQUEUED))
1711 enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
1714 * Separate the ->running assignment from the ->state assignment.
1717 * hrtimer_active() cannot observe base->running == NULL &&
1718 * timer->state == INACTIVE.
1720 raw_write_seqcount_barrier(&base->seq);
1722 WARN_ON_ONCE(base->running != timer);
1723 base->running = NULL;
1730 unsigned int active = cpu_base->active_bases & active_mask;
1736 basenow = ktime_add(now, base->offset);
1738 while ((node = timerqueue_getnext(&base->active))) {
1739 struct hrtimer *timer;
1741 timer = container_of(node, struct hrtimer, node);
1752 * are right-of a not yet expired timer, because that
1753 * timer will have to trigger a wakeup anyway.
1755 if (basenow < hrtimer_get_softexpires_tv64(timer))
1758 __run_hrtimer(cpu_base, base, timer, &basenow, flags);
1772 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1777 cpu_base->softirq_activated = 0;
1780 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1787 * High resolution timer interrupt
1797 BUG_ON(!cpu_base->hres_active);
1798 cpu_base->nr_events++;
1799 dev->next_event = KTIME_MAX;
1801 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1804 cpu_base->in_hrtirq = 1;
1806 * We set expires_next to KTIME_MAX here with cpu_base->lock
1807 * held to prevent a timer from being enqueued in our queue via
1812 cpu_base->expires_next = KTIME_MAX;
1814 if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1815 cpu_base->softirq_expires_next = KTIME_MAX;
1816 cpu_base->softirq_activated = 1;
1828 cpu_base->expires_next = expires_next;
1829 cpu_base->in_hrtirq = 0;
1830 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1834 cpu_base->hang_detected = 0;
1839 * The next timer was already expired due to:
1840 * - tracing
1841 * - long lasting callbacks
1842 * - being scheduled away when running in a VM
1851 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1853 cpu_base->nr_retries++;
1862 cpu_base->nr_hangs++;
1863 cpu_base->hang_detected = 1;
1864 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1867 if ((unsigned int)delta > cpu_base->max_hang_time)
1868 cpu_base->max_hang_time = (unsigned int) delta;
1890 if (td && td->evtdev)
1891 hrtimer_interrupt(td->evtdev);
1924 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1927 if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1928 cpu_base->softirq_expires_next = KTIME_MAX;
1929 cpu_base->softirq_activated = 1;
1934 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1940 static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
1943 container_of(timer, struct hrtimer_sleeper, timer);
1944 struct task_struct *task = t->task;
1946 t->task = NULL;
1954 * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer
1956 * @mode: timer mode abs/rel
1971 if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
1974 hrtimer_start_expires(&sl->timer, mode);
1995 * OTOH, privileged real-time user space applications rely on the
1997 * a real-time scheduling class, mark the mode for hard interrupt
2005 __hrtimer_init(&sl->timer, clock_id, mode);
2006 sl->timer.function = hrtimer_wakeup;
2007 sl->task = current;
2011 * hrtimer_init_sleeper - initialize sleeper to the given clock
2014 * @mode: timer mode abs/rel
2019 debug_init(&sl->timer, clock_id, mode);
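The sleeper fragments above (hrtimer_wakeup(), __hrtimer_init_sleeper(), hrtimer_sleeper_start_expires()) combine into the pattern used by do_nanosleep() and schedule_hrtimeout(). A hedged sketch of that pattern with an illustrative 2ms sleep:

static void my_short_sleep(void)
{
        struct hrtimer_sleeper t;

        hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer_set_expires(&t.timer, ms_to_ktime(2));

        set_current_state(TASK_UNINTERRUPTIBLE);
        hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
        if (t.task)
                schedule();                     /* woken by hrtimer_wakeup() */

        hrtimer_cancel(&t.timer);
        destroy_hrtimer_on_stack(&t.timer);
        __set_current_state(TASK_RUNNING);
}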
2027 switch (restart->nanosleep.type) {
2030 if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp))
2031 return -EFAULT;
2035 if (put_timespec64(ts, restart->nanosleep.rmtp))
2036 return -EFAULT;
2041 return -ERESTART_RESTARTBLOCK;
2052 if (likely(t->task))
2055 hrtimer_cancel(&t->timer);
2058 } while (t->task && !signal_pending(current));
2062 if (!t->task)
2065 restart = &current->restart_block;
2066 if (restart->nanosleep.type != TT_NONE) {
2067 ktime_t rem = hrtimer_expires_remaining(&t->timer);
2076 return -ERESTART_RESTARTBLOCK;
2084 hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
2086 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
2088 destroy_hrtimer_on_stack(&t.timer);
2100 hrtimer_set_expires_range_ns(&t.timer, rqtp, current->timer_slack_ns);
2102 if (ret != -ERESTART_RESTARTBLOCK)
2107 ret = -ERESTARTNOHAND;
2111 restart = &current->restart_block;
2112 restart->nanosleep.clockid = t.timer.base->clockid;
2113 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
2116 destroy_hrtimer_on_stack(&t.timer);
2128 return -EFAULT;
2131 return -EINVAL;
2133 current->restart_block.fn = do_no_restart_syscall;
2134 current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
2135 current->restart_block.nanosleep.rmtp = rmtp;
2150 return -EFAULT;
2153 return -EINVAL;
2155 current->restart_block.fn = do_no_restart_syscall;
2156 current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
2157 current->restart_block.nanosleep.compat_rmtp = rmtp;
2164 * Functions related to boot-time initialization:
2172 struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i];
2174 clock_b->cpu_base = cpu_base;
2175 seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock);
2176 timerqueue_init_head(&clock_b->active);
2179 cpu_base->cpu = cpu;
2189 cpu_base->active_bases = 0;
2190 cpu_base->hres_active = 0;
2191 cpu_base->hang_detected = 0;
2192 cpu_base->next_timer = NULL;
2193 cpu_base->softirq_next_timer = NULL;
2194 cpu_base->expires_next = KTIME_MAX;
2195 cpu_base->softirq_expires_next = KTIME_MAX;
2196 cpu_base->online = 1;
2205 struct hrtimer *timer;
2208 while ((node = timerqueue_getnext(&old_base->active))) {
2209 timer = container_of(node, struct hrtimer, node);
2210 BUG_ON(hrtimer_callback_running(timer));
2211 debug_deactivate(timer);
2215 * timer could be seen as !active and just vanish away
2218 __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
2219 timer->base = new_base;
2222 * reprogram the event device in case the timer
2228 enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
2246 raw_spin_lock(&old_base->lock);
2247 raw_spin_lock_nested(&new_base->lock, SINGLE_DEPTH_NESTING);
2250 migrate_hrtimer_list(&old_base->clock_base[i],
2251 &new_base->clock_base[i]);
2256 * timer on this CPU. Update it.
2262 raw_spin_unlock(&new_base->lock);
2263 old_base->online = 0;
2264 raw_spin_unlock(&old_base->lock);
2279 * schedule_hrtimeout_range_clock - sleep until timeout
2282 * @mode: timer mode
2283 * @clock_id: timer clock to be used
2305 return -EINTR;
2309 hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
2315 hrtimer_cancel(&t.timer);
2316 destroy_hrtimer_on_stack(&t.timer);
2320 return !t.task ? 0 : -EINTR;
2325 * schedule_hrtimeout_range - sleep until timeout
2328 * @mode: timer mode
2338 * but may decide to fire the timer earlier, though no earlier than @expires.
2340 * You can set the task state as follows -
2342 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
2346 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2353 * Returns 0 when the timer has expired. If the task was woken before the
2354 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
2355 * by an explicit wakeup, it returns -EINTR.
2366 * schedule_hrtimeout - sleep until timeout
2368 * @mode: timer mode
2374 * You can set the task state as follows -
2376 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
2380 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2387 * Returns 0 when the timer has expired. If the task was woken before the
2388 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
2389 * by an explicit wakeup, it returns -EINTR.
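The kernel-doc fragments above cover schedule_hrtimeout_range() and schedule_hrtimeout(). A hedged usage sketch (the function name my_wait and the 10ms timeout are illustrative); the task state must be set before calling, exactly as the comments describe:

static int my_wait(void)
{
        ktime_t timeout = ms_to_ktime(10);

        set_current_state(TASK_INTERRUPTIBLE);
        /* 0: timer expired; -EINTR: signal or explicit wakeup came first */
        return schedule_hrtimeout(&timeout, HRTIMER_MODE_REL);
}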