Lines Matching +full:timer +full:-
1 // SPDX-License-Identifier: GPL-2.0
7 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
9 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
11 * 1998-12-24 Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
14 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
15 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
16 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
33 #include <linux/posix-timers.h>
55 #include "tick-internal.h"
58 #include <trace/events/timer.h>
65 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
72 * The array level of a newly armed timer depends on the relative expiry
76 * Contrary to the original timer wheel implementation, which aims for 'exact'
77 * expiry of the timers, this implementation removes the need for recascading
78 * the timers into the lower array levels. The previous 'classic' timer wheel
83 * This is an optimization of the original timer wheel implementation for the
84 * majority of the timer wheel use cases: timeouts. The vast majority of
104 * HZ 1000 steady state
105 * Level Offset Granularity Range
106 * 0 0 1 ms 0 ms - 63 ms
107 * 1 64 8 ms 64 ms - 511 ms
108 * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s)
109 * 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s)
110 * 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m)
111 * 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m)
112 * 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h)
113 * 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d)
114 * 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d)
116 * HZ 300
117 * Level Offset Granularity Range
118 * 0 0 3 ms 0 ms - 210 ms
119 * 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s)
120 * 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s)
121 * 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m)
122 * 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m)
123 * 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h)
124 * 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h)
125 * 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d)
126 * 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
128 * HZ 250
129 * Level Offset Granularity Range
130 * 0 0 4 ms 0 ms - 255 ms
131 * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s)
132 * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s)
133 * 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m)
134 * 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m)
135 * 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h)
136 * 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h)
137 * 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d)
138 * 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
140 * HZ 100
141 * Level Offset Granularity Range
142 * 0 0 10 ms 0 ms - 630 ms
143 * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s)
144 * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s)
145 * 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m)
146 * 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m)
147 * 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h)
148 * 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d)
149 * 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
155 #define LVL_CLK_MASK (LVL_CLK_DIV - 1)
164 #define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
169 #define LVL_MASK (LVL_SIZE - 1)
181 #define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
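
The tables above fall straight out of these macros. Below is a small userspace sketch (assuming LVL_CLK_SHIFT == 3, LVL_BITS == 6 and LVL_DEPTH == 9, the values defined elsewhere in this file) that reproduces the HZ 1000 table, where one jiffy is one millisecond:

    /* Userspace sketch, not kernel code: reproduce the HZ 1000 table.
     * LVL_CLK_SHIFT, LVL_BITS and LVL_DEPTH are assumptions taken from
     * the definitions elsewhere in this file.
     */
    #include <stdio.h>

    #define LVL_CLK_SHIFT   3
    #define LVL_BITS        6
    #define LVL_SIZE        (1UL << LVL_BITS)
    #define LVL_DEPTH       9
    #define LVL_GRAN(n)     (1UL << ((n) * LVL_CLK_SHIFT))

    int main(void)
    {
            for (unsigned int n = 0; n < LVL_DEPTH; n++) {
                    /* Each level starts where the previous one ends. */
                    unsigned long start = n ? LVL_SIZE * LVL_GRAN(n - 1) : 0;
                    unsigned long end = LVL_SIZE * LVL_GRAN(n) - 1;

                    printf("%u %9lu ms %12lu ms - %lu ms\n",
                           n, LVL_GRAN(n), start, end);
            }
            return 0;
    }

The top level is additionally capped by WHEEL_TIMEOUT_CUTOFF/WHEEL_TIMEOUT_MAX in the real code, which is why the final entries in the tables end just short of the raw 64 * granularity boundary.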
318 * due to delays of the timer irq, long irq off times, etc.) then in round_jiffies_common()
324 j = j - rem; in round_jiffies_common()
326 j = j - rem + HZ; in round_jiffies_common()
329 j -= cpu * 3; in round_jiffies_common()
339 * __round_jiffies - function to round jiffies to a full second
365 * __round_jiffies_relative - function to round jiffies to a full second
389 return round_jiffies_common(j + j0, cpu, false) - j0; in __round_jiffies_relative()
394 * round_jiffies - function to round jiffies to a full second
415 * round_jiffies_relative - function to round jiffies to a full second
436 * __round_jiffies_up - function to round jiffies up to a full second
452 * __round_jiffies_up_relative - function to round jiffies up to a full second
466 return round_jiffies_common(j + j0, cpu, true) - j0; in __round_jiffies_up_relative()
471 * round_jiffies_up - function to round jiffies up to a full second
486 * round_jiffies_up_relative - function to round jiffies up to a full second
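
Typical use of this family is coarse periodic housekeeping: rounding to a full second lets wakeups from unrelated subsystems coalesce. A minimal usage sketch (the housekeeping timer 'hk_timer' is hypothetical):

    /* Fire roughly one second from now, rounded to a full second so
     * that other rounded timers expire in the same tick. (Sketch.)
     */
    mod_timer(&hk_timer, round_jiffies(jiffies + HZ));

    /* For timeouts that must not fire early, round up instead: */
    mod_timer(&hk_timer, round_jiffies_up(jiffies + HZ));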
501 static inline unsigned int timer_get_idx(struct timer_list *timer) in timer_get_idx() argument
503 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT; in timer_get_idx()
506 static inline void timer_set_idx(struct timer_list *timer, unsigned int idx) in timer_set_idx() argument
508 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) | in timer_set_idx()
521 * The timer wheel has to guarantee that a timer does not fire in calc_index()
523 * - Timer is armed at the edge of a tick in calc_index()
524 * - Truncation of the expiry time in the outer wheel levels in calc_index()
536 unsigned long delta = expires - clk; in calc_wheel_index()
566 idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry); in calc_wheel_index()
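
Taken together: calc_wheel_index() picks the level from the relative expiry 'delta', and calc_index() rounds the expiry up by one unit of that level's granularity so a timer can never fire early. A condensed sketch of the indexing math, reconstructed from the comment above (LVL_* values as in the macros earlier; not the verbatim kernel function):

    /* Sketch: map an absolute expiry to a bucket in level 'lvl'. */
    #define LVL_SHIFT(n)    ((n) * LVL_CLK_SHIFT)
    #define LVL_OFFS(n)     ((n) * LVL_SIZE)

    static unsigned int calc_index_sketch(unsigned long expires,
                                          unsigned int lvl,
                                          unsigned long *bucket_expiry)
    {
            /* Round up by one granularity unit: truncation plus the
             * tick edge could otherwise make the timer fire early.
             */
            expires = (expires >> LVL_SHIFT(lvl)) + 1;
            *bucket_expiry = expires << LVL_SHIFT(lvl);
            return LVL_OFFS(lvl) + (expires & LVL_MASK);
    }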
572 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer) in trigger_dyntick_cpu() argument
581 if (timer->flags & TIMER_DEFERRABLE) { in trigger_dyntick_cpu()
582 if (tick_nohz_full_cpu(base->cpu)) in trigger_dyntick_cpu()
583 wake_up_nohz_cpu(base->cpu); in trigger_dyntick_cpu()
589 * timer is not deferrable. If the other CPU is on the way to idle in trigger_dyntick_cpu()
590 * then it can't set base->is_idle as we hold the base lock: in trigger_dyntick_cpu()
592 if (base->is_idle) in trigger_dyntick_cpu()
593 wake_up_nohz_cpu(base->cpu); in trigger_dyntick_cpu()
597 * Enqueue the timer into the hash bucket, mark it pending in
598 * the bitmap, store the index in the timer flags then wake up
601 static void enqueue_timer(struct timer_base *base, struct timer_list *timer, in enqueue_timer() argument
605 hlist_add_head(&timer->entry, base->vectors + idx); in enqueue_timer()
606 __set_bit(idx, base->pending_map); in enqueue_timer()
607 timer_set_idx(timer, idx); in enqueue_timer()
609 trace_timer_start(timer, timer->expires, timer->flags); in enqueue_timer()
612 * Check whether this is the new first expiring timer. The in enqueue_timer()
613 * effective expiry time of the timer is required here in enqueue_timer()
614 * (bucket_expiry) instead of timer->expires. in enqueue_timer()
616 if (time_before(bucket_expiry, base->next_expiry)) { in enqueue_timer()
621 base->next_expiry = bucket_expiry; in enqueue_timer()
622 base->timers_pending = true; in enqueue_timer()
623 base->next_expiry_recalc = false; in enqueue_timer()
624 trigger_dyntick_cpu(base, timer); in enqueue_timer()
628 static void internal_add_timer(struct timer_base *base, struct timer_list *timer) in internal_add_timer() argument
633 idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry); in internal_add_timer()
634 enqueue_timer(base, timer, idx, bucket_expiry); in internal_add_timer()
649 .offset = offsetof(container, hintfn) - \
655 struct delayed_work, timer, work.func),
657 struct kthread_delayed_work, timer, work.func),
662 struct timer_list *timer = addr; in timer_debug_hint() local
666 if (timer_hints[i].function == timer->function) { in timer_debug_hint()
673 return timer->function; in timer_debug_hint()
678 struct timer_list *timer = addr; in timer_is_static_object() local
680 return (timer->entry.pprev == NULL && in timer_is_static_object()
681 timer->entry.next == TIMER_ENTRY_STATIC); in timer_is_static_object()
686 * - an active object is initialized
690 struct timer_list *timer = addr; in timer_fixup_init() local
694 del_timer_sync(timer); in timer_fixup_init()
695 debug_object_init(timer, &timer_debug_descr); in timer_fixup_init()
702 /* Stub timer callback for improperly used timers. */
710 * - an active object is activated
711 * - an unknown non-static object is activated
715 struct timer_list *timer = addr; in timer_fixup_activate() local
719 timer_setup(timer, stub_timer, 0); in timer_fixup_activate()
732 * - an active object is freed
736 struct timer_list *timer = addr; in timer_fixup_free() local
740 del_timer_sync(timer); in timer_fixup_free()
741 debug_object_free(timer, &timer_debug_descr); in timer_fixup_free()
750 * - an untracked/uninit-ed object is found
754 struct timer_list *timer = addr; in timer_fixup_assert_init() local
758 timer_setup(timer, stub_timer, 0); in timer_fixup_assert_init()
775 static inline void debug_timer_init(struct timer_list *timer) in debug_timer_init() argument
777 debug_object_init(timer, &timer_debug_descr); in debug_timer_init()
780 static inline void debug_timer_activate(struct timer_list *timer) in debug_timer_activate() argument
782 debug_object_activate(timer, &timer_debug_descr); in debug_timer_activate()
785 static inline void debug_timer_deactivate(struct timer_list *timer) in debug_timer_deactivate() argument
787 debug_object_deactivate(timer, &timer_debug_descr); in debug_timer_deactivate()
790 static inline void debug_timer_assert_init(struct timer_list *timer) in debug_timer_assert_init() argument
792 debug_object_assert_init(timer, &timer_debug_descr); in debug_timer_assert_init()
795 static void do_init_timer(struct timer_list *timer,
800 void init_timer_on_stack_key(struct timer_list *timer, in init_timer_on_stack_key() argument
805 debug_object_init_on_stack(timer, &timer_debug_descr); in init_timer_on_stack_key()
806 do_init_timer(timer, func, flags, name, key); in init_timer_on_stack_key()
810 void destroy_timer_on_stack(struct timer_list *timer) in destroy_timer_on_stack() argument
812 debug_object_free(timer, &timer_debug_descr); in destroy_timer_on_stack()
817 static inline void debug_timer_init(struct timer_list *timer) { } in debug_timer_init() argument
818 static inline void debug_timer_activate(struct timer_list *timer) { } in debug_timer_activate() argument
819 static inline void debug_timer_deactivate(struct timer_list *timer) { } in debug_timer_deactivate() argument
820 static inline void debug_timer_assert_init(struct timer_list *timer) { } in debug_timer_assert_init() argument
823 static inline void debug_init(struct timer_list *timer) in debug_init() argument
825 debug_timer_init(timer); in debug_init()
826 trace_timer_init(timer); in debug_init()
829 static inline void debug_deactivate(struct timer_list *timer) in debug_deactivate() argument
831 debug_timer_deactivate(timer); in debug_deactivate()
832 trace_timer_cancel(timer); in debug_deactivate()
835 static inline void debug_assert_init(struct timer_list *timer) in debug_assert_init() argument
837 debug_timer_assert_init(timer); in debug_assert_init()
840 static void do_init_timer(struct timer_list *timer, in do_init_timer() argument
845 timer->entry.pprev = NULL; in do_init_timer()
846 timer->function = func; in do_init_timer()
849 timer->flags = flags | raw_smp_processor_id(); in do_init_timer()
850 lockdep_init_map(&timer->lockdep_map, name, key, 0); in do_init_timer()
854 * init_timer_key - initialize a timer
855 * @timer: the timer to be initialized
856 * @func: timer callback function
857 * @flags: timer flags
858 * @name: name of the timer
859 * @key: lockdep class key of the fake lock used for tracking timer
862 * init_timer_key() must be done to a timer prior to calling *any* of the
863 * other timer functions.
865 void init_timer_key(struct timer_list *timer, in init_timer_key() argument
869 debug_init(timer); in init_timer_key()
870 do_init_timer(timer, func, flags, name, key); in init_timer_key()
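
Drivers normally reach init_timer_key() through the timer_setup() wrapper. A minimal usage sketch (the 'mydev' structure and its one-second poll interval are hypothetical):

    /* Hypothetical device with an embedded periodic poll timer. */
    struct mydev {
            struct timer_list poll_timer;
    };

    static void mydev_poll(struct timer_list *t)
    {
            struct mydev *dev = from_timer(dev, t, poll_timer);

            /* ... poll the hardware ... */
            mod_timer(&dev->poll_timer, jiffies + HZ);  /* re-arm in ~1s */
    }

    /* At init time: */
    timer_setup(&dev->poll_timer, mydev_poll, 0);
    mod_timer(&dev->poll_timer, jiffies + HZ);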
874 static inline void detach_timer(struct timer_list *timer, bool clear_pending) in detach_timer() argument
876 struct hlist_node *entry = &timer->entry; in detach_timer()
878 debug_deactivate(timer); in detach_timer()
882 entry->pprev = NULL; in detach_timer()
883 entry->next = LIST_POISON2; in detach_timer()
886 static int detach_if_pending(struct timer_list *timer, struct timer_base *base, in detach_if_pending() argument
889 unsigned idx = timer_get_idx(timer); in detach_if_pending()
891 if (!timer_pending(timer)) in detach_if_pending()
894 if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) { in detach_if_pending()
895 __clear_bit(idx, base->pending_map); in detach_if_pending()
896 base->next_expiry_recalc = true; in detach_if_pending()
899 detach_timer(timer, clear_pending); in detach_if_pending()
908 * If the timer is deferrable and NO_HZ_COMMON is set then we need in get_timer_cpu_base()
921 * If the timer is deferrable and NO_HZ_COMMON is set then we need in get_timer_this_cpu_base()
951 * Also while executing timers, base->clk is 1 offset ahead in forward_timer_base()
954 if ((long)(jnow - base->clk) < 1) in forward_timer_base()
961 if (time_after(base->next_expiry, jnow)) { in forward_timer_base()
962 base->clk = jnow; in forward_timer_base()
964 if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk))) in forward_timer_base()
966 base->clk = base->next_expiry; in forward_timer_base()
977 * be found in the base->vectors array.
979 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
982 static struct timer_base *lock_timer_base(struct timer_list *timer, in lock_timer_base() argument
984 __acquires(timer->base->lock) in lock_timer_base()
992 * might re-read @tf between the check for TIMER_MIGRATING in lock_timer_base()
995 tf = READ_ONCE(timer->flags); in lock_timer_base()
999 raw_spin_lock_irqsave(&base->lock, *flags); in lock_timer_base()
1000 if (timer->flags == tf) in lock_timer_base()
1002 raw_spin_unlock_irqrestore(&base->lock, *flags); in lock_timer_base()
1013 __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options) in __mod_timer() argument
1020 debug_assert_init(timer); in __mod_timer()
1023 * This is a common optimization triggered by the networking code - if in __mod_timer()
1024 * the timer is re-modified to have the same timeout or ends up in the in __mod_timer()
1027 if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) { in __mod_timer()
1031 * timer with this expiry. in __mod_timer()
1033 long diff = timer->expires - expires; in __mod_timer()
1041 * We lock timer base and calculate the bucket index right in __mod_timer()
1042 * here. If the timer ends up in the same bucket, then we in __mod_timer()
1046 base = lock_timer_base(timer, &flags); in __mod_timer()
1048 * Has @timer been shutdown? This needs to be evaluated in __mod_timer()
1052 if (!timer->function) in __mod_timer()
1057 if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) && in __mod_timer()
1058 time_before_eq(timer->expires, expires)) { in __mod_timer()
1063 clk = base->clk; in __mod_timer()
1068 * timer. If it matches set the expiry to the new value so a in __mod_timer()
1071 if (idx == timer_get_idx(timer)) { in __mod_timer()
1073 timer->expires = expires; in __mod_timer()
1074 else if (time_after(timer->expires, expires)) in __mod_timer()
1075 timer->expires = expires; in __mod_timer()
1080 base = lock_timer_base(timer, &flags); in __mod_timer()
1082 * Has @timer been shutdown? This needs to be evaluated in __mod_timer()
1086 if (!timer->function) in __mod_timer()
1092 ret = detach_if_pending(timer, base, false); in __mod_timer()
1096 new_base = get_target_base(base, timer->flags); in __mod_timer()
1100 * We are trying to schedule the timer on the new base. in __mod_timer()
1101 * However we can't change timer's base while it is running, in __mod_timer()
1102 * otherwise timer_delete_sync() can't detect that the timer's in __mod_timer()
1104 * timer is serialized wrt itself. in __mod_timer()
1106 if (likely(base->running_timer != timer)) { in __mod_timer()
1108 timer->flags |= TIMER_MIGRATING; in __mod_timer()
1110 raw_spin_unlock(&base->lock); in __mod_timer()
1112 raw_spin_lock(&base->lock); in __mod_timer()
1113 WRITE_ONCE(timer->flags, in __mod_timer()
1114 (timer->flags & ~TIMER_BASEMASK) | base->cpu); in __mod_timer()
1119 debug_timer_activate(timer); in __mod_timer()
1121 timer->expires = expires; in __mod_timer()
1128 if (idx != UINT_MAX && clk == base->clk) in __mod_timer()
1129 enqueue_timer(base, timer, idx, bucket_expiry); in __mod_timer()
1131 internal_add_timer(base, timer); in __mod_timer()
1134 raw_spin_unlock_irqrestore(&base->lock, flags); in __mod_timer()
1140 * mod_timer_pending - Modify a pending timer's timeout
1141 * @timer: The pending timer to be modified
1147 * If @timer->function == NULL then the start operation is silently
1151 * * %0 - The timer was inactive and not modified or was in
1153 * * %1 - The timer was active and requeued to expire at @expires
1155 int mod_timer_pending(struct timer_list *timer, unsigned long expires) in mod_timer_pending() argument
1157 return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY); in mod_timer_pending()
1162 * mod_timer - Modify a timer's timeout
1163 * @timer: The timer to be modified
1166 * mod_timer(timer, expires) is equivalent to:
1168 * del_timer(timer); timer->expires = expires; add_timer(timer);
1171 * case that the timer is inactive, the del_timer() part is a NOP. The
1172 * timer is in any case activated with the new expiry time @expires.
1175 * same timer, then mod_timer() is the only safe way to modify the timeout,
1176 * since add_timer() cannot modify an already running timer.
1178 * If @timer->function == NULL then the start operation is silently
1182 * * %0 - The timer was inactive and started or was in shutdown
1184 * * %1 - The timer was active and requeued to expire at @expires or
1185 * the timer was active and not modified because @expires did
1188 int mod_timer(struct timer_list *timer, unsigned long expires) in mod_timer() argument
1190 return __mod_timer(timer, expires, 0); in mod_timer()
1195 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
1196 * @timer: The timer to be modified
1200 * modify an enqueued timer if that would reduce the expiration time. If
1201 * @timer is not enqueued it starts the timer.
1203 * If @timer->function == NULL then the start operation is silently
1207 * * %0 - The timer was inactive and started or was in shutdown
1209 * * %1 - The timer was active and requeued to expire at @expires or
1210 * the timer was active and not modified because @expires
1212 * timer would expire earlier than already scheduled
1214 int timer_reduce(struct timer_list *timer, unsigned long expires) in timer_reduce() argument
1216 return __mod_timer(timer, expires, MOD_TIMER_REDUCE); in timer_reduce()
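
A usage sketch of the distinction (hypothetical watchdog timer): timer_reduce() only ever pulls the expiry in, so concurrent callers cannot push an already nearer deadline out:

    /* Make sure wd_timer fires no later than 'deadline'; if it is
     * already set to fire earlier, it is left untouched. mod_timer()
     * would instead move the expiry out as well. (Sketch.)
     */
    timer_reduce(&wd_timer, deadline);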
1221 * add_timer - Start a timer
1222 * @timer: The timer to be started
1224 * Start @timer to expire at @timer->expires in the future. @timer->expires
1225 * is the absolute expiry time measured in 'jiffies'. When the timer expires
1226 * timer->function(timer) will be invoked from soft interrupt context.
1228 * The @timer->expires and @timer->function fields must be set prior
1231 * If @timer->function == NULL then the start operation is silently
1234 * If @timer->expires is already in the past @timer will be queued to
1235 * expire at the next timer tick.
1237 * This can only operate on an inactive timer. Attempts to invoke this on
1238 * an active timer are rejected with a warning.
1240 void add_timer(struct timer_list *timer) in add_timer() argument
1242 if (WARN_ON_ONCE(timer_pending(timer))) in add_timer()
1244 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING); in add_timer()
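
Usage sketch: unlike mod_timer(), add_timer() takes its expiry from the pre-set @timer->expires and must only be used on an inactive timer:

    /* Hypothetical one-shot timer: set the expiry, then arm it. */
    timer_setup(&t, my_callback, 0);
    t.expires = jiffies + 5 * HZ;   /* fire in about five seconds */
    add_timer(&t);

    /* To (re)arm a timer that may already be pending, use
     * mod_timer(&t, jiffies + 5 * HZ) instead.
     */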
1249 * add_timer_on - Start a timer on a particular CPU
1250 * @timer: The timer to be started
1253 * Same as add_timer() except that it starts the timer on the given CPU.
1257 void add_timer_on(struct timer_list *timer, int cpu) in add_timer_on() argument
1262 debug_assert_init(timer); in add_timer_on()
1264 if (WARN_ON_ONCE(timer_pending(timer))) in add_timer_on()
1267 new_base = get_timer_cpu_base(timer->flags, cpu); in add_timer_on()
1270 * If @timer was on a different CPU, it should be migrated with the in add_timer_on()
1274 base = lock_timer_base(timer, &flags); in add_timer_on()
1276 * Has @timer been shutdown? This needs to be evaluated while in add_timer_on()
1279 if (!timer->function) in add_timer_on()
1283 timer->flags |= TIMER_MIGRATING; in add_timer_on()
1285 raw_spin_unlock(&base->lock); in add_timer_on()
1287 raw_spin_lock(&base->lock); in add_timer_on()
1288 WRITE_ONCE(timer->flags, in add_timer_on()
1289 (timer->flags & ~TIMER_BASEMASK) | cpu); in add_timer_on()
1293 debug_timer_activate(timer); in add_timer_on()
1294 internal_add_timer(base, timer); in add_timer_on()
1296 raw_spin_unlock_irqrestore(&base->lock, flags); in add_timer_on()
1301 * __timer_delete - Internal function: Deactivate a timer
1302 * @timer: The timer to be deactivated
1303 * @shutdown: If true, this indicates that the timer is about to be
1306 * If @shutdown is true then @timer->function is set to NULL under the
1307 * timer base lock which prevents further rearming of the timer. In that
1308 * case any attempt to rearm @timer after this function returns will be
1312 * * %0 - The timer was not pending
1313 * * %1 - The timer was pending and deactivated
1315 static int __timer_delete(struct timer_list *timer, bool shutdown) in __timer_delete() argument
1321 debug_assert_init(timer); in __timer_delete()
1325 * timer is pending or not to protect against a concurrent rearm in __timer_delete()
1328 * enqueued timer is dequeued and cannot end up with in __timer_delete()
1329 * timer->function == NULL in the expiry code. in __timer_delete()
1331 * If timer->function is currently executed, then this makes sure in __timer_delete()
1332 * that the callback cannot requeue the timer. in __timer_delete()
1334 if (timer_pending(timer) || shutdown) { in __timer_delete()
1335 base = lock_timer_base(timer, &flags); in __timer_delete()
1336 ret = detach_if_pending(timer, base, true); in __timer_delete()
1338 timer->function = NULL; in __timer_delete()
1339 raw_spin_unlock_irqrestore(&base->lock, flags); in __timer_delete()
1346 * timer_delete - Deactivate a timer
1347 * @timer: The timer to be deactivated
1349 * The function only deactivates a pending timer, but contrary to
1350 * timer_delete_sync() it does not take into account whether the timer's
1351 * callback function is concurrently executed on a different CPU or not.
1352 * Nor does it prevent rearming of the timer. If @timer can be rearmed
1356 * * %0 - The timer was not pending
1357 * * %1 - The timer was pending and deactivated
1359 int timer_delete(struct timer_list *timer) in timer_delete() argument
1361 return __timer_delete(timer, false); in timer_delete()
1366 * timer_shutdown - Deactivate a timer and prevent rearming
1367 * @timer: The timer to be deactivated
1369 * The function does not wait for a possibly running timer callback on a
1370 * different CPU but it prevents rearming of the timer. Any attempt to arm
1371 * @timer after this function returns will be silently ignored.
1377 * * %0 - The timer was not pending
1378 * * %1 - The timer was pending
1380 int timer_shutdown(struct timer_list *timer) in timer_shutdown() argument
1382 return __timer_delete(timer, true); in timer_shutdown()
1387 * __try_to_del_timer_sync - Internal function: Try to deactivate a timer
1388 * @timer: Timer to deactivate
1389 * @shutdown: If true, this indicates that the timer is about to be
1392 * If @shutdown is true then @timer->function is set to NULL under the
1393 * timer base lock which prevents further rearming of the timer. Any
1394 * attempt to rearm @timer after this function returns will be silently
1397 * This function cannot guarantee that the timer cannot be rearmed
1402 * * %0 - The timer was not pending
1403 * * %1 - The timer was pending and deactivated
1404 * * %-1 - The timer callback function is running on a different CPU
1406 static int __try_to_del_timer_sync(struct timer_list *timer, bool shutdown) in __try_to_del_timer_sync() argument
1410 int ret = -1; in __try_to_del_timer_sync()
1412 debug_assert_init(timer); in __try_to_del_timer_sync()
1414 base = lock_timer_base(timer, &flags); in __try_to_del_timer_sync()
1416 if (base->running_timer != timer) in __try_to_del_timer_sync()
1417 ret = detach_if_pending(timer, base, true); in __try_to_del_timer_sync()
1419 timer->function = NULL; in __try_to_del_timer_sync()
1421 raw_spin_unlock_irqrestore(&base->lock, flags); in __try_to_del_timer_sync()
1427 * try_to_del_timer_sync - Try to deactivate a timer
1428 * @timer: Timer to deactivate
1430 * This function tries to deactivate a timer. On success the timer is not
1431 * queued and the timer callback function is not running on any CPU.
1433 * This function does not guarantee that the timer cannot be rearmed right
1438 * * %0 - The timer was not pending
1439 * * %1 - The timer was pending and deactivated
1440 * * %-1 - The timer callback function is running on a different CPU
1442 int try_to_del_timer_sync(struct timer_list *timer) in try_to_del_timer_sync() argument
1444 return __try_to_del_timer_sync(timer, false); in try_to_del_timer_sync()
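
try_to_del_timer_sync() exists for callers that hold a lock which the timer callback also takes, where timer_delete_sync() would deadlock. A sketched (hypothetical) retry pattern:

    /* 'obj->lock' is also taken by the timer callback, so we must not
     * wait in timer_delete_sync() while holding it. (Sketch.)
     */
    spin_lock(&obj->lock);
    while (try_to_del_timer_sync(&obj->timer) < 0) {
            spin_unlock(&obj->lock);        /* callback is running */
            cpu_relax();                    /* let it finish */
            spin_lock(&obj->lock);
    }
    /* Here the timer is neither pending nor running its callback. */
    spin_unlock(&obj->lock);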
1451 spin_lock_init(&base->expiry_lock); in timer_base_init_expiry_lock()
1456 spin_lock(&base->expiry_lock); in timer_base_lock_expiry()
1461 spin_unlock(&base->expiry_lock); in timer_base_unlock_expiry()
1467 * If there is a waiter for base->expiry_lock, then it was waiting for the
1468 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
1473 if (atomic_read(&base->timer_waiters)) { in timer_sync_wait_running()
1474 raw_spin_unlock_irq(&base->lock); in timer_sync_wait_running()
1475 spin_unlock(&base->expiry_lock); in timer_sync_wait_running()
1476 spin_lock(&base->expiry_lock); in timer_sync_wait_running()
1477 raw_spin_lock_irq(&base->lock); in timer_sync_wait_running()
1483 * deletion of a timer failed because the timer callback function was
1488 * delete a timer preempted the softirq thread running the timer callback
1491 static void del_timer_wait_running(struct timer_list *timer) in del_timer_wait_running() argument
1495 tf = READ_ONCE(timer->flags); in del_timer_wait_running()
1501 * which is held by the softirq across the timer in del_timer_wait_running()
1503 * expire the next timer. In theory the timer could already in del_timer_wait_running()
1507 atomic_inc(&base->timer_waiters); in del_timer_wait_running()
1508 spin_lock_bh(&base->expiry_lock); in del_timer_wait_running()
1509 atomic_dec(&base->timer_waiters); in del_timer_wait_running()
1510 spin_unlock_bh(&base->expiry_lock); in del_timer_wait_running()
1518 static inline void del_timer_wait_running(struct timer_list *timer) { } in del_timer_wait_running() argument
1522 * __timer_delete_sync - Internal function: Deactivate a timer and wait
1524 * @timer: The timer to be deactivated
1525 * @shutdown: If true, @timer->function will be set to NULL under the
1526 * timer base lock which prevents rearming of @timer
1528 * If @shutdown is not set the timer can be rearmed later. If the timer can
1532 * If @shutdown is set then @timer->function is set to NULL under timer
1533 * base lock which prevents rearming of the timer. Any attempt to rearm
1534 * a shutdown timer is silently ignored.
1536 * If the timer should be reused after shutdown it has to be initialized
1540 * * %0 - The timer was not pending
1541 * * %1 - The timer was pending and deactivated
1543 static int __timer_delete_sync(struct timer_list *timer, bool shutdown) in __timer_delete_sync() argument
1555 lock_map_acquire(&timer->lockdep_map); in __timer_delete_sync()
1556 lock_map_release(&timer->lockdep_map); in __timer_delete_sync()
1563 WARN_ON(in_hardirq() && !(timer->flags & TIMER_IRQSAFE)); in __timer_delete_sync()
1569 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE)) in __timer_delete_sync()
1573 ret = __try_to_del_timer_sync(timer, shutdown); in __timer_delete_sync()
1576 del_timer_wait_running(timer); in __timer_delete_sync()
1585 * timer_delete_sync - Deactivate a timer and wait for the handler to finish.
1586 * @timer: The timer to be deactivated
1588 * Synchronization rules: Callers must prevent restarting of the timer,
1590 * interrupt contexts unless the timer is an irqsafe one. The caller must
1591 * not hold locks which would prevent completion of the timer's callback
1592 * function. The timer's handler must not call add_timer_on(). Upon exit
1593 * the timer is not queued and the handler is not running on any CPU.
1596 * interrupt context. Even if the lock has nothing to do with the timer in
1599 * CPU0 CPU1
1600 * ---- ----
1603 * base->running_timer = mytimer;
1608 * while (base->running_timer == mytimer);
1614 * This function cannot guarantee that the timer is not rearmed again by
1623 * * %0 - The timer was not pending
1624 * * %1 - The timer was pending and deactivated
1626 int timer_delete_sync(struct timer_list *timer) in timer_delete_sync() argument
1628 return __timer_delete_sync(timer, false); in timer_delete_sync()
1633 * timer_shutdown_sync - Shutdown a timer and prevent rearming
1634 * @timer: The timer to be shutdown
1637 * - @timer is not queued
1638 * - The callback function of @timer is not running
1639 * - @timer cannot be enqueued again. Any attempt to rearm
1640 * @timer is silently ignored.
1645 * the timer is subject to a circular dependency problem.
1647 * A common pattern for this is a timer and a workqueue where the timer can
1648 * schedule work and work can arm the timer. On shutdown the workqueue must
1649 * be destroyed and the timer must be prevented from rearming. Unless the
1650 * code has conditionals like 'if (mything->in_shutdown)' to prevent that
1656 * timer_shutdown_sync(&mything->timer);
1657 * workqueue_destroy(&mything->workqueue);
1661 * This obviously implies that the timer is not required to be functional
1665 * * %0 - The timer was not pending
1666 * * %1 - The timer was pending
1668 int timer_shutdown_sync(struct timer_list *timer) in timer_shutdown_sync() argument
1670 return __timer_delete_sync(timer, true); in timer_shutdown_sync()
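
A concrete teardown sketch for the timer/workqueue pattern described above (the 'mything' object is hypothetical):

    /* After timer_shutdown_sync() returns, the callback has finished
     * and any attempt to re-arm the timer is silently ignored, so the
     * work item can no longer resurrect it. (Sketch.)
     */
    timer_shutdown_sync(&mything->timer);
    cancel_work_sync(&mything->work);
    kfree(mything);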
1674 static void call_timer_fn(struct timer_list *timer, in call_timer_fn() argument
1682 * It is permissible to free the timer from inside the in call_timer_fn()
1686 * timer->lockdep_map, make a copy and use that here. in call_timer_fn()
1690 lockdep_copy_map(&lockdep_map, &timer->lockdep_map); in call_timer_fn()
1699 trace_timer_expire_entry(timer, baseclk); in call_timer_fn()
1700 fn(timer); in call_timer_fn()
1701 trace_timer_expire_exit(timer); in call_timer_fn()
1706 WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n", in call_timer_fn()
1721 * This value is required only for tracing. base->clk was in expire_timers()
1722 * incremented directly before expire_timers was called. But expiry in expire_timers()
1723 * is related to the old base->clk value. in expire_timers()
1725 unsigned long baseclk = base->clk - 1; in expire_timers()
1728 struct timer_list *timer; in expire_timers() local
1731 timer = hlist_entry(head->first, struct timer_list, entry); in expire_timers()
1733 base->running_timer = timer; in expire_timers()
1734 detach_timer(timer, true); in expire_timers()
1736 fn = timer->function; in expire_timers()
1740 base->running_timer = NULL; in expire_timers()
1744 if (timer->flags & TIMER_IRQSAFE) { in expire_timers()
1745 raw_spin_unlock(&base->lock); in expire_timers()
1746 call_timer_fn(timer, fn, baseclk); in expire_timers()
1747 raw_spin_lock(&base->lock); in expire_timers()
1748 base->running_timer = NULL; in expire_timers()
1750 raw_spin_unlock_irq(&base->lock); in expire_timers()
1751 call_timer_fn(timer, fn, baseclk); in expire_timers()
1752 raw_spin_lock_irq(&base->lock); in expire_timers()
1753 base->running_timer = NULL; in expire_timers()
1762 unsigned long clk = base->clk = base->next_expiry; in collect_expired_timers()
1770 if (__test_and_clear_bit(idx, base->pending_map)) { in collect_expired_timers()
1771 vec = base->vectors + idx; in collect_expired_timers()
1795 pos = find_next_bit(base->pending_map, end, start); in next_pending_bucket()
1797 return pos - start; in next_pending_bucket()
1799 pos = find_next_bit(base->pending_map, start, offset); in next_pending_bucket()
1800 return pos < start ? pos + LVL_SIZE - start : -1; in next_pending_bucket()
1804 * Search the first expiring timer in the various clock levels. Caller must
1805 * hold base->lock.
1812 next = base->clk + NEXT_TIMER_MAX_DELTA; in __next_timer_interrupt()
1813 clk = base->clk; in __next_timer_interrupt()
1829 if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK)) in __next_timer_interrupt()
1836 * next expiring bucket in that level. base->clk is the next in __next_timer_interrupt()
1873 base->next_expiry_recalc = false; in __next_timer_interrupt()
1874 base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA); in __next_timer_interrupt()
1881 * Check if the next hrtimer event is before the next timer wheel
1896 * If the next timer is already expired, return the tick base in cmp_next_hrtimer_event()
1905 * make sure that this tick really expires the timer to avoid in cmp_next_hrtimer_event()
1914 * get_next_timer_interrupt - return the time (clock mono) of the next timer
1919 * timer or KTIME_MAX if no timer is pending.
1928 * Pretend that there is no timer pending if the cpu is offline. in get_next_timer_interrupt()
1934 raw_spin_lock(&base->lock); in get_next_timer_interrupt()
1935 if (base->next_expiry_recalc) in get_next_timer_interrupt()
1936 base->next_expiry = __next_timer_interrupt(base); in get_next_timer_interrupt()
1937 nextevt = base->next_expiry; in get_next_timer_interrupt()
1941 * base. We can only do that when @basej is past base->clk in get_next_timer_interrupt()
1942 * otherwise we might rewind base->clk. in get_next_timer_interrupt()
1944 if (time_after(basej, base->clk)) { in get_next_timer_interrupt()
1946 base->clk = basej; in get_next_timer_interrupt()
1947 else if (time_after(nextevt, base->clk)) in get_next_timer_interrupt()
1948 base->clk = nextevt; in get_next_timer_interrupt()
1953 base->is_idle = false; in get_next_timer_interrupt()
1955 if (base->timers_pending) in get_next_timer_interrupt()
1956 expires = basem + (u64)(nextevt - basej) * TICK_NSEC; in get_next_timer_interrupt()
1959 * Also the tick is stopped so any added timer must forward in get_next_timer_interrupt()
1964 if ((expires - basem) > TICK_NSEC) in get_next_timer_interrupt()
1965 base->is_idle = true; in get_next_timer_interrupt()
1967 raw_spin_unlock(&base->lock); in get_next_timer_interrupt()
1973 * timer_clear_idle - Clear the idle state of the timer base
1987 base->is_idle = false; in timer_clear_idle()
1992 * __run_timers - run all expired timers (if any) on this CPU.
1993 * @base: the timer vector to be processed.
2000 if (time_before(jiffies, base->next_expiry)) in __run_timers()
2004 raw_spin_lock_irq(&base->lock); in __run_timers()
2006 while (time_after_eq(jiffies, base->clk) && in __run_timers()
2007 time_after_eq(jiffies, base->next_expiry)) { in __run_timers()
2011 * timer at this clk are that all matching timers have been in __run_timers()
2012 * dequeued or no timer has been queued since in __run_timers()
2016 WARN_ON_ONCE(!levels && !base->next_expiry_recalc in __run_timers()
2017 && base->timers_pending); in __run_timers()
2018 base->clk++; in __run_timers()
2019 base->next_expiry = __next_timer_interrupt(base); in __run_timers()
2021 while (levels--) in __run_timers()
2024 raw_spin_unlock_irq(&base->lock); in __run_timers()
2029 * This function runs timers and the timer-tq in bottom half context.
2041 * Called by the local, per-CPU timer interrupt on SMP.
2049 if (time_before(jiffies, base->next_expiry)) { in run_local_timers()
2054 if (time_before(jiffies, base->next_expiry)) in run_local_timers()
2061 * Called from the timer interrupt handler to charge one tick to the current
2068 /* Note: this timer irq context must be accounted for as well. */ in update_process_times()
2082 * Since schedule_timeout()'s timer is defined on the stack, it must store
2086 struct timer_list timer; member
2092 struct process_timer *timeout = from_timer(timeout, t, timer); in process_timeout()
2094 wake_up_process(timeout->task); in process_timeout()
2098 * schedule_timeout - sleep until timeout
2105 * %TASK_RUNNING - the scheduler is called, but the task does not sleep
2109 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
2113 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
2124 * Returns 0 when the timer has expired, otherwise the remaining time in
2126 * to be non-negative.
2130 struct process_timer timer; in schedule_timeout() local
2164 timer.task = current; in schedule_timeout()
2165 timer_setup_on_stack(&timer.timer, process_timeout, 0); in schedule_timeout()
2166 __mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING); in schedule_timeout()
2168 del_timer_sync(&timer.timer); in schedule_timeout()
2170 /* Remove the timer from the object tracker */ in schedule_timeout()
2171 destroy_timer_on_stack(&timer.timer); in schedule_timeout()
2173 timeout = expire - jiffies; in schedule_timeout()
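
Usage sketch: the task state must be set before calling schedule_timeout(), otherwise the task will not actually sleep; helpers such as schedule_timeout_interruptible() bundle the two steps:

    long remaining;

    /* Sleep for up to two seconds, waking early on a signal. */
    set_current_state(TASK_INTERRUPTIBLE);
    remaining = schedule_timeout(2 * HZ);
    if (remaining)
            pr_debug("woken early, %ld jiffies left\n", remaining);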
2219 struct timer_list *timer; in migrate_timer_list() local
2220 int cpu = new_base->cpu; in migrate_timer_list()
2223 timer = hlist_entry(head->first, struct timer_list, entry); in migrate_timer_list()
2224 detach_timer(timer, false); in migrate_timer_list()
2225 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu; in migrate_timer_list()
2226 internal_add_timer(new_base, timer); in migrate_timer_list()
2237 base->clk = jiffies; in timers_prepare_cpu()
2238 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; in timers_prepare_cpu()
2239 base->next_expiry_recalc = false; in timers_prepare_cpu()
2240 base->timers_pending = false; in timers_prepare_cpu()
2241 base->is_idle = false; in timers_prepare_cpu()
2259 raw_spin_lock_irq(&new_base->lock); in timers_dead_cpu()
2260 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); in timers_dead_cpu()
2268 WARN_ON_ONCE(old_base->running_timer); in timers_dead_cpu()
2269 old_base->running_timer = NULL; in timers_dead_cpu()
2272 migrate_timer_list(new_base, old_base->vectors + i); in timers_dead_cpu()
2274 raw_spin_unlock(&old_base->lock); in timers_dead_cpu()
2275 raw_spin_unlock_irq(&new_base->lock); in timers_dead_cpu()
2290 base->cpu = cpu; in init_timer_cpu()
2291 raw_spin_lock_init(&base->lock); in init_timer_cpu()
2292 base->clk = jiffies; in init_timer_cpu()
2293 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; in init_timer_cpu()
2314 * msleep - sleep safely even with waitqueue interruptions
2328 * msleep_interruptible - sleep waiting for signals
2343 * usleep_range_state - Sleep for an approximate time in a given state
2348 * In non-atomic context where the exact wakeup time is flexible, use
2350 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
2351 * power usage by allowing hrtimers to take advantage of an already-
2358 u64 delta = (u64)(max - min) * NSEC_PER_USEC; in usleep_range_state()
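
Callers normally reach this through the usleep_range() wrapper. Usage sketch: a deliberately wide range gives the hrtimer subsystem room to coalesce the wakeup with an already-pending event:

    /* Non-atomic context: sleep roughly 100-200 microseconds. */
    usleep_range(100, 200);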