1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Kernel internal timers 4 * 5 * Copyright (C) 1991, 1992 Linus Torvalds 6 * 7 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better. 8 * 9 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 10 * "A Kernel Model for Precision Timekeeping" by Dave Mills 11 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to 12 * serialize accesses to xtime/lost_ticks). 13 * Copyright (C) 1998 Andrea Arcangeli 14 * 1999-03-10 Improved NTP compatibility by Ulrich Windl 15 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love 16 * 2000-10-05 Implemented scalable SMP per-CPU timer handling. 17 * Copyright (C) 2000, 2001, 2002 Ingo Molnar 18 * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar 19 */ 20 21 #include <linux/kernel_stat.h> 22 #include <linux/export.h> 23 #include <linux/interrupt.h> 24 #include <linux/percpu.h> 25 #include <linux/init.h> 26 #include <linux/mm.h> 27 #include <linux/swap.h> 28 #include <linux/pid_namespace.h> 29 #include <linux/notifier.h> 30 #include <linux/thread_info.h> 31 #include <linux/time.h> 32 #include <linux/jiffies.h> 33 #include <linux/posix-timers.h> 34 #include <linux/cpu.h> 35 #include <linux/syscalls.h> 36 #include <linux/delay.h> 37 #include <linux/tick.h> 38 #include <linux/kallsyms.h> 39 #include <linux/irq_work.h> 40 #include <linux/sched/signal.h> 41 #include <linux/sched/sysctl.h> 42 #include <linux/sched/nohz.h> 43 #include <linux/sched/debug.h> 44 #include <linux/slab.h> 45 #include <linux/compat.h> 46 #include <linux/random.h> 47 48 #include <linux/uaccess.h> 49 #include <asm/unistd.h> 50 #include <asm/div64.h> 51 #include <asm/timex.h> 52 #include <asm/io.h> 53 54 #include "tick-internal.h" 55 56 #define CREATE_TRACE_POINTS 57 #include <trace/events/timer.h> 58 59 __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; 60 61 EXPORT_SYMBOL(jiffies_64); 62 63 /* 64 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of 65 * LVL_SIZE buckets. Each level is driven by its own clock and therefor each 66 * level has a different granularity. 67 * 68 * The level granularity is: LVL_CLK_DIV ^ lvl 69 * The level clock frequency is: HZ / (LVL_CLK_DIV ^ level) 70 * 71 * The array level of a newly armed timer depends on the relative expiry 72 * time. The farther the expiry time is away the higher the array level and 73 * therefor the granularity becomes. 74 * 75 * Contrary to the original timer wheel implementation, which aims for 'exact' 76 * expiry of the timers, this implementation removes the need for recascading 77 * the timers into the lower array levels. The previous 'classic' timer wheel 78 * implementation of the kernel already violated the 'exact' expiry by adding 79 * slack to the expiry time to provide batched expiration. The granularity 80 * levels provide implicit batching. 81 * 82 * This is an optimization of the original timer wheel implementation for the 83 * majority of the timer wheel use cases: timeouts. The vast majority of 84 * timeout timers (networking, disk I/O ...) are canceled before expiry. If 85 * the timeout expires it indicates that normal operation is disturbed, so it 86 * does not matter much whether the timeout comes with a slight delay. 87 * 88 * The only exception to this are networking timers with a small expiry 89 * time. They rely on the granularity. Those fit into the first wheel level, 90 * which has HZ granularity. 91 * 92 * We don't have cascading anymore. 
timers with a expiry time above the 93 * capacity of the last wheel level are force expired at the maximum timeout 94 * value of the last wheel level. From data sampling we know that the maximum 95 * value observed is 5 days (network connection tracking), so this should not 96 * be an issue. 97 * 98 * The currently chosen array constants values are a good compromise between 99 * array size and granularity. 100 * 101 * This results in the following granularity and range levels: 102 * 103 * HZ 1000 steps 104 * Level Offset Granularity Range 105 * 0 0 1 ms 0 ms - 63 ms 106 * 1 64 8 ms 64 ms - 511 ms 107 * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s) 108 * 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s) 109 * 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m) 110 * 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m) 111 * 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h) 112 * 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d) 113 * 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d) 114 * 115 * HZ 300 116 * Level Offset Granularity Range 117 * 0 0 3 ms 0 ms - 210 ms 118 * 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s) 119 * 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s) 120 * 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m) 121 * 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m) 122 * 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h) 123 * 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h) 124 * 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d) 125 * 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d) 126 * 127 * HZ 250 128 * Level Offset Granularity Range 129 * 0 0 4 ms 0 ms - 255 ms 130 * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s) 131 * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s) 132 * 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m) 133 * 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m) 134 * 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h) 135 * 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h) 136 * 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d) 137 * 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d) 138 * 139 * HZ 100 140 * Level Offset Granularity Range 141 * 0 0 10 ms 0 ms - 630 ms 142 * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s) 143 * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s) 144 * 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m) 145 * 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m) 146 * 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h) 147 * 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d) 148 * 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d) 149 */ 150 151 /* Clock divisor for the next level */ 152 #define LVL_CLK_SHIFT 3 153 #define LVL_CLK_DIV (1UL << LVL_CLK_SHIFT) 154 #define LVL_CLK_MASK (LVL_CLK_DIV - 1) 155 #define LVL_SHIFT(n) ((n) * LVL_CLK_SHIFT) 156 #define LVL_GRAN(n) (1UL << LVL_SHIFT(n)) 157 158 /* 159 * The time start value for each level to select the bucket at enqueue 160 * time. We start from the last possible delta of the previous level 161 * so that we can later add an extra LVL_GRAN(n) to n (see calc_index()). 
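 *
 * As a worked example (not part of the original comment), with the values
 * used in this file (LVL_CLK_SHIFT == 3, LVL_SIZE == 64) this gives
 * LVL_START(1) == 63, LVL_START(2) == 504, LVL_START(3) == 4032 and
 * LVL_START(4) == 32256, i.e. just below the level boundaries listed in
 * the HZ 1000 table above.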
162 */ 163 #define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT)) 164 165 /* Size of each clock level */ 166 #define LVL_BITS 6 167 #define LVL_SIZE (1UL << LVL_BITS) 168 #define LVL_MASK (LVL_SIZE - 1) 169 #define LVL_OFFS(n) ((n) * LVL_SIZE) 170 171 /* Level depth */ 172 #if HZ > 100 173 # define LVL_DEPTH 9 174 # else 175 # define LVL_DEPTH 8 176 #endif 177 178 /* The cutoff (max. capacity of the wheel) */ 179 #define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH)) 180 #define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1)) 181 182 /* 183 * The resulting wheel size. If NOHZ is configured we allocate two 184 * wheels so we have a separate storage for the deferrable timers. 185 */ 186 #define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH) 187 188 #ifdef CONFIG_NO_HZ_COMMON 189 # define NR_BASES 2 190 # define BASE_STD 0 191 # define BASE_DEF 1 192 #else 193 # define NR_BASES 1 194 # define BASE_STD 0 195 # define BASE_DEF 0 196 #endif 197 198 struct timer_base { 199 raw_spinlock_t lock; 200 struct timer_list *running_timer; 201 #ifdef CONFIG_PREEMPT_RT 202 spinlock_t expiry_lock; 203 atomic_t timer_waiters; 204 #endif 205 unsigned long clk; 206 unsigned long next_expiry; 207 unsigned int cpu; 208 bool next_expiry_recalc; 209 bool is_idle; 210 DECLARE_BITMAP(pending_map, WHEEL_SIZE); 211 struct hlist_head vectors[WHEEL_SIZE]; 212 } ____cacheline_aligned; 213 214 static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]); 215 216 #ifdef CONFIG_NO_HZ_COMMON 217 218 static DEFINE_STATIC_KEY_FALSE(timers_nohz_active); 219 static DEFINE_MUTEX(timer_keys_mutex); 220 221 static void timer_update_keys(struct work_struct *work); 222 static DECLARE_WORK(timer_update_work, timer_update_keys); 223 224 #ifdef CONFIG_SMP 225 unsigned int sysctl_timer_migration = 1; 226 227 DEFINE_STATIC_KEY_FALSE(timers_migration_enabled); 228 229 static void timers_update_migration(void) 230 { 231 if (sysctl_timer_migration && tick_nohz_active) 232 static_branch_enable(&timers_migration_enabled); 233 else 234 static_branch_disable(&timers_migration_enabled); 235 } 236 #else 237 static inline void timers_update_migration(void) { } 238 #endif /* !CONFIG_SMP */ 239 240 static void timer_update_keys(struct work_struct *work) 241 { 242 mutex_lock(&timer_keys_mutex); 243 timers_update_migration(); 244 static_branch_enable(&timers_nohz_active); 245 mutex_unlock(&timer_keys_mutex); 246 } 247 248 void timers_update_nohz(void) 249 { 250 schedule_work(&timer_update_work); 251 } 252 253 int timer_migration_handler(struct ctl_table *table, int write, 254 void *buffer, size_t *lenp, loff_t *ppos) 255 { 256 int ret; 257 258 mutex_lock(&timer_keys_mutex); 259 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 260 if (!ret && write) 261 timers_update_migration(); 262 mutex_unlock(&timer_keys_mutex); 263 return ret; 264 } 265 266 static inline bool is_timers_nohz_active(void) 267 { 268 return static_branch_unlikely(&timers_nohz_active); 269 } 270 #else 271 static inline bool is_timers_nohz_active(void) { return false; } 272 #endif /* NO_HZ_COMMON */ 273 274 static unsigned long round_jiffies_common(unsigned long j, int cpu, 275 bool force_up) 276 { 277 int rem; 278 unsigned long original = j; 279 280 /* 281 * We don't want all cpus firing their timers at once hitting the 282 * same lock or cachelines, so we skew each extra cpu with an extra 283 * 3 jiffies. This 3 jiffies came originally from the mm/ code which 284 * already did this. 
285 * The skew is done by adding 3*cpunr, then round, then subtract this 286 * extra offset again. 287 */ 288 j += cpu * 3; 289 290 rem = j % HZ; 291 292 /* 293 * If the target jiffie is just after a whole second (which can happen 294 * due to delays of the timer irq, long irq off times etc etc) then 295 * we should round down to the whole second, not up. Use 1/4th second 296 * as cutoff for this rounding as an extreme upper bound for this. 297 * But never round down if @force_up is set. 298 */ 299 if (rem < HZ/4 && !force_up) /* round down */ 300 j = j - rem; 301 else /* round up */ 302 j = j - rem + HZ; 303 304 /* now that we have rounded, subtract the extra skew again */ 305 j -= cpu * 3; 306 307 /* 308 * Make sure j is still in the future. Otherwise return the 309 * unmodified value. 310 */ 311 return time_is_after_jiffies(j) ? j : original; 312 } 313 314 /** 315 * __round_jiffies - function to round jiffies to a full second 316 * @j: the time in (absolute) jiffies that should be rounded 317 * @cpu: the processor number on which the timeout will happen 318 * 319 * __round_jiffies() rounds an absolute time in the future (in jiffies) 320 * up or down to (approximately) full seconds. This is useful for timers 321 * for which the exact time they fire does not matter too much, as long as 322 * they fire approximately every X seconds. 323 * 324 * By rounding these timers to whole seconds, all such timers will fire 325 * at the same time, rather than at various times spread out. The goal 326 * of this is to have the CPU wake up less, which saves power. 327 * 328 * The exact rounding is skewed for each processor to avoid all 329 * processors firing at the exact same time, which could lead 330 * to lock contention or spurious cache line bouncing. 331 * 332 * The return value is the rounded version of the @j parameter. 333 */ 334 unsigned long __round_jiffies(unsigned long j, int cpu) 335 { 336 return round_jiffies_common(j, cpu, false); 337 } 338 EXPORT_SYMBOL_GPL(__round_jiffies); 339 340 /** 341 * __round_jiffies_relative - function to round jiffies to a full second 342 * @j: the time in (relative) jiffies that should be rounded 343 * @cpu: the processor number on which the timeout will happen 344 * 345 * __round_jiffies_relative() rounds a time delta in the future (in jiffies) 346 * up or down to (approximately) full seconds. This is useful for timers 347 * for which the exact time they fire does not matter too much, as long as 348 * they fire approximately every X seconds. 349 * 350 * By rounding these timers to whole seconds, all such timers will fire 351 * at the same time, rather than at various times spread out. The goal 352 * of this is to have the CPU wake up less, which saves power. 353 * 354 * The exact rounding is skewed for each processor to avoid all 355 * processors firing at the exact same time, which could lead 356 * to lock contention or spurious cache line bouncing. 357 * 358 * The return value is the rounded version of the @j parameter. 
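 *
 * An illustrative use, not taken from the original documentation
 * (my_timer is a made-up name), re-arming a periodic timer that only
 * needs to fire roughly every five seconds::
 *
 *	mod_timer(&my_timer,
 *		  jiffies + __round_jiffies_relative(5 * HZ, raw_smp_processor_id()));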
359 */ 360 unsigned long __round_jiffies_relative(unsigned long j, int cpu) 361 { 362 unsigned long j0 = jiffies; 363 364 /* Use j0 because jiffies might change while we run */ 365 return round_jiffies_common(j + j0, cpu, false) - j0; 366 } 367 EXPORT_SYMBOL_GPL(__round_jiffies_relative); 368 369 /** 370 * round_jiffies - function to round jiffies to a full second 371 * @j: the time in (absolute) jiffies that should be rounded 372 * 373 * round_jiffies() rounds an absolute time in the future (in jiffies) 374 * up or down to (approximately) full seconds. This is useful for timers 375 * for which the exact time they fire does not matter too much, as long as 376 * they fire approximately every X seconds. 377 * 378 * By rounding these timers to whole seconds, all such timers will fire 379 * at the same time, rather than at various times spread out. The goal 380 * of this is to have the CPU wake up less, which saves power. 381 * 382 * The return value is the rounded version of the @j parameter. 383 */ 384 unsigned long round_jiffies(unsigned long j) 385 { 386 return round_jiffies_common(j, raw_smp_processor_id(), false); 387 } 388 EXPORT_SYMBOL_GPL(round_jiffies); 389 390 /** 391 * round_jiffies_relative - function to round jiffies to a full second 392 * @j: the time in (relative) jiffies that should be rounded 393 * 394 * round_jiffies_relative() rounds a time delta in the future (in jiffies) 395 * up or down to (approximately) full seconds. This is useful for timers 396 * for which the exact time they fire does not matter too much, as long as 397 * they fire approximately every X seconds. 398 * 399 * By rounding these timers to whole seconds, all such timers will fire 400 * at the same time, rather than at various times spread out. The goal 401 * of this is to have the CPU wake up less, which saves power. 402 * 403 * The return value is the rounded version of the @j parameter. 404 */ 405 unsigned long round_jiffies_relative(unsigned long j) 406 { 407 return __round_jiffies_relative(j, raw_smp_processor_id()); 408 } 409 EXPORT_SYMBOL_GPL(round_jiffies_relative); 410 411 /** 412 * __round_jiffies_up - function to round jiffies up to a full second 413 * @j: the time in (absolute) jiffies that should be rounded 414 * @cpu: the processor number on which the timeout will happen 415 * 416 * This is the same as __round_jiffies() except that it will never 417 * round down. This is useful for timeouts for which the exact time 418 * of firing does not matter too much, as long as they don't fire too 419 * early. 420 */ 421 unsigned long __round_jiffies_up(unsigned long j, int cpu) 422 { 423 return round_jiffies_common(j, cpu, true); 424 } 425 EXPORT_SYMBOL_GPL(__round_jiffies_up); 426 427 /** 428 * __round_jiffies_up_relative - function to round jiffies up to a full second 429 * @j: the time in (relative) jiffies that should be rounded 430 * @cpu: the processor number on which the timeout will happen 431 * 432 * This is the same as __round_jiffies_relative() except that it will never 433 * round down. This is useful for timeouts for which the exact time 434 * of firing does not matter too much, as long as they don't fire too 435 * early. 
436 */ 437 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu) 438 { 439 unsigned long j0 = jiffies; 440 441 /* Use j0 because jiffies might change while we run */ 442 return round_jiffies_common(j + j0, cpu, true) - j0; 443 } 444 EXPORT_SYMBOL_GPL(__round_jiffies_up_relative); 445 446 /** 447 * round_jiffies_up - function to round jiffies up to a full second 448 * @j: the time in (absolute) jiffies that should be rounded 449 * 450 * This is the same as round_jiffies() except that it will never 451 * round down. This is useful for timeouts for which the exact time 452 * of firing does not matter too much, as long as they don't fire too 453 * early. 454 */ 455 unsigned long round_jiffies_up(unsigned long j) 456 { 457 return round_jiffies_common(j, raw_smp_processor_id(), true); 458 } 459 EXPORT_SYMBOL_GPL(round_jiffies_up); 460 461 /** 462 * round_jiffies_up_relative - function to round jiffies up to a full second 463 * @j: the time in (relative) jiffies that should be rounded 464 * 465 * This is the same as round_jiffies_relative() except that it will never 466 * round down. This is useful for timeouts for which the exact time 467 * of firing does not matter too much, as long as they don't fire too 468 * early. 469 */ 470 unsigned long round_jiffies_up_relative(unsigned long j) 471 { 472 return __round_jiffies_up_relative(j, raw_smp_processor_id()); 473 } 474 EXPORT_SYMBOL_GPL(round_jiffies_up_relative); 475 476 477 static inline unsigned int timer_get_idx(struct timer_list *timer) 478 { 479 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT; 480 } 481 482 static inline void timer_set_idx(struct timer_list *timer, unsigned int idx) 483 { 484 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) | 485 idx << TIMER_ARRAYSHIFT; 486 } 487 488 /* 489 * Helper function to calculate the array index for a given expiry 490 * time. 491 */ 492 static inline unsigned calc_index(unsigned long expires, unsigned lvl, 493 unsigned long *bucket_expiry) 494 { 495 496 /* 497 * The timer wheel has to guarantee that a timer does not fire 498 * early. Early expiry can happen due to: 499 * - Timer is armed at the edge of a tick 500 * - Truncation of the expiry time in the outer wheel levels 501 * 502 * Round up with level granularity to prevent this. 
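 *
 * Illustrative numbers (assuming HZ == 1000 and base->clk == 0, not part
 * of the original comment): a timer with expires == 1000 lands in level 2,
 * where LVL_SHIFT(2) == 6 and LVL_GRAN(2) == 64. The calculation below
 * yields (1000 + 64) >> 6 == 16, so *bucket_expiry becomes 16 << 6 == 1024
 * and the returned index is LVL_OFFS(2) + 16 == 144: the timer fires up to
 * one granularity unit late, but never early.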
503 */ 504 expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl); 505 *bucket_expiry = expires << LVL_SHIFT(lvl); 506 return LVL_OFFS(lvl) + (expires & LVL_MASK); 507 } 508 509 static int calc_wheel_index(unsigned long expires, unsigned long clk, 510 unsigned long *bucket_expiry) 511 { 512 unsigned long delta = expires - clk; 513 unsigned int idx; 514 515 if (delta < LVL_START(1)) { 516 idx = calc_index(expires, 0, bucket_expiry); 517 } else if (delta < LVL_START(2)) { 518 idx = calc_index(expires, 1, bucket_expiry); 519 } else if (delta < LVL_START(3)) { 520 idx = calc_index(expires, 2, bucket_expiry); 521 } else if (delta < LVL_START(4)) { 522 idx = calc_index(expires, 3, bucket_expiry); 523 } else if (delta < LVL_START(5)) { 524 idx = calc_index(expires, 4, bucket_expiry); 525 } else if (delta < LVL_START(6)) { 526 idx = calc_index(expires, 5, bucket_expiry); 527 } else if (delta < LVL_START(7)) { 528 idx = calc_index(expires, 6, bucket_expiry); 529 } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) { 530 idx = calc_index(expires, 7, bucket_expiry); 531 } else if ((long) delta < 0) { 532 idx = clk & LVL_MASK; 533 *bucket_expiry = clk; 534 } else { 535 /* 536 * Force expire obscene large timeouts to expire at the 537 * capacity limit of the wheel. 538 */ 539 if (delta >= WHEEL_TIMEOUT_CUTOFF) 540 expires = clk + WHEEL_TIMEOUT_MAX; 541 542 idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry); 543 } 544 return idx; 545 } 546 547 static void 548 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer) 549 { 550 if (!is_timers_nohz_active()) 551 return; 552 553 /* 554 * TODO: This wants some optimizing similar to the code below, but we 555 * will do that when we switch from push to pull for deferrable timers. 556 */ 557 if (timer->flags & TIMER_DEFERRABLE) { 558 if (tick_nohz_full_cpu(base->cpu)) 559 wake_up_nohz_cpu(base->cpu); 560 return; 561 } 562 563 /* 564 * We might have to IPI the remote CPU if the base is idle and the 565 * timer is not deferrable. If the other CPU is on the way to idle 566 * then it can't set base->is_idle as we hold the base lock: 567 */ 568 if (base->is_idle) 569 wake_up_nohz_cpu(base->cpu); 570 } 571 572 /* 573 * Enqueue the timer into the hash bucket, mark it pending in 574 * the bitmap, store the index in the timer flags then wake up 575 * the target CPU if needed. 576 */ 577 static void enqueue_timer(struct timer_base *base, struct timer_list *timer, 578 unsigned int idx, unsigned long bucket_expiry) 579 { 580 581 hlist_add_head(&timer->entry, base->vectors + idx); 582 __set_bit(idx, base->pending_map); 583 timer_set_idx(timer, idx); 584 585 trace_timer_start(timer, timer->expires, timer->flags); 586 587 /* 588 * Check whether this is the new first expiring timer. The 589 * effective expiry time of the timer is required here 590 * (bucket_expiry) instead of timer->expires. 
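 *
 * (Illustrative, continuing the calc_index() example above: a timer with
 * timer->expires == 1000 queued into level 2 has bucket_expiry == 1024,
 * and it is 1024 that is compared against base->next_expiry here.)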
591 */ 592 if (time_before(bucket_expiry, base->next_expiry)) { 593 /* 594 * Set the next expiry time and kick the CPU so it 595 * can reevaluate the wheel: 596 */ 597 base->next_expiry = bucket_expiry; 598 base->next_expiry_recalc = false; 599 trigger_dyntick_cpu(base, timer); 600 } 601 } 602 603 static void internal_add_timer(struct timer_base *base, struct timer_list *timer) 604 { 605 unsigned long bucket_expiry; 606 unsigned int idx; 607 608 idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry); 609 enqueue_timer(base, timer, idx, bucket_expiry); 610 } 611 612 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS 613 614 static struct debug_obj_descr timer_debug_descr; 615 616 static void *timer_debug_hint(void *addr) 617 { 618 return ((struct timer_list *) addr)->function; 619 } 620 621 static bool timer_is_static_object(void *addr) 622 { 623 struct timer_list *timer = addr; 624 625 return (timer->entry.pprev == NULL && 626 timer->entry.next == TIMER_ENTRY_STATIC); 627 } 628 629 /* 630 * fixup_init is called when: 631 * - an active object is initialized 632 */ 633 static bool timer_fixup_init(void *addr, enum debug_obj_state state) 634 { 635 struct timer_list *timer = addr; 636 637 switch (state) { 638 case ODEBUG_STATE_ACTIVE: 639 del_timer_sync(timer); 640 debug_object_init(timer, &timer_debug_descr); 641 return true; 642 default: 643 return false; 644 } 645 } 646 647 /* Stub timer callback for improperly used timers. */ 648 static void stub_timer(struct timer_list *unused) 649 { 650 WARN_ON(1); 651 } 652 653 /* 654 * fixup_activate is called when: 655 * - an active object is activated 656 * - an unknown non-static object is activated 657 */ 658 static bool timer_fixup_activate(void *addr, enum debug_obj_state state) 659 { 660 struct timer_list *timer = addr; 661 662 switch (state) { 663 case ODEBUG_STATE_NOTAVAILABLE: 664 timer_setup(timer, stub_timer, 0); 665 return true; 666 667 case ODEBUG_STATE_ACTIVE: 668 WARN_ON(1); 669 fallthrough; 670 default: 671 return false; 672 } 673 } 674 675 /* 676 * fixup_free is called when: 677 * - an active object is freed 678 */ 679 static bool timer_fixup_free(void *addr, enum debug_obj_state state) 680 { 681 struct timer_list *timer = addr; 682 683 switch (state) { 684 case ODEBUG_STATE_ACTIVE: 685 del_timer_sync(timer); 686 debug_object_free(timer, &timer_debug_descr); 687 return true; 688 default: 689 return false; 690 } 691 } 692 693 /* 694 * fixup_assert_init is called when: 695 * - an untracked/uninit-ed object is found 696 */ 697 static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state) 698 { 699 struct timer_list *timer = addr; 700 701 switch (state) { 702 case ODEBUG_STATE_NOTAVAILABLE: 703 timer_setup(timer, stub_timer, 0); 704 return true; 705 default: 706 return false; 707 } 708 } 709 710 static struct debug_obj_descr timer_debug_descr = { 711 .name = "timer_list", 712 .debug_hint = timer_debug_hint, 713 .is_static_object = timer_is_static_object, 714 .fixup_init = timer_fixup_init, 715 .fixup_activate = timer_fixup_activate, 716 .fixup_free = timer_fixup_free, 717 .fixup_assert_init = timer_fixup_assert_init, 718 }; 719 720 static inline void debug_timer_init(struct timer_list *timer) 721 { 722 debug_object_init(timer, &timer_debug_descr); 723 } 724 725 static inline void debug_timer_activate(struct timer_list *timer) 726 { 727 debug_object_activate(timer, &timer_debug_descr); 728 } 729 730 static inline void debug_timer_deactivate(struct timer_list *timer) 731 { 732 debug_object_deactivate(timer, &timer_debug_descr); 733 
} 734 735 static inline void debug_timer_free(struct timer_list *timer) 736 { 737 debug_object_free(timer, &timer_debug_descr); 738 } 739 740 static inline void debug_timer_assert_init(struct timer_list *timer) 741 { 742 debug_object_assert_init(timer, &timer_debug_descr); 743 } 744 745 static void do_init_timer(struct timer_list *timer, 746 void (*func)(struct timer_list *), 747 unsigned int flags, 748 const char *name, struct lock_class_key *key); 749 750 void init_timer_on_stack_key(struct timer_list *timer, 751 void (*func)(struct timer_list *), 752 unsigned int flags, 753 const char *name, struct lock_class_key *key) 754 { 755 debug_object_init_on_stack(timer, &timer_debug_descr); 756 do_init_timer(timer, func, flags, name, key); 757 } 758 EXPORT_SYMBOL_GPL(init_timer_on_stack_key); 759 760 void destroy_timer_on_stack(struct timer_list *timer) 761 { 762 debug_object_free(timer, &timer_debug_descr); 763 } 764 EXPORT_SYMBOL_GPL(destroy_timer_on_stack); 765 766 #else 767 static inline void debug_timer_init(struct timer_list *timer) { } 768 static inline void debug_timer_activate(struct timer_list *timer) { } 769 static inline void debug_timer_deactivate(struct timer_list *timer) { } 770 static inline void debug_timer_assert_init(struct timer_list *timer) { } 771 #endif 772 773 static inline void debug_init(struct timer_list *timer) 774 { 775 debug_timer_init(timer); 776 trace_timer_init(timer); 777 } 778 779 static inline void debug_deactivate(struct timer_list *timer) 780 { 781 debug_timer_deactivate(timer); 782 trace_timer_cancel(timer); 783 } 784 785 static inline void debug_assert_init(struct timer_list *timer) 786 { 787 debug_timer_assert_init(timer); 788 } 789 790 static void do_init_timer(struct timer_list *timer, 791 void (*func)(struct timer_list *), 792 unsigned int flags, 793 const char *name, struct lock_class_key *key) 794 { 795 timer->entry.pprev = NULL; 796 timer->function = func; 797 timer->flags = flags | raw_smp_processor_id(); 798 lockdep_init_map(&timer->lockdep_map, name, key, 0); 799 } 800 801 /** 802 * init_timer_key - initialize a timer 803 * @timer: the timer to be initialized 804 * @func: timer callback function 805 * @flags: timer flags 806 * @name: name of the timer 807 * @key: lockdep class key of the fake lock used for tracking timer 808 * sync lock dependencies 809 * 810 * init_timer_key() must be done to a timer prior calling *any* of the 811 * other timer functions. 
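 *
 * Most users do not call this directly but go through the timer_setup()
 * wrapper, which supplies the flags and the lockdep key. An illustrative
 * sketch, with all my_* identifiers made up for the example::
 *
 *	static void my_timeout_fn(struct timer_list *t)
 *	{
 *		struct my_ctx *ctx = from_timer(ctx, t, my_timer);
 *
 *		my_handle_timeout(ctx);
 *	}
 *
 *	timer_setup(&ctx->my_timer, my_timeout_fn, 0);
 *	mod_timer(&ctx->my_timer, jiffies + HZ);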
812 */ 813 void init_timer_key(struct timer_list *timer, 814 void (*func)(struct timer_list *), unsigned int flags, 815 const char *name, struct lock_class_key *key) 816 { 817 debug_init(timer); 818 do_init_timer(timer, func, flags, name, key); 819 } 820 EXPORT_SYMBOL(init_timer_key); 821 822 static inline void detach_timer(struct timer_list *timer, bool clear_pending) 823 { 824 struct hlist_node *entry = &timer->entry; 825 826 debug_deactivate(timer); 827 828 __hlist_del(entry); 829 if (clear_pending) 830 entry->pprev = NULL; 831 entry->next = LIST_POISON2; 832 } 833 834 static int detach_if_pending(struct timer_list *timer, struct timer_base *base, 835 bool clear_pending) 836 { 837 unsigned idx = timer_get_idx(timer); 838 839 if (!timer_pending(timer)) 840 return 0; 841 842 if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) { 843 __clear_bit(idx, base->pending_map); 844 base->next_expiry_recalc = true; 845 } 846 847 detach_timer(timer, clear_pending); 848 return 1; 849 } 850 851 static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu) 852 { 853 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu); 854 855 /* 856 * If the timer is deferrable and NO_HZ_COMMON is set then we need 857 * to use the deferrable base. 858 */ 859 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) 860 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu); 861 return base; 862 } 863 864 static inline struct timer_base *get_timer_this_cpu_base(u32 tflags) 865 { 866 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 867 868 /* 869 * If the timer is deferrable and NO_HZ_COMMON is set then we need 870 * to use the deferrable base. 871 */ 872 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) 873 base = this_cpu_ptr(&timer_bases[BASE_DEF]); 874 return base; 875 } 876 877 static inline struct timer_base *get_timer_base(u32 tflags) 878 { 879 return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK); 880 } 881 882 static inline struct timer_base * 883 get_target_base(struct timer_base *base, unsigned tflags) 884 { 885 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 886 if (static_branch_likely(&timers_migration_enabled) && 887 !(tflags & TIMER_PINNED)) 888 return get_timer_cpu_base(tflags, get_nohz_timer_target()); 889 #endif 890 return get_timer_this_cpu_base(tflags); 891 } 892 893 static inline void forward_timer_base(struct timer_base *base) 894 { 895 unsigned long jnow = READ_ONCE(jiffies); 896 897 /* 898 * No need to forward if we are close enough below jiffies. 899 * Also while executing timers, base->clk is 1 offset ahead 900 * of jiffies to avoid endless requeuing to current jffies. 901 */ 902 if ((long)(jnow - base->clk) < 1) 903 return; 904 905 /* 906 * If the next expiry value is > jiffies, then we fast forward to 907 * jiffies otherwise we forward to the next expiry value. 908 */ 909 if (time_after(base->next_expiry, jnow)) { 910 base->clk = jnow; 911 } else { 912 if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk))) 913 return; 914 base->clk = base->next_expiry; 915 } 916 } 917 918 919 /* 920 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means 921 * that all timers which are tied to this base are locked, and the base itself 922 * is locked too. 923 * 924 * So __run_timers/migrate_timers can safely modify all timers which could 925 * be found in the base->vectors array. 
926 * 927 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need 928 * to wait until the migration is done. 929 */ 930 static struct timer_base *lock_timer_base(struct timer_list *timer, 931 unsigned long *flags) 932 __acquires(timer->base->lock) 933 { 934 for (;;) { 935 struct timer_base *base; 936 u32 tf; 937 938 /* 939 * We need to use READ_ONCE() here, otherwise the compiler 940 * might re-read @tf between the check for TIMER_MIGRATING 941 * and spin_lock(). 942 */ 943 tf = READ_ONCE(timer->flags); 944 945 if (!(tf & TIMER_MIGRATING)) { 946 base = get_timer_base(tf); 947 raw_spin_lock_irqsave(&base->lock, *flags); 948 if (timer->flags == tf) 949 return base; 950 raw_spin_unlock_irqrestore(&base->lock, *flags); 951 } 952 cpu_relax(); 953 } 954 } 955 956 #define MOD_TIMER_PENDING_ONLY 0x01 957 #define MOD_TIMER_REDUCE 0x02 958 #define MOD_TIMER_NOTPENDING 0x04 959 960 static inline int 961 __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options) 962 { 963 unsigned long clk = 0, flags, bucket_expiry; 964 struct timer_base *base, *new_base; 965 unsigned int idx = UINT_MAX; 966 int ret = 0; 967 968 BUG_ON(!timer->function); 969 970 /* 971 * This is a common optimization triggered by the networking code - if 972 * the timer is re-modified to have the same timeout or ends up in the 973 * same array bucket then just return: 974 */ 975 if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) { 976 /* 977 * The downside of this optimization is that it can result in 978 * larger granularity than you would get from adding a new 979 * timer with this expiry. 980 */ 981 long diff = timer->expires - expires; 982 983 if (!diff) 984 return 1; 985 if (options & MOD_TIMER_REDUCE && diff <= 0) 986 return 1; 987 988 /* 989 * We lock timer base and calculate the bucket index right 990 * here. If the timer ends up in the same bucket, then we 991 * just update the expiry time and avoid the whole 992 * dequeue/enqueue dance. 993 */ 994 base = lock_timer_base(timer, &flags); 995 forward_timer_base(base); 996 997 if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) && 998 time_before_eq(timer->expires, expires)) { 999 ret = 1; 1000 goto out_unlock; 1001 } 1002 1003 clk = base->clk; 1004 idx = calc_wheel_index(expires, clk, &bucket_expiry); 1005 1006 /* 1007 * Retrieve and compare the array index of the pending 1008 * timer. If it matches set the expiry to the new value so a 1009 * subsequent call will exit in the expires check above. 1010 */ 1011 if (idx == timer_get_idx(timer)) { 1012 if (!(options & MOD_TIMER_REDUCE)) 1013 timer->expires = expires; 1014 else if (time_after(timer->expires, expires)) 1015 timer->expires = expires; 1016 ret = 1; 1017 goto out_unlock; 1018 } 1019 } else { 1020 base = lock_timer_base(timer, &flags); 1021 forward_timer_base(base); 1022 } 1023 1024 ret = detach_if_pending(timer, base, false); 1025 if (!ret && (options & MOD_TIMER_PENDING_ONLY)) 1026 goto out_unlock; 1027 1028 new_base = get_target_base(base, timer->flags); 1029 1030 if (base != new_base) { 1031 /* 1032 * We are trying to schedule the timer on the new base. 1033 * However we can't change timer's base while it is running, 1034 * otherwise del_timer_sync() can't detect that the timer's 1035 * handler yet has not finished. This also guarantees that the 1036 * timer is serialized wrt itself. 
1037 */ 1038 if (likely(base->running_timer != timer)) { 1039 /* See the comment in lock_timer_base() */ 1040 timer->flags |= TIMER_MIGRATING; 1041 1042 raw_spin_unlock(&base->lock); 1043 base = new_base; 1044 raw_spin_lock(&base->lock); 1045 WRITE_ONCE(timer->flags, 1046 (timer->flags & ~TIMER_BASEMASK) | base->cpu); 1047 forward_timer_base(base); 1048 } 1049 } 1050 1051 debug_timer_activate(timer); 1052 1053 timer->expires = expires; 1054 /* 1055 * If 'idx' was calculated above and the base time did not advance 1056 * between calculating 'idx' and possibly switching the base, only 1057 * enqueue_timer() is required. Otherwise we need to (re)calculate 1058 * the wheel index via internal_add_timer(). 1059 */ 1060 if (idx != UINT_MAX && clk == base->clk) 1061 enqueue_timer(base, timer, idx, bucket_expiry); 1062 else 1063 internal_add_timer(base, timer); 1064 1065 out_unlock: 1066 raw_spin_unlock_irqrestore(&base->lock, flags); 1067 1068 return ret; 1069 } 1070 1071 /** 1072 * mod_timer_pending - modify a pending timer's timeout 1073 * @timer: the pending timer to be modified 1074 * @expires: new timeout in jiffies 1075 * 1076 * mod_timer_pending() is the same for pending timers as mod_timer(), 1077 * but will not re-activate and modify already deleted timers. 1078 * 1079 * It is useful for unserialized use of timers. 1080 */ 1081 int mod_timer_pending(struct timer_list *timer, unsigned long expires) 1082 { 1083 return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY); 1084 } 1085 EXPORT_SYMBOL(mod_timer_pending); 1086 1087 /** 1088 * mod_timer - modify a timer's timeout 1089 * @timer: the timer to be modified 1090 * @expires: new timeout in jiffies 1091 * 1092 * mod_timer() is a more efficient way to update the expire field of an 1093 * active timer (if the timer is inactive it will be activated) 1094 * 1095 * mod_timer(timer, expires) is equivalent to: 1096 * 1097 * del_timer(timer); timer->expires = expires; add_timer(timer); 1098 * 1099 * Note that if there are multiple unserialized concurrent users of the 1100 * same timer, then mod_timer() is the only safe way to modify the timeout, 1101 * since add_timer() cannot modify an already running timer. 1102 * 1103 * The function returns whether it has modified a pending timer or not. 1104 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an 1105 * active timer returns 1.) 1106 */ 1107 int mod_timer(struct timer_list *timer, unsigned long expires) 1108 { 1109 return __mod_timer(timer, expires, 0); 1110 } 1111 EXPORT_SYMBOL(mod_timer); 1112 1113 /** 1114 * timer_reduce - Modify a timer's timeout if it would reduce the timeout 1115 * @timer: The timer to be modified 1116 * @expires: New timeout in jiffies 1117 * 1118 * timer_reduce() is very similar to mod_timer(), except that it will only 1119 * modify a running timer if that would reduce the expiration time (it will 1120 * start a timer that isn't running). 1121 */ 1122 int timer_reduce(struct timer_list *timer, unsigned long expires) 1123 { 1124 return __mod_timer(timer, expires, MOD_TIMER_REDUCE); 1125 } 1126 EXPORT_SYMBOL(timer_reduce); 1127 1128 /** 1129 * add_timer - start a timer 1130 * @timer: the timer to be added 1131 * 1132 * The kernel will do a ->function(@timer) callback from the 1133 * timer interrupt at the ->expires point in the future. The 1134 * current time is 'jiffies'. 1135 * 1136 * The timer's ->expires, ->function fields must be set prior calling this 1137 * function. 
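 *
 * Illustrative only (my_timer is a made-up, already initialized timer)::
 *
 *	my_timer.expires = jiffies + 2 * HZ;
 *	add_timer(&my_timer);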
1138 * 1139 * Timers with an ->expires field in the past will be executed in the next 1140 * timer tick. 1141 */ 1142 void add_timer(struct timer_list *timer) 1143 { 1144 BUG_ON(timer_pending(timer)); 1145 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING); 1146 } 1147 EXPORT_SYMBOL(add_timer); 1148 1149 /** 1150 * add_timer_on - start a timer on a particular CPU 1151 * @timer: the timer to be added 1152 * @cpu: the CPU to start it on 1153 * 1154 * This is not very scalable on SMP. Double adds are not possible. 1155 */ 1156 void add_timer_on(struct timer_list *timer, int cpu) 1157 { 1158 struct timer_base *new_base, *base; 1159 unsigned long flags; 1160 1161 BUG_ON(timer_pending(timer) || !timer->function); 1162 1163 new_base = get_timer_cpu_base(timer->flags, cpu); 1164 1165 /* 1166 * If @timer was on a different CPU, it should be migrated with the 1167 * old base locked to prevent other operations proceeding with the 1168 * wrong base locked. See lock_timer_base(). 1169 */ 1170 base = lock_timer_base(timer, &flags); 1171 if (base != new_base) { 1172 timer->flags |= TIMER_MIGRATING; 1173 1174 raw_spin_unlock(&base->lock); 1175 base = new_base; 1176 raw_spin_lock(&base->lock); 1177 WRITE_ONCE(timer->flags, 1178 (timer->flags & ~TIMER_BASEMASK) | cpu); 1179 } 1180 forward_timer_base(base); 1181 1182 debug_timer_activate(timer); 1183 internal_add_timer(base, timer); 1184 raw_spin_unlock_irqrestore(&base->lock, flags); 1185 } 1186 EXPORT_SYMBOL_GPL(add_timer_on); 1187 1188 /** 1189 * del_timer - deactivate a timer. 1190 * @timer: the timer to be deactivated 1191 * 1192 * del_timer() deactivates a timer - this works on both active and inactive 1193 * timers. 1194 * 1195 * The function returns whether it has deactivated a pending timer or not. 1196 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an 1197 * active timer returns 1.) 1198 */ 1199 int del_timer(struct timer_list *timer) 1200 { 1201 struct timer_base *base; 1202 unsigned long flags; 1203 int ret = 0; 1204 1205 debug_assert_init(timer); 1206 1207 if (timer_pending(timer)) { 1208 base = lock_timer_base(timer, &flags); 1209 ret = detach_if_pending(timer, base, true); 1210 raw_spin_unlock_irqrestore(&base->lock, flags); 1211 } 1212 1213 return ret; 1214 } 1215 EXPORT_SYMBOL(del_timer); 1216 1217 /** 1218 * try_to_del_timer_sync - Try to deactivate a timer 1219 * @timer: timer to delete 1220 * 1221 * This function tries to deactivate a timer. Upon successful (ret >= 0) 1222 * exit the timer is not queued and the handler is not running on any CPU. 1223 */ 1224 int try_to_del_timer_sync(struct timer_list *timer) 1225 { 1226 struct timer_base *base; 1227 unsigned long flags; 1228 int ret = -1; 1229 1230 debug_assert_init(timer); 1231 1232 base = lock_timer_base(timer, &flags); 1233 1234 if (base->running_timer != timer) 1235 ret = detach_if_pending(timer, base, true); 1236 1237 raw_spin_unlock_irqrestore(&base->lock, flags); 1238 1239 return ret; 1240 } 1241 EXPORT_SYMBOL(try_to_del_timer_sync); 1242 1243 #ifdef CONFIG_PREEMPT_RT 1244 static __init void timer_base_init_expiry_lock(struct timer_base *base) 1245 { 1246 spin_lock_init(&base->expiry_lock); 1247 } 1248 1249 static inline void timer_base_lock_expiry(struct timer_base *base) 1250 { 1251 spin_lock(&base->expiry_lock); 1252 } 1253 1254 static inline void timer_base_unlock_expiry(struct timer_base *base) 1255 { 1256 spin_unlock(&base->expiry_lock); 1257 } 1258 1259 /* 1260 * The counterpart to del_timer_wait_running(). 
 *
 * If there is a waiter for base->expiry_lock, then it was waiting for the
 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
 * the waiter to acquire the lock and make progress.
 */
static void timer_sync_wait_running(struct timer_base *base)
{
	if (atomic_read(&base->timer_waiters)) {
		spin_unlock(&base->expiry_lock);
		spin_lock(&base->expiry_lock);
	}
}

/*
 * This function is called on PREEMPT_RT kernels when the fast path
 * deletion of a timer failed because the timer callback function was
 * running.
 *
 * This prevents priority inversion if the softirq thread on a remote CPU
 * got preempted, and it prevents a live lock when the task which tries to
 * delete a timer preempted the softirq thread running the timer callback
 * function.
 */
static void del_timer_wait_running(struct timer_list *timer)
{
	u32 tf;

	tf = READ_ONCE(timer->flags);
	if (!(tf & TIMER_MIGRATING)) {
		struct timer_base *base = get_timer_base(tf);

		/*
		 * Mark the base as contended and grab the expiry lock,
		 * which is held by the softirq across the timer
		 * callback. Drop the lock immediately so the softirq can
		 * expire the next timer. In theory the timer could already
		 * be running again, but that's more than unlikely and just
		 * causes another wait loop.
		 */
		atomic_inc(&base->timer_waiters);
		spin_lock_bh(&base->expiry_lock);
		atomic_dec(&base->timer_waiters);
		spin_unlock_bh(&base->expiry_lock);
	}
}
#else
static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
static inline void timer_base_lock_expiry(struct timer_base *base) { }
static inline void timer_base_unlock_expiry(struct timer_base *base) { }
static inline void timer_sync_wait_running(struct timer_base *base) { }
static inline void del_timer_wait_running(struct timer_list *timer) { }
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 * interrupt context while calling this function. Even if the lock has
 * nothing to do with the timer in question. Here's why::
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *    <IRQ>
 *       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *    while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
1346 * The interrupt on the other CPU is waiting to grab somelock but 1347 * it has interrupted the softirq that CPU0 is waiting to finish. 1348 * 1349 * The function returns whether it has deactivated a pending timer or not. 1350 */ 1351 int del_timer_sync(struct timer_list *timer) 1352 { 1353 int ret; 1354 1355 #ifdef CONFIG_LOCKDEP 1356 unsigned long flags; 1357 1358 /* 1359 * If lockdep gives a backtrace here, please reference 1360 * the synchronization rules above. 1361 */ 1362 local_irq_save(flags); 1363 lock_map_acquire(&timer->lockdep_map); 1364 lock_map_release(&timer->lockdep_map); 1365 local_irq_restore(flags); 1366 #endif 1367 /* 1368 * don't use it in hardirq context, because it 1369 * could lead to deadlock. 1370 */ 1371 WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE)); 1372 1373 do { 1374 ret = try_to_del_timer_sync(timer); 1375 1376 if (unlikely(ret < 0)) { 1377 del_timer_wait_running(timer); 1378 cpu_relax(); 1379 } 1380 } while (ret < 0); 1381 1382 return ret; 1383 } 1384 EXPORT_SYMBOL(del_timer_sync); 1385 #endif 1386 1387 static void call_timer_fn(struct timer_list *timer, 1388 void (*fn)(struct timer_list *), 1389 unsigned long baseclk) 1390 { 1391 int count = preempt_count(); 1392 1393 #ifdef CONFIG_LOCKDEP 1394 /* 1395 * It is permissible to free the timer from inside the 1396 * function that is called from it, this we need to take into 1397 * account for lockdep too. To avoid bogus "held lock freed" 1398 * warnings as well as problems when looking into 1399 * timer->lockdep_map, make a copy and use that here. 1400 */ 1401 struct lockdep_map lockdep_map; 1402 1403 lockdep_copy_map(&lockdep_map, &timer->lockdep_map); 1404 #endif 1405 /* 1406 * Couple the lock chain with the lock chain at 1407 * del_timer_sync() by acquiring the lock_map around the fn() 1408 * call here and in del_timer_sync(). 1409 */ 1410 lock_map_acquire(&lockdep_map); 1411 1412 trace_timer_expire_entry(timer, baseclk); 1413 fn(timer); 1414 trace_timer_expire_exit(timer); 1415 1416 lock_map_release(&lockdep_map); 1417 1418 if (count != preempt_count()) { 1419 WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n", 1420 fn, count, preempt_count()); 1421 /* 1422 * Restore the preempt count. That gives us a decent 1423 * chance to survive and extract information. If the 1424 * callback kept a lock held, bad luck, but not worse 1425 * than the BUG() we had. 1426 */ 1427 preempt_count_set(count); 1428 } 1429 } 1430 1431 static void expire_timers(struct timer_base *base, struct hlist_head *head) 1432 { 1433 /* 1434 * This value is required only for tracing. base->clk was 1435 * incremented directly before expire_timers was called. But expiry 1436 * is related to the old base->clk value. 
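 *
 * (Illustrative, not part of the original comment: if the timers now in
 * @head were collected while base->clk was 1000, __run_timers() has
 * already advanced base->clk to 1001 by the time we get here, so baseclk
 * is computed back as 1000.)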
1437 */ 1438 unsigned long baseclk = base->clk - 1; 1439 1440 while (!hlist_empty(head)) { 1441 struct timer_list *timer; 1442 void (*fn)(struct timer_list *); 1443 1444 timer = hlist_entry(head->first, struct timer_list, entry); 1445 1446 base->running_timer = timer; 1447 detach_timer(timer, true); 1448 1449 fn = timer->function; 1450 1451 if (timer->flags & TIMER_IRQSAFE) { 1452 raw_spin_unlock(&base->lock); 1453 call_timer_fn(timer, fn, baseclk); 1454 base->running_timer = NULL; 1455 raw_spin_lock(&base->lock); 1456 } else { 1457 raw_spin_unlock_irq(&base->lock); 1458 call_timer_fn(timer, fn, baseclk); 1459 base->running_timer = NULL; 1460 timer_sync_wait_running(base); 1461 raw_spin_lock_irq(&base->lock); 1462 } 1463 } 1464 } 1465 1466 static int collect_expired_timers(struct timer_base *base, 1467 struct hlist_head *heads) 1468 { 1469 unsigned long clk = base->clk = base->next_expiry; 1470 struct hlist_head *vec; 1471 int i, levels = 0; 1472 unsigned int idx; 1473 1474 for (i = 0; i < LVL_DEPTH; i++) { 1475 idx = (clk & LVL_MASK) + i * LVL_SIZE; 1476 1477 if (__test_and_clear_bit(idx, base->pending_map)) { 1478 vec = base->vectors + idx; 1479 hlist_move_list(vec, heads++); 1480 levels++; 1481 } 1482 /* Is it time to look at the next level? */ 1483 if (clk & LVL_CLK_MASK) 1484 break; 1485 /* Shift clock for the next level granularity */ 1486 clk >>= LVL_CLK_SHIFT; 1487 } 1488 return levels; 1489 } 1490 1491 /* 1492 * Find the next pending bucket of a level. Search from level start (@offset) 1493 * + @clk upwards and if nothing there, search from start of the level 1494 * (@offset) up to @offset + clk. 1495 */ 1496 static int next_pending_bucket(struct timer_base *base, unsigned offset, 1497 unsigned clk) 1498 { 1499 unsigned pos, start = offset + clk; 1500 unsigned end = offset + LVL_SIZE; 1501 1502 pos = find_next_bit(base->pending_map, end, start); 1503 if (pos < end) 1504 return pos - start; 1505 1506 pos = find_next_bit(base->pending_map, start, offset); 1507 return pos < start ? pos + LVL_SIZE - start : -1; 1508 } 1509 1510 /* 1511 * Search the first expiring timer in the various clock levels. Caller must 1512 * hold base->lock. 1513 */ 1514 static unsigned long __next_timer_interrupt(struct timer_base *base) 1515 { 1516 unsigned long clk, next, adj; 1517 unsigned lvl, offset = 0; 1518 1519 next = base->clk + NEXT_TIMER_MAX_DELTA; 1520 clk = base->clk; 1521 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { 1522 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); 1523 unsigned long lvl_clk = clk & LVL_CLK_MASK; 1524 1525 if (pos >= 0) { 1526 unsigned long tmp = clk + (unsigned long) pos; 1527 1528 tmp <<= LVL_SHIFT(lvl); 1529 if (time_before(tmp, next)) 1530 next = tmp; 1531 1532 /* 1533 * If the next expiration happens before we reach 1534 * the next level, no need to check further. 1535 */ 1536 if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK)) 1537 break; 1538 } 1539 /* 1540 * Clock for the next level. If the current level clock lower 1541 * bits are zero, we look at the next level as is. If not we 1542 * need to advance it by one because that's going to be the 1543 * next expiring bucket in that level. base->clk is the next 1544 * expiring jiffie. So in case of: 1545 * 1546 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1547 * 0 0 0 0 0 0 1548 * 1549 * we have to look at all levels @index 0. With 1550 * 1551 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1552 * 0 0 0 0 0 2 1553 * 1554 * LVL0 has the next expiring bucket @index 2. 
The upper 1555 * levels have the next expiring bucket @index 1. 1556 * 1557 * In case that the propagation wraps the next level the same 1558 * rules apply: 1559 * 1560 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1561 * 0 0 0 0 F 2 1562 * 1563 * So after looking at LVL0 we get: 1564 * 1565 * LVL5 LVL4 LVL3 LVL2 LVL1 1566 * 0 0 0 1 0 1567 * 1568 * So no propagation from LVL1 to LVL2 because that happened 1569 * with the add already, but then we need to propagate further 1570 * from LVL2 to LVL3. 1571 * 1572 * So the simple check whether the lower bits of the current 1573 * level are 0 or not is sufficient for all cases. 1574 */ 1575 adj = lvl_clk ? 1 : 0; 1576 clk >>= LVL_CLK_SHIFT; 1577 clk += adj; 1578 } 1579 1580 base->next_expiry_recalc = false; 1581 1582 return next; 1583 } 1584 1585 #ifdef CONFIG_NO_HZ_COMMON 1586 /* 1587 * Check, if the next hrtimer event is before the next timer wheel 1588 * event: 1589 */ 1590 static u64 cmp_next_hrtimer_event(u64 basem, u64 expires) 1591 { 1592 u64 nextevt = hrtimer_get_next_event(); 1593 1594 /* 1595 * If high resolution timers are enabled 1596 * hrtimer_get_next_event() returns KTIME_MAX. 1597 */ 1598 if (expires <= nextevt) 1599 return expires; 1600 1601 /* 1602 * If the next timer is already expired, return the tick base 1603 * time so the tick is fired immediately. 1604 */ 1605 if (nextevt <= basem) 1606 return basem; 1607 1608 /* 1609 * Round up to the next jiffie. High resolution timers are 1610 * off, so the hrtimers are expired in the tick and we need to 1611 * make sure that this tick really expires the timer to avoid 1612 * a ping pong of the nohz stop code. 1613 * 1614 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3 1615 */ 1616 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC; 1617 } 1618 1619 /** 1620 * get_next_timer_interrupt - return the time (clock mono) of the next timer 1621 * @basej: base time jiffies 1622 * @basem: base time clock monotonic 1623 * 1624 * Returns the tick aligned clock monotonic time of the next pending 1625 * timer or KTIME_MAX if no timer is pending. 1626 */ 1627 u64 get_next_timer_interrupt(unsigned long basej, u64 basem) 1628 { 1629 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1630 u64 expires = KTIME_MAX; 1631 unsigned long nextevt; 1632 bool is_max_delta; 1633 1634 /* 1635 * Pretend that there is no timer pending if the cpu is offline. 1636 * Possible pending timers will be migrated later to an active cpu. 1637 */ 1638 if (cpu_is_offline(smp_processor_id())) 1639 return expires; 1640 1641 raw_spin_lock(&base->lock); 1642 if (base->next_expiry_recalc) 1643 base->next_expiry = __next_timer_interrupt(base); 1644 nextevt = base->next_expiry; 1645 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); 1646 1647 /* 1648 * We have a fresh next event. Check whether we can forward the 1649 * base. We can only do that when @basej is past base->clk 1650 * otherwise we might rewind base->clk. 1651 */ 1652 if (time_after(basej, base->clk)) { 1653 if (time_after(nextevt, basej)) 1654 base->clk = basej; 1655 else if (time_after(nextevt, base->clk)) 1656 base->clk = nextevt; 1657 } 1658 1659 if (time_before_eq(nextevt, basej)) { 1660 expires = basem; 1661 base->is_idle = false; 1662 } else { 1663 if (!is_max_delta) 1664 expires = basem + (u64)(nextevt - basej) * TICK_NSEC; 1665 /* 1666 * If we expect to sleep more than a tick, mark the base idle. 1667 * Also the tick is stopped so any added timer must forward 1668 * the base clk itself to keep granularity small. 
This idle 1669 * logic is only maintained for the BASE_STD base, deferrable 1670 * timers may still see large granularity skew (by design). 1671 */ 1672 if ((expires - basem) > TICK_NSEC) 1673 base->is_idle = true; 1674 } 1675 raw_spin_unlock(&base->lock); 1676 1677 return cmp_next_hrtimer_event(basem, expires); 1678 } 1679 1680 /** 1681 * timer_clear_idle - Clear the idle state of the timer base 1682 * 1683 * Called with interrupts disabled 1684 */ 1685 void timer_clear_idle(void) 1686 { 1687 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1688 1689 /* 1690 * We do this unlocked. The worst outcome is a remote enqueue sending 1691 * a pointless IPI, but taking the lock would just make the window for 1692 * sending the IPI a few instructions smaller for the cost of taking 1693 * the lock in the exit from idle path. 1694 */ 1695 base->is_idle = false; 1696 } 1697 #endif 1698 1699 /* 1700 * Called from the timer interrupt handler to charge one tick to the current 1701 * process. user_tick is 1 if the tick is user time, 0 for system. 1702 */ 1703 void update_process_times(int user_tick) 1704 { 1705 struct task_struct *p = current; 1706 1707 /* Note: this timer irq context must be accounted for as well. */ 1708 account_process_tick(p, user_tick); 1709 run_local_timers(); 1710 rcu_sched_clock_irq(user_tick); 1711 #ifdef CONFIG_IRQ_WORK 1712 if (in_irq()) 1713 irq_work_tick(); 1714 #endif 1715 scheduler_tick(); 1716 if (IS_ENABLED(CONFIG_POSIX_TIMERS)) 1717 run_posix_cpu_timers(); 1718 1719 /* The current CPU might make use of net randoms without receiving IRQs 1720 * to renew them often enough. Let's update the net_rand_state from a 1721 * non-constant value that's not affine to the number of calls to make 1722 * sure it's updated when there's some activity (we don't care in idle). 1723 */ 1724 this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick); 1725 } 1726 1727 /** 1728 * __run_timers - run all expired timers (if any) on this CPU. 1729 * @base: the timer vector to be processed. 1730 */ 1731 static inline void __run_timers(struct timer_base *base) 1732 { 1733 struct hlist_head heads[LVL_DEPTH]; 1734 int levels; 1735 1736 if (time_before(jiffies, base->next_expiry)) 1737 return; 1738 1739 timer_base_lock_expiry(base); 1740 raw_spin_lock_irq(&base->lock); 1741 1742 while (time_after_eq(jiffies, base->clk) && 1743 time_after_eq(jiffies, base->next_expiry)) { 1744 levels = collect_expired_timers(base, heads); 1745 /* 1746 * The only possible reason for not finding any expired 1747 * timer at this clk is that all matching timers have been 1748 * dequeued. 1749 */ 1750 WARN_ON_ONCE(!levels && !base->next_expiry_recalc); 1751 base->clk++; 1752 base->next_expiry = __next_timer_interrupt(base); 1753 1754 while (levels--) 1755 expire_timers(base, heads + levels); 1756 } 1757 raw_spin_unlock_irq(&base->lock); 1758 timer_base_unlock_expiry(base); 1759 } 1760 1761 /* 1762 * This function runs timers and the timer-tq in bottom half context. 1763 */ 1764 static __latent_entropy void run_timer_softirq(struct softirq_action *h) 1765 { 1766 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1767 1768 __run_timers(base); 1769 if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) 1770 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); 1771 } 1772 1773 /* 1774 * Called by the local, per-CPU timer interrupt on SMP. 

/**
 * get_next_timer_interrupt - return the time (clock mono) of the next timer
 * @basej: base time jiffies
 * @basem: base time clock monotonic
 *
 * Returns the tick aligned clock monotonic time of the next pending
 * timer or KTIME_MAX if no timer is pending.
 */
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
        u64 expires = KTIME_MAX;
        unsigned long nextevt;
        bool is_max_delta;

        /*
         * Pretend that there is no timer pending if the cpu is offline.
         * Possible pending timers will be migrated later to an active cpu.
         */
        if (cpu_is_offline(smp_processor_id()))
                return expires;

        raw_spin_lock(&base->lock);
        if (base->next_expiry_recalc)
                base->next_expiry = __next_timer_interrupt(base);
        nextevt = base->next_expiry;
        is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);

        /*
         * We have a fresh next event. Check whether we can forward the
         * base. We can only do that when @basej is past base->clk
         * otherwise we might rewind base->clk.
         */
        if (time_after(basej, base->clk)) {
                if (time_after(nextevt, basej))
                        base->clk = basej;
                else if (time_after(nextevt, base->clk))
                        base->clk = nextevt;
        }

        if (time_before_eq(nextevt, basej)) {
                expires = basem;
                base->is_idle = false;
        } else {
                if (!is_max_delta)
                        expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
                /*
                 * If we expect to sleep more than a tick, mark the base idle.
                 * Also the tick is stopped so any added timer must forward
                 * the base clk itself to keep granularity small. This idle
                 * logic is only maintained for the BASE_STD base, deferrable
                 * timers may still see large granularity skew (by design).
                 */
                if ((expires - basem) > TICK_NSEC)
                        base->is_idle = true;
        }
        raw_spin_unlock(&base->lock);

        return cmp_next_hrtimer_event(basem, expires);
}

/**
 * timer_clear_idle - Clear the idle state of the timer base
 *
 * Called with interrupts disabled
 */
void timer_clear_idle(void)
{
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

        /*
         * We do this unlocked. The worst outcome is a remote enqueue sending
         * a pointless IPI, but taking the lock would just make the window for
         * sending the IPI a few instructions smaller for the cost of taking
         * the lock in the exit from idle path.
         */
        base->is_idle = false;
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;

        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
        run_local_timers();
        rcu_sched_clock_irq(user_tick);
#ifdef CONFIG_IRQ_WORK
        if (in_irq())
                irq_work_tick();
#endif
        scheduler_tick();
        if (IS_ENABLED(CONFIG_POSIX_TIMERS))
                run_posix_cpu_timers();

        /*
         * The current CPU might make use of net randoms without receiving IRQs
         * to renew them often enough. Let's update the net_rand_state from a
         * non-constant value that's not affine to the number of calls to make
         * sure it's updated when there's some activity (we don't care in idle).
         */
        this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
}

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 */
static inline void __run_timers(struct timer_base *base)
{
        struct hlist_head heads[LVL_DEPTH];
        int levels;

        if (time_before(jiffies, base->next_expiry))
                return;

        timer_base_lock_expiry(base);
        raw_spin_lock_irq(&base->lock);

        while (time_after_eq(jiffies, base->clk) &&
               time_after_eq(jiffies, base->next_expiry)) {
                levels = collect_expired_timers(base, heads);
                /*
                 * The only possible reason for not finding any expired
                 * timer at this clk is that all matching timers have been
                 * dequeued.
                 */
                WARN_ON_ONCE(!levels && !base->next_expiry_recalc);
                base->clk++;
                base->next_expiry = __next_timer_interrupt(base);

                while (levels--)
                        expire_timers(base, heads + levels);
        }
        raw_spin_unlock_irq(&base->lock);
        timer_base_unlock_expiry(base);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

        __run_timers(base);
        if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
                __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

        hrtimer_run_queues();
        /* Raise the softirq only if required. */
        if (time_before(jiffies, base->next_expiry)) {
                if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
                        return;
                /* CPU is awake, so check the deferrable base. */
                base++;
                if (time_before(jiffies, base->next_expiry))
                        return;
        }
        raise_softirq(TIMER_SOFTIRQ);
}
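
/*
 * Summary sketch of the per-tick flow implemented by the functions above
 * (for orientation only):
 *
 *        timer interrupt
 *            update_process_times()
 *                run_local_timers()
 *                    raise_softirq(TIMER_SOFTIRQ), if a wheel has a due timer
 *            ... on softirq execution ...
 *            run_timer_softirq()
 *                __run_timers() for BASE_STD (and BASE_DEF with NO_HZ_COMMON)
 *                    collect_expired_timers() + expire_timers()
 */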

/*
 * Since schedule_timeout()'s timer is defined on the stack, it must store
 * the target task on the stack as well.
 */
struct process_timer {
        struct timer_list timer;
        struct task_struct *task;
};

static void process_timeout(struct timer_list *t)
{
        struct process_timer *timeout = from_timer(timeout, t, timer);

        wake_up_process(timeout->task);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have elapsed.
 * The function behavior depends on the current task state
 * (see also set_current_state() description):
 *
 * %TASK_RUNNING - the scheduler is called, but the task does not sleep
 * at all. That happens because sched_submit_work() does nothing for
 * tasks in %TASK_RUNNING state.
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be %TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * Returns 0 when the timer has expired, otherwise the remaining time in
 * jiffies is returned. In all cases the return value is guaranteed to be
 * non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
        struct process_timer timer;
        unsigned long expire;

        switch (timeout) {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * This special case is a convenience for the caller. Nothing
                 * more. We could have taken MAX_SCHEDULE_TIMEOUT from one of
                 * the negative values, but returning a valid offset (>= 0)
                 * lets the caller do whatever it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of paranoia. Note that the retval will be 0,
                 * since no piece of kernel is supposed to check for a
                 * negative retval of schedule_timeout() (it should never
                 * happen anyway). The printk() below will tell you if
                 * something has gone wrong, and where.
                 */
                if (timeout < 0) {
                        printk(KERN_ERR "schedule_timeout: wrong timeout value %lx\n",
                               timeout);
                        dump_stack();
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        timer.task = current;
        timer_setup_on_stack(&timer.timer, process_timeout, 0);
        __mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
        schedule();
        del_singleshot_timer_sync(&timer.timer);

        /* Remove the timer from the object tracker */
        destroy_timer_on_stack(&timer.timer);

        timeout = expire - jiffies;

 out:
        return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
        __set_current_state(TASK_KILLABLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/*
 * Like schedule_timeout_uninterruptible(), except this task will not
 * contribute to load average.
 */
signed long __sched schedule_timeout_idle(signed long timeout)
{
        __set_current_state(TASK_IDLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_idle);
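
/*
 * Usage sketch (documentation only): the canonical pattern for a caller
 * of schedule_timeout(). "condition" and the 100ms budget are made up
 * for illustration.
 *
 *        signed long remaining = msecs_to_jiffies(100);
 *
 *        set_current_state(TASK_INTERRUPTIBLE);
 *        while (!condition) {
 *                remaining = schedule_timeout(remaining);
 *                if (!remaining || signal_pending(current))
 *                        break;
 *                set_current_state(TASK_INTERRUPTIBLE);
 *        }
 *        __set_current_state(TASK_RUNNING);
 *
 * Setting the task state before testing the condition avoids losing a
 * wakeup that arrives in between; the helpers above simply set the state
 * and delegate to schedule_timeout().
 */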

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
{
        struct timer_list *timer;
        int cpu = new_base->cpu;

        while (!hlist_empty(head)) {
                timer = hlist_entry(head->first, struct timer_list, entry);
                detach_timer(timer, false);
                timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
                internal_add_timer(new_base, timer);
        }
}

int timers_prepare_cpu(unsigned int cpu)
{
        struct timer_base *base;
        int b;

        for (b = 0; b < NR_BASES; b++) {
                base = per_cpu_ptr(&timer_bases[b], cpu);
                base->clk = jiffies;
                base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
                base->is_idle = false;
        }
        return 0;
}

int timers_dead_cpu(unsigned int cpu)
{
        struct timer_base *old_base;
        struct timer_base *new_base;
        int b, i;

        BUG_ON(cpu_online(cpu));

        for (b = 0; b < NR_BASES; b++) {
                old_base = per_cpu_ptr(&timer_bases[b], cpu);
                new_base = get_cpu_ptr(&timer_bases[b]);
                /*
                 * The caller is globally serialized and nobody else
                 * takes two locks at once, so deadlock is not possible.
                 */
                raw_spin_lock_irq(&new_base->lock);
                raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

                /*
                 * The current CPU's base clock might be stale. Update it
                 * before moving the timers over.
                 */
                forward_timer_base(new_base);

                BUG_ON(old_base->running_timer);

                for (i = 0; i < WHEEL_SIZE; i++)
                        migrate_timer_list(new_base, old_base->vectors + i);

                raw_spin_unlock(&old_base->lock);
                raw_spin_unlock_irq(&new_base->lock);
                put_cpu_ptr(&timer_bases);
        }
        return 0;
}

#endif /* CONFIG_HOTPLUG_CPU */

static void __init init_timer_cpu(int cpu)
{
        struct timer_base *base;
        int i;

        for (i = 0; i < NR_BASES; i++) {
                base = per_cpu_ptr(&timer_bases[i], cpu);
                base->cpu = cpu;
                raw_spin_lock_init(&base->lock);
                base->clk = jiffies;
                base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
                timer_base_init_expiry_lock(base);
        }
}

static void __init init_timer_cpus(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                init_timer_cpu(cpu);
}

void __init init_timers(void)
{
        init_timer_cpus();
        posix_cputimers_init_work();
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout)
                timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout && !signal_pending(current))
                timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);

/**
 * usleep_range - Sleep for an approximate time
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 *
 * In non-atomic context where the exact wakeup time is flexible, use
 * usleep_range() instead of udelay(). The sleep improves responsiveness
 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
 * power usage by allowing hrtimers to take advantage of an already-
 * scheduled interrupt instead of scheduling a new one just for this sleep.
 */
void __sched usleep_range(unsigned long min, unsigned long max)
{
        ktime_t exp = ktime_add_us(ktime_get(), min);
        u64 delta = (u64)(max - min) * NSEC_PER_USEC;

        for (;;) {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                /* Do not return before the requested sleep time has elapsed */
                if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
                        break;
        }
}
EXPORT_SYMBOL(usleep_range);
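
/*
 * Usage sketch (documentation only): waiting roughly half a millisecond
 * for hardware to settle, where the wakeup may land anywhere inside the
 * window. All identifiers except usleep_range()/readl()/writel() are
 * hypothetical.
 *
 *        writel(EXAMPLE_RESET, regs + EXAMPLE_CTRL);
 *        usleep_range(500, 1000);
 *        if (!(readl(regs + EXAMPLE_STATUS) & EXAMPLE_READY))
 *                return -ETIMEDOUT;
 *
 * The wider the (min, max) window, the better the chance that the wakeup
 * can ride on an hrtimer interrupt that is already programmed. For sleeps
 * in the multi-millisecond range and above, msleep() above is usually the
 * simpler choice.
 */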