// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel internal timers
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *             serialize accesses to xtime/lost_ticks).
 *             Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10  Improved NTP compatibility by Ulrich Windl
 * 2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *             Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *             Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
 * level has a different granularity.
 *
 * The level granularity is:		LVL_CLK_DIV ^ lvl
 * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
 *
 * The array level of a newly armed timer depends on the relative expiry
 * time. The farther away the expiry time is, the higher the array level and
 * therefore the coarser the granularity becomes.
 *
 * Contrary to the original timer wheel implementation, which aims for 'exact'
 * expiry of the timers, this implementation removes the need for recascading
 * the timers into the lower array levels. The previous 'classic' timer wheel
 * implementation of the kernel already violated the 'exact' expiry by adding
 * slack to the expiry time to provide batched expiration. The granularity
 * levels provide implicit batching.
 *
 * This is an optimization of the original timer wheel implementation for the
 * majority of the timer wheel use cases: timeouts. The vast majority of
 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
 * the timeout expires it indicates that normal operation is disturbed, so it
 * does not matter much whether the timeout comes with a slight delay.
 *
 * The only exception to this are networking timers with a small expiry
 * time. They rely on the granularity. Those fit into the first wheel level,
 * which has HZ granularity.
 *
 * We don't have cascading anymore.
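 *
 * As a rough worked example (HZ=1000, values taken from the table below):
 * a timer armed 120 ms ahead falls into the 64 ms - 511 ms range of level 1,
 * where the granularity is 8 ms, so its expiry is rounded up to the next
 * 8 ms bucket and it may fire up to 8 ms later than requested. That is
 * acceptable for a timeout which is usually canceled before it expires.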
 * Timers with an expiry time above the capacity of the last wheel level are
 * force expired at the maximum timeout value of the last wheel level. From
 * data sampling we know that the maximum value observed is 5 days (network
 * connection tracking), so this should not be an issue.
 *
 * The currently chosen array constants are a good compromise between array
 * size and granularity.
 *
 * This results in the following granularity and range levels:
 *
 * HZ 1000 steps
 * Level Offset  Granularity            Range
 *  0      0         1 ms                0 ms -         63 ms
 *  1     64         8 ms               64 ms -        511 ms
 *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
 *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
 *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
 *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
 *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
 *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
 *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
 *
 * HZ  300
 * Level Offset  Granularity            Range
 *  0      0         3 ms                0 ms -        210 ms
 *  1     64        26 ms              213 ms -       1703 ms (213ms - ~1s)
 *  2    128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
 *  3    192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
 *  4    256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
 *  5    320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
 *  6    384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
 *  7    448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
 *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 *
 * HZ  250
 * Level Offset  Granularity            Range
 *  0      0         4 ms                0 ms -        255 ms
 *  1     64        32 ms              256 ms -       2047 ms (256ms - ~2s)
 *  2    128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
 *  3    192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
 *  4    256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
 *  5    320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
 *  6    384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
 *  7    448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
 *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 *
 * HZ  100
 * Level Offset  Granularity            Range
 *  0      0        10 ms                0 ms -        630 ms
 *  1     64        80 ms              640 ms -       5110 ms (640ms - ~5s)
 *  2    128       640 ms             5120 ms -      40950 ms (~5s - ~40s)
 *  3    192      5120 ms (~5s)      40960 ms -     327670 ms (~40s - ~5m)
 *  4    256     40960 ms (~40s)    327680 ms -    2621430 ms (~5m - ~43m)
 *  5    320    327680 ms (~5m)    2621440 ms -   20971510 ms (~43m - ~5h)
 *  6    384   2621440 ms (~43m)  20971520 ms -  167772150 ms (~5h - ~1d)
 *  7    448  20971520 ms (~5h)  167772160 ms - 1342177270 ms (~1d - ~15d)
 */

/* Clock divisor for the next level */
#define LVL_CLK_SHIFT	3
#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))

/*
 * The time start value for each level to select the bucket at enqueue
 * time.
 */
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

/* Size of each clock level */
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_MASK	(LVL_SIZE - 1)
#define LVL_OFFS(n)	((n) * LVL_SIZE)

/* Level depth */
#if HZ > 100
# define LVL_DEPTH	9
# else
# define LVL_DEPTH	8
#endif

/* The cutoff (max.
capacity of the wheel) */ 177 #define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH)) 178 #define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1)) 179 180 /* 181 * The resulting wheel size. If NOHZ is configured we allocate two 182 * wheels so we have a separate storage for the deferrable timers. 183 */ 184 #define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH) 185 186 #ifdef CONFIG_NO_HZ_COMMON 187 # define NR_BASES 2 188 # define BASE_STD 0 189 # define BASE_DEF 1 190 #else 191 # define NR_BASES 1 192 # define BASE_STD 0 193 # define BASE_DEF 0 194 #endif 195 196 struct timer_base { 197 raw_spinlock_t lock; 198 struct timer_list *running_timer; 199 #ifdef CONFIG_PREEMPT_RT 200 spinlock_t expiry_lock; 201 atomic_t timer_waiters; 202 #endif 203 unsigned long clk; 204 unsigned long next_expiry; 205 unsigned int cpu; 206 bool is_idle; 207 bool must_forward_clk; 208 DECLARE_BITMAP(pending_map, WHEEL_SIZE); 209 struct hlist_head vectors[WHEEL_SIZE]; 210 } ____cacheline_aligned; 211 212 static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]); 213 214 #ifdef CONFIG_NO_HZ_COMMON 215 216 static DEFINE_STATIC_KEY_FALSE(timers_nohz_active); 217 static DEFINE_MUTEX(timer_keys_mutex); 218 219 static void timer_update_keys(struct work_struct *work); 220 static DECLARE_WORK(timer_update_work, timer_update_keys); 221 222 #ifdef CONFIG_SMP 223 unsigned int sysctl_timer_migration = 1; 224 225 DEFINE_STATIC_KEY_FALSE(timers_migration_enabled); 226 227 static void timers_update_migration(void) 228 { 229 if (sysctl_timer_migration && tick_nohz_active) 230 static_branch_enable(&timers_migration_enabled); 231 else 232 static_branch_disable(&timers_migration_enabled); 233 } 234 #else 235 static inline void timers_update_migration(void) { } 236 #endif /* !CONFIG_SMP */ 237 238 static void timer_update_keys(struct work_struct *work) 239 { 240 mutex_lock(&timer_keys_mutex); 241 timers_update_migration(); 242 static_branch_enable(&timers_nohz_active); 243 mutex_unlock(&timer_keys_mutex); 244 } 245 246 void timers_update_nohz(void) 247 { 248 schedule_work(&timer_update_work); 249 } 250 251 int timer_migration_handler(struct ctl_table *table, int write, 252 void *buffer, size_t *lenp, loff_t *ppos) 253 { 254 int ret; 255 256 mutex_lock(&timer_keys_mutex); 257 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 258 if (!ret && write) 259 timers_update_migration(); 260 mutex_unlock(&timer_keys_mutex); 261 return ret; 262 } 263 264 static inline bool is_timers_nohz_active(void) 265 { 266 return static_branch_unlikely(&timers_nohz_active); 267 } 268 #else 269 static inline bool is_timers_nohz_active(void) { return false; } 270 #endif /* NO_HZ_COMMON */ 271 272 static unsigned long round_jiffies_common(unsigned long j, int cpu, 273 bool force_up) 274 { 275 int rem; 276 unsigned long original = j; 277 278 /* 279 * We don't want all cpus firing their timers at once hitting the 280 * same lock or cachelines, so we skew each extra cpu with an extra 281 * 3 jiffies. This 3 jiffies came originally from the mm/ code which 282 * already did this. 283 * The skew is done by adding 3*cpunr, then round, then subtract this 284 * extra offset again. 285 */ 286 j += cpu * 3; 287 288 rem = j % HZ; 289 290 /* 291 * If the target jiffie is just after a whole second (which can happen 292 * due to delays of the timer irq, long irq off times etc etc) then 293 * we should round down to the whole second, not up. Use 1/4th second 294 * as cutoff for this rounding as an extreme upper bound for this. 
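 * For example, with HZ=1000 a target 180 jiffies past a whole second
 * (rem == 180 < HZ/4) is rounded down to that second, while a target
 * 600 jiffies past it is rounded up to the next one.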
295 * But never round down if @force_up is set. 296 */ 297 if (rem < HZ/4 && !force_up) /* round down */ 298 j = j - rem; 299 else /* round up */ 300 j = j - rem + HZ; 301 302 /* now that we have rounded, subtract the extra skew again */ 303 j -= cpu * 3; 304 305 /* 306 * Make sure j is still in the future. Otherwise return the 307 * unmodified value. 308 */ 309 return time_is_after_jiffies(j) ? j : original; 310 } 311 312 /** 313 * __round_jiffies - function to round jiffies to a full second 314 * @j: the time in (absolute) jiffies that should be rounded 315 * @cpu: the processor number on which the timeout will happen 316 * 317 * __round_jiffies() rounds an absolute time in the future (in jiffies) 318 * up or down to (approximately) full seconds. This is useful for timers 319 * for which the exact time they fire does not matter too much, as long as 320 * they fire approximately every X seconds. 321 * 322 * By rounding these timers to whole seconds, all such timers will fire 323 * at the same time, rather than at various times spread out. The goal 324 * of this is to have the CPU wake up less, which saves power. 325 * 326 * The exact rounding is skewed for each processor to avoid all 327 * processors firing at the exact same time, which could lead 328 * to lock contention or spurious cache line bouncing. 329 * 330 * The return value is the rounded version of the @j parameter. 331 */ 332 unsigned long __round_jiffies(unsigned long j, int cpu) 333 { 334 return round_jiffies_common(j, cpu, false); 335 } 336 EXPORT_SYMBOL_GPL(__round_jiffies); 337 338 /** 339 * __round_jiffies_relative - function to round jiffies to a full second 340 * @j: the time in (relative) jiffies that should be rounded 341 * @cpu: the processor number on which the timeout will happen 342 * 343 * __round_jiffies_relative() rounds a time delta in the future (in jiffies) 344 * up or down to (approximately) full seconds. This is useful for timers 345 * for which the exact time they fire does not matter too much, as long as 346 * they fire approximately every X seconds. 347 * 348 * By rounding these timers to whole seconds, all such timers will fire 349 * at the same time, rather than at various times spread out. The goal 350 * of this is to have the CPU wake up less, which saves power. 351 * 352 * The exact rounding is skewed for each processor to avoid all 353 * processors firing at the exact same time, which could lead 354 * to lock contention or spurious cache line bouncing. 355 * 356 * The return value is the rounded version of the @j parameter. 357 */ 358 unsigned long __round_jiffies_relative(unsigned long j, int cpu) 359 { 360 unsigned long j0 = jiffies; 361 362 /* Use j0 because jiffies might change while we run */ 363 return round_jiffies_common(j + j0, cpu, false) - j0; 364 } 365 EXPORT_SYMBOL_GPL(__round_jiffies_relative); 366 367 /** 368 * round_jiffies - function to round jiffies to a full second 369 * @j: the time in (absolute) jiffies that should be rounded 370 * 371 * round_jiffies() rounds an absolute time in the future (in jiffies) 372 * up or down to (approximately) full seconds. This is useful for timers 373 * for which the exact time they fire does not matter too much, as long as 374 * they fire approximately every X seconds. 375 * 376 * By rounding these timers to whole seconds, all such timers will fire 377 * at the same time, rather than at various times spread out. The goal 378 * of this is to have the CPU wake up less, which saves power. 
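 *
 * Illustrative use (my_timer is a hypothetical, already initialized
 * struct timer_list), re-arming a housekeeping timer so that it fires
 * near a second boundary:
 *
 *	mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));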
379 * 380 * The return value is the rounded version of the @j parameter. 381 */ 382 unsigned long round_jiffies(unsigned long j) 383 { 384 return round_jiffies_common(j, raw_smp_processor_id(), false); 385 } 386 EXPORT_SYMBOL_GPL(round_jiffies); 387 388 /** 389 * round_jiffies_relative - function to round jiffies to a full second 390 * @j: the time in (relative) jiffies that should be rounded 391 * 392 * round_jiffies_relative() rounds a time delta in the future (in jiffies) 393 * up or down to (approximately) full seconds. This is useful for timers 394 * for which the exact time they fire does not matter too much, as long as 395 * they fire approximately every X seconds. 396 * 397 * By rounding these timers to whole seconds, all such timers will fire 398 * at the same time, rather than at various times spread out. The goal 399 * of this is to have the CPU wake up less, which saves power. 400 * 401 * The return value is the rounded version of the @j parameter. 402 */ 403 unsigned long round_jiffies_relative(unsigned long j) 404 { 405 return __round_jiffies_relative(j, raw_smp_processor_id()); 406 } 407 EXPORT_SYMBOL_GPL(round_jiffies_relative); 408 409 /** 410 * __round_jiffies_up - function to round jiffies up to a full second 411 * @j: the time in (absolute) jiffies that should be rounded 412 * @cpu: the processor number on which the timeout will happen 413 * 414 * This is the same as __round_jiffies() except that it will never 415 * round down. This is useful for timeouts for which the exact time 416 * of firing does not matter too much, as long as they don't fire too 417 * early. 418 */ 419 unsigned long __round_jiffies_up(unsigned long j, int cpu) 420 { 421 return round_jiffies_common(j, cpu, true); 422 } 423 EXPORT_SYMBOL_GPL(__round_jiffies_up); 424 425 /** 426 * __round_jiffies_up_relative - function to round jiffies up to a full second 427 * @j: the time in (relative) jiffies that should be rounded 428 * @cpu: the processor number on which the timeout will happen 429 * 430 * This is the same as __round_jiffies_relative() except that it will never 431 * round down. This is useful for timeouts for which the exact time 432 * of firing does not matter too much, as long as they don't fire too 433 * early. 434 */ 435 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu) 436 { 437 unsigned long j0 = jiffies; 438 439 /* Use j0 because jiffies might change while we run */ 440 return round_jiffies_common(j + j0, cpu, true) - j0; 441 } 442 EXPORT_SYMBOL_GPL(__round_jiffies_up_relative); 443 444 /** 445 * round_jiffies_up - function to round jiffies up to a full second 446 * @j: the time in (absolute) jiffies that should be rounded 447 * 448 * This is the same as round_jiffies() except that it will never 449 * round down. This is useful for timeouts for which the exact time 450 * of firing does not matter too much, as long as they don't fire too 451 * early. 452 */ 453 unsigned long round_jiffies_up(unsigned long j) 454 { 455 return round_jiffies_common(j, raw_smp_processor_id(), true); 456 } 457 EXPORT_SYMBOL_GPL(round_jiffies_up); 458 459 /** 460 * round_jiffies_up_relative - function to round jiffies up to a full second 461 * @j: the time in (relative) jiffies that should be rounded 462 * 463 * This is the same as round_jiffies_relative() except that it will never 464 * round down. This is useful for timeouts for which the exact time 465 * of firing does not matter too much, as long as they don't fire too 466 * early. 
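 *
 * Illustrative use for a relative delay (my_dwork is a hypothetical
 * delayed work item):
 *
 *	queue_delayed_work(system_wq, &my_dwork, round_jiffies_up_relative(HZ));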
467 */ 468 unsigned long round_jiffies_up_relative(unsigned long j) 469 { 470 return __round_jiffies_up_relative(j, raw_smp_processor_id()); 471 } 472 EXPORT_SYMBOL_GPL(round_jiffies_up_relative); 473 474 475 static inline unsigned int timer_get_idx(struct timer_list *timer) 476 { 477 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT; 478 } 479 480 static inline void timer_set_idx(struct timer_list *timer, unsigned int idx) 481 { 482 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) | 483 idx << TIMER_ARRAYSHIFT; 484 } 485 486 /* 487 * Helper function to calculate the array index for a given expiry 488 * time. 489 */ 490 static inline unsigned calc_index(unsigned expires, unsigned lvl) 491 { 492 expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl); 493 return LVL_OFFS(lvl) + (expires & LVL_MASK); 494 } 495 496 static int calc_wheel_index(unsigned long expires, unsigned long clk) 497 { 498 unsigned long delta = expires - clk; 499 unsigned int idx; 500 501 if (delta < LVL_START(1)) { 502 idx = calc_index(expires, 0); 503 } else if (delta < LVL_START(2)) { 504 idx = calc_index(expires, 1); 505 } else if (delta < LVL_START(3)) { 506 idx = calc_index(expires, 2); 507 } else if (delta < LVL_START(4)) { 508 idx = calc_index(expires, 3); 509 } else if (delta < LVL_START(5)) { 510 idx = calc_index(expires, 4); 511 } else if (delta < LVL_START(6)) { 512 idx = calc_index(expires, 5); 513 } else if (delta < LVL_START(7)) { 514 idx = calc_index(expires, 6); 515 } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) { 516 idx = calc_index(expires, 7); 517 } else if ((long) delta < 0) { 518 idx = clk & LVL_MASK; 519 } else { 520 /* 521 * Force expire obscene large timeouts to expire at the 522 * capacity limit of the wheel. 523 */ 524 if (expires >= WHEEL_TIMEOUT_CUTOFF) 525 expires = WHEEL_TIMEOUT_MAX; 526 527 idx = calc_index(expires, LVL_DEPTH - 1); 528 } 529 return idx; 530 } 531 532 /* 533 * Enqueue the timer into the hash bucket, mark it pending in 534 * the bitmap and store the index in the timer flags. 535 */ 536 static void enqueue_timer(struct timer_base *base, struct timer_list *timer, 537 unsigned int idx) 538 { 539 hlist_add_head(&timer->entry, base->vectors + idx); 540 __set_bit(idx, base->pending_map); 541 timer_set_idx(timer, idx); 542 543 trace_timer_start(timer, timer->expires, timer->flags); 544 } 545 546 static void 547 __internal_add_timer(struct timer_base *base, struct timer_list *timer) 548 { 549 unsigned int idx; 550 551 idx = calc_wheel_index(timer->expires, base->clk); 552 enqueue_timer(base, timer, idx); 553 } 554 555 static void 556 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer) 557 { 558 if (!is_timers_nohz_active()) 559 return; 560 561 /* 562 * TODO: This wants some optimizing similar to the code below, but we 563 * will do that when we switch from push to pull for deferrable timers. 564 */ 565 if (timer->flags & TIMER_DEFERRABLE) { 566 if (tick_nohz_full_cpu(base->cpu)) 567 wake_up_nohz_cpu(base->cpu); 568 return; 569 } 570 571 /* 572 * We might have to IPI the remote CPU if the base is idle and the 573 * timer is not deferrable. 
If the other CPU is on the way to idle 574 * then it can't set base->is_idle as we hold the base lock: 575 */ 576 if (!base->is_idle) 577 return; 578 579 /* Check whether this is the new first expiring timer: */ 580 if (time_after_eq(timer->expires, base->next_expiry)) 581 return; 582 583 /* 584 * Set the next expiry time and kick the CPU so it can reevaluate the 585 * wheel: 586 */ 587 base->next_expiry = timer->expires; 588 wake_up_nohz_cpu(base->cpu); 589 } 590 591 static void 592 internal_add_timer(struct timer_base *base, struct timer_list *timer) 593 { 594 __internal_add_timer(base, timer); 595 trigger_dyntick_cpu(base, timer); 596 } 597 598 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS 599 600 static struct debug_obj_descr timer_debug_descr; 601 602 static void *timer_debug_hint(void *addr) 603 { 604 return ((struct timer_list *) addr)->function; 605 } 606 607 static bool timer_is_static_object(void *addr) 608 { 609 struct timer_list *timer = addr; 610 611 return (timer->entry.pprev == NULL && 612 timer->entry.next == TIMER_ENTRY_STATIC); 613 } 614 615 /* 616 * fixup_init is called when: 617 * - an active object is initialized 618 */ 619 static bool timer_fixup_init(void *addr, enum debug_obj_state state) 620 { 621 struct timer_list *timer = addr; 622 623 switch (state) { 624 case ODEBUG_STATE_ACTIVE: 625 del_timer_sync(timer); 626 debug_object_init(timer, &timer_debug_descr); 627 return true; 628 default: 629 return false; 630 } 631 } 632 633 /* Stub timer callback for improperly used timers. */ 634 static void stub_timer(struct timer_list *unused) 635 { 636 WARN_ON(1); 637 } 638 639 /* 640 * fixup_activate is called when: 641 * - an active object is activated 642 * - an unknown non-static object is activated 643 */ 644 static bool timer_fixup_activate(void *addr, enum debug_obj_state state) 645 { 646 struct timer_list *timer = addr; 647 648 switch (state) { 649 case ODEBUG_STATE_NOTAVAILABLE: 650 timer_setup(timer, stub_timer, 0); 651 return true; 652 653 case ODEBUG_STATE_ACTIVE: 654 WARN_ON(1); 655 /* fall through */ 656 default: 657 return false; 658 } 659 } 660 661 /* 662 * fixup_free is called when: 663 * - an active object is freed 664 */ 665 static bool timer_fixup_free(void *addr, enum debug_obj_state state) 666 { 667 struct timer_list *timer = addr; 668 669 switch (state) { 670 case ODEBUG_STATE_ACTIVE: 671 del_timer_sync(timer); 672 debug_object_free(timer, &timer_debug_descr); 673 return true; 674 default: 675 return false; 676 } 677 } 678 679 /* 680 * fixup_assert_init is called when: 681 * - an untracked/uninit-ed object is found 682 */ 683 static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state) 684 { 685 struct timer_list *timer = addr; 686 687 switch (state) { 688 case ODEBUG_STATE_NOTAVAILABLE: 689 timer_setup(timer, stub_timer, 0); 690 return true; 691 default: 692 return false; 693 } 694 } 695 696 static struct debug_obj_descr timer_debug_descr = { 697 .name = "timer_list", 698 .debug_hint = timer_debug_hint, 699 .is_static_object = timer_is_static_object, 700 .fixup_init = timer_fixup_init, 701 .fixup_activate = timer_fixup_activate, 702 .fixup_free = timer_fixup_free, 703 .fixup_assert_init = timer_fixup_assert_init, 704 }; 705 706 static inline void debug_timer_init(struct timer_list *timer) 707 { 708 debug_object_init(timer, &timer_debug_descr); 709 } 710 711 static inline void debug_timer_activate(struct timer_list *timer) 712 { 713 debug_object_activate(timer, &timer_debug_descr); 714 } 715 716 static inline void debug_timer_deactivate(struct 
timer_list *timer) 717 { 718 debug_object_deactivate(timer, &timer_debug_descr); 719 } 720 721 static inline void debug_timer_free(struct timer_list *timer) 722 { 723 debug_object_free(timer, &timer_debug_descr); 724 } 725 726 static inline void debug_timer_assert_init(struct timer_list *timer) 727 { 728 debug_object_assert_init(timer, &timer_debug_descr); 729 } 730 731 static void do_init_timer(struct timer_list *timer, 732 void (*func)(struct timer_list *), 733 unsigned int flags, 734 const char *name, struct lock_class_key *key); 735 736 void init_timer_on_stack_key(struct timer_list *timer, 737 void (*func)(struct timer_list *), 738 unsigned int flags, 739 const char *name, struct lock_class_key *key) 740 { 741 debug_object_init_on_stack(timer, &timer_debug_descr); 742 do_init_timer(timer, func, flags, name, key); 743 } 744 EXPORT_SYMBOL_GPL(init_timer_on_stack_key); 745 746 void destroy_timer_on_stack(struct timer_list *timer) 747 { 748 debug_object_free(timer, &timer_debug_descr); 749 } 750 EXPORT_SYMBOL_GPL(destroy_timer_on_stack); 751 752 #else 753 static inline void debug_timer_init(struct timer_list *timer) { } 754 static inline void debug_timer_activate(struct timer_list *timer) { } 755 static inline void debug_timer_deactivate(struct timer_list *timer) { } 756 static inline void debug_timer_assert_init(struct timer_list *timer) { } 757 #endif 758 759 static inline void debug_init(struct timer_list *timer) 760 { 761 debug_timer_init(timer); 762 trace_timer_init(timer); 763 } 764 765 static inline void debug_deactivate(struct timer_list *timer) 766 { 767 debug_timer_deactivate(timer); 768 trace_timer_cancel(timer); 769 } 770 771 static inline void debug_assert_init(struct timer_list *timer) 772 { 773 debug_timer_assert_init(timer); 774 } 775 776 static void do_init_timer(struct timer_list *timer, 777 void (*func)(struct timer_list *), 778 unsigned int flags, 779 const char *name, struct lock_class_key *key) 780 { 781 timer->entry.pprev = NULL; 782 timer->function = func; 783 timer->flags = flags | raw_smp_processor_id(); 784 lockdep_init_map(&timer->lockdep_map, name, key, 0); 785 } 786 787 /** 788 * init_timer_key - initialize a timer 789 * @timer: the timer to be initialized 790 * @func: timer callback function 791 * @flags: timer flags 792 * @name: name of the timer 793 * @key: lockdep class key of the fake lock used for tracking timer 794 * sync lock dependencies 795 * 796 * init_timer_key() must be done to a timer prior calling *any* of the 797 * other timer functions. 
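 *
 * Most code does not call this directly but goes through the timer_setup()
 * wrapper, e.g. (hypothetical names):
 *
 *	timer_setup(&foo->poll_timer, foo_poll_timeout, 0);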
798 */ 799 void init_timer_key(struct timer_list *timer, 800 void (*func)(struct timer_list *), unsigned int flags, 801 const char *name, struct lock_class_key *key) 802 { 803 debug_init(timer); 804 do_init_timer(timer, func, flags, name, key); 805 } 806 EXPORT_SYMBOL(init_timer_key); 807 808 static inline void detach_timer(struct timer_list *timer, bool clear_pending) 809 { 810 struct hlist_node *entry = &timer->entry; 811 812 debug_deactivate(timer); 813 814 __hlist_del(entry); 815 if (clear_pending) 816 entry->pprev = NULL; 817 entry->next = LIST_POISON2; 818 } 819 820 static int detach_if_pending(struct timer_list *timer, struct timer_base *base, 821 bool clear_pending) 822 { 823 unsigned idx = timer_get_idx(timer); 824 825 if (!timer_pending(timer)) 826 return 0; 827 828 if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) 829 __clear_bit(idx, base->pending_map); 830 831 detach_timer(timer, clear_pending); 832 return 1; 833 } 834 835 static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu) 836 { 837 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu); 838 839 /* 840 * If the timer is deferrable and NO_HZ_COMMON is set then we need 841 * to use the deferrable base. 842 */ 843 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) 844 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu); 845 return base; 846 } 847 848 static inline struct timer_base *get_timer_this_cpu_base(u32 tflags) 849 { 850 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 851 852 /* 853 * If the timer is deferrable and NO_HZ_COMMON is set then we need 854 * to use the deferrable base. 855 */ 856 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) 857 base = this_cpu_ptr(&timer_bases[BASE_DEF]); 858 return base; 859 } 860 861 static inline struct timer_base *get_timer_base(u32 tflags) 862 { 863 return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK); 864 } 865 866 static inline struct timer_base * 867 get_target_base(struct timer_base *base, unsigned tflags) 868 { 869 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 870 if (static_branch_likely(&timers_migration_enabled) && 871 !(tflags & TIMER_PINNED)) 872 return get_timer_cpu_base(tflags, get_nohz_timer_target()); 873 #endif 874 return get_timer_this_cpu_base(tflags); 875 } 876 877 static inline void forward_timer_base(struct timer_base *base) 878 { 879 #ifdef CONFIG_NO_HZ_COMMON 880 unsigned long jnow; 881 882 /* 883 * We only forward the base when we are idle or have just come out of 884 * idle (must_forward_clk logic), and have a delta between base clock 885 * and jiffies. In the common case, run_timers will take care of it. 886 */ 887 if (likely(!base->must_forward_clk)) 888 return; 889 890 jnow = READ_ONCE(jiffies); 891 base->must_forward_clk = base->is_idle; 892 if ((long)(jnow - base->clk) < 2) 893 return; 894 895 /* 896 * If the next expiry value is > jiffies, then we fast forward to 897 * jiffies otherwise we forward to the next expiry value. 898 */ 899 if (time_after(base->next_expiry, jnow)) 900 base->clk = jnow; 901 else 902 base->clk = base->next_expiry; 903 #endif 904 } 905 906 907 /* 908 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means 909 * that all timers which are tied to this base are locked, and the base itself 910 * is locked too. 911 * 912 * So __run_timers/migrate_timers can safely modify all timers which could 913 * be found in the base->vectors array. 
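 * Note that the base a timer lives on can be derived from timer->flags
 * alone: the owning CPU is stored in the TIMER_CPUMASK bits and
 * TIMER_DEFERRABLE selects the deferrable base, which is what allows
 * lock_timer_base() below to look up and lock the right base without
 * holding any lock beforehand.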
914 * 915 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need 916 * to wait until the migration is done. 917 */ 918 static struct timer_base *lock_timer_base(struct timer_list *timer, 919 unsigned long *flags) 920 __acquires(timer->base->lock) 921 { 922 for (;;) { 923 struct timer_base *base; 924 u32 tf; 925 926 /* 927 * We need to use READ_ONCE() here, otherwise the compiler 928 * might re-read @tf between the check for TIMER_MIGRATING 929 * and spin_lock(). 930 */ 931 tf = READ_ONCE(timer->flags); 932 933 if (!(tf & TIMER_MIGRATING)) { 934 base = get_timer_base(tf); 935 raw_spin_lock_irqsave(&base->lock, *flags); 936 if (timer->flags == tf) 937 return base; 938 raw_spin_unlock_irqrestore(&base->lock, *flags); 939 } 940 cpu_relax(); 941 } 942 } 943 944 #define MOD_TIMER_PENDING_ONLY 0x01 945 #define MOD_TIMER_REDUCE 0x02 946 #define MOD_TIMER_NOTPENDING 0x04 947 948 static inline int 949 __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options) 950 { 951 struct timer_base *base, *new_base; 952 unsigned int idx = UINT_MAX; 953 unsigned long clk = 0, flags; 954 int ret = 0; 955 956 BUG_ON(!timer->function); 957 958 /* 959 * This is a common optimization triggered by the networking code - if 960 * the timer is re-modified to have the same timeout or ends up in the 961 * same array bucket then just return: 962 */ 963 if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) { 964 /* 965 * The downside of this optimization is that it can result in 966 * larger granularity than you would get from adding a new 967 * timer with this expiry. 968 */ 969 long diff = timer->expires - expires; 970 971 if (!diff) 972 return 1; 973 if (options & MOD_TIMER_REDUCE && diff <= 0) 974 return 1; 975 976 /* 977 * We lock timer base and calculate the bucket index right 978 * here. If the timer ends up in the same bucket, then we 979 * just update the expiry time and avoid the whole 980 * dequeue/enqueue dance. 981 */ 982 base = lock_timer_base(timer, &flags); 983 forward_timer_base(base); 984 985 if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) && 986 time_before_eq(timer->expires, expires)) { 987 ret = 1; 988 goto out_unlock; 989 } 990 991 clk = base->clk; 992 idx = calc_wheel_index(expires, clk); 993 994 /* 995 * Retrieve and compare the array index of the pending 996 * timer. If it matches set the expiry to the new value so a 997 * subsequent call will exit in the expires check above. 998 */ 999 if (idx == timer_get_idx(timer)) { 1000 if (!(options & MOD_TIMER_REDUCE)) 1001 timer->expires = expires; 1002 else if (time_after(timer->expires, expires)) 1003 timer->expires = expires; 1004 ret = 1; 1005 goto out_unlock; 1006 } 1007 } else { 1008 base = lock_timer_base(timer, &flags); 1009 forward_timer_base(base); 1010 } 1011 1012 ret = detach_if_pending(timer, base, false); 1013 if (!ret && (options & MOD_TIMER_PENDING_ONLY)) 1014 goto out_unlock; 1015 1016 new_base = get_target_base(base, timer->flags); 1017 1018 if (base != new_base) { 1019 /* 1020 * We are trying to schedule the timer on the new base. 1021 * However we can't change timer's base while it is running, 1022 * otherwise del_timer_sync() can't detect that the timer's 1023 * handler yet has not finished. This also guarantees that the 1024 * timer is serialized wrt itself. 
1025 */ 1026 if (likely(base->running_timer != timer)) { 1027 /* See the comment in lock_timer_base() */ 1028 timer->flags |= TIMER_MIGRATING; 1029 1030 raw_spin_unlock(&base->lock); 1031 base = new_base; 1032 raw_spin_lock(&base->lock); 1033 WRITE_ONCE(timer->flags, 1034 (timer->flags & ~TIMER_BASEMASK) | base->cpu); 1035 forward_timer_base(base); 1036 } 1037 } 1038 1039 debug_timer_activate(timer); 1040 1041 timer->expires = expires; 1042 /* 1043 * If 'idx' was calculated above and the base time did not advance 1044 * between calculating 'idx' and possibly switching the base, only 1045 * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise 1046 * we need to (re)calculate the wheel index via 1047 * internal_add_timer(). 1048 */ 1049 if (idx != UINT_MAX && clk == base->clk) { 1050 enqueue_timer(base, timer, idx); 1051 trigger_dyntick_cpu(base, timer); 1052 } else { 1053 internal_add_timer(base, timer); 1054 } 1055 1056 out_unlock: 1057 raw_spin_unlock_irqrestore(&base->lock, flags); 1058 1059 return ret; 1060 } 1061 1062 /** 1063 * mod_timer_pending - modify a pending timer's timeout 1064 * @timer: the pending timer to be modified 1065 * @expires: new timeout in jiffies 1066 * 1067 * mod_timer_pending() is the same for pending timers as mod_timer(), 1068 * but will not re-activate and modify already deleted timers. 1069 * 1070 * It is useful for unserialized use of timers. 1071 */ 1072 int mod_timer_pending(struct timer_list *timer, unsigned long expires) 1073 { 1074 return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY); 1075 } 1076 EXPORT_SYMBOL(mod_timer_pending); 1077 1078 /** 1079 * mod_timer - modify a timer's timeout 1080 * @timer: the timer to be modified 1081 * @expires: new timeout in jiffies 1082 * 1083 * mod_timer() is a more efficient way to update the expire field of an 1084 * active timer (if the timer is inactive it will be activated) 1085 * 1086 * mod_timer(timer, expires) is equivalent to: 1087 * 1088 * del_timer(timer); timer->expires = expires; add_timer(timer); 1089 * 1090 * Note that if there are multiple unserialized concurrent users of the 1091 * same timer, then mod_timer() is the only safe way to modify the timeout, 1092 * since add_timer() cannot modify an already running timer. 1093 * 1094 * The function returns whether it has modified a pending timer or not. 1095 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an 1096 * active timer returns 1.) 1097 */ 1098 int mod_timer(struct timer_list *timer, unsigned long expires) 1099 { 1100 return __mod_timer(timer, expires, 0); 1101 } 1102 EXPORT_SYMBOL(mod_timer); 1103 1104 /** 1105 * timer_reduce - Modify a timer's timeout if it would reduce the timeout 1106 * @timer: The timer to be modified 1107 * @expires: New timeout in jiffies 1108 * 1109 * timer_reduce() is very similar to mod_timer(), except that it will only 1110 * modify a running timer if that would reduce the expiration time (it will 1111 * start a timer that isn't running). 1112 */ 1113 int timer_reduce(struct timer_list *timer, unsigned long expires) 1114 { 1115 return __mod_timer(timer, expires, MOD_TIMER_REDUCE); 1116 } 1117 EXPORT_SYMBOL(timer_reduce); 1118 1119 /** 1120 * add_timer - start a timer 1121 * @timer: the timer to be added 1122 * 1123 * The kernel will do a ->function(@timer) callback from the 1124 * timer interrupt at the ->expires point in the future. The 1125 * current time is 'jiffies'. 1126 * 1127 * The timer's ->expires, ->function fields must be set prior calling this 1128 * function. 
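 *
 * Illustrative sequence (hypothetical names):
 *
 *	timer_setup(&foo->timer, foo_timeout, 0);
 *	foo->timer.expires = jiffies + 2 * HZ;
 *	add_timer(&foo->timer);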
1129 * 1130 * Timers with an ->expires field in the past will be executed in the next 1131 * timer tick. 1132 */ 1133 void add_timer(struct timer_list *timer) 1134 { 1135 BUG_ON(timer_pending(timer)); 1136 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING); 1137 } 1138 EXPORT_SYMBOL(add_timer); 1139 1140 /** 1141 * add_timer_on - start a timer on a particular CPU 1142 * @timer: the timer to be added 1143 * @cpu: the CPU to start it on 1144 * 1145 * This is not very scalable on SMP. Double adds are not possible. 1146 */ 1147 void add_timer_on(struct timer_list *timer, int cpu) 1148 { 1149 struct timer_base *new_base, *base; 1150 unsigned long flags; 1151 1152 BUG_ON(timer_pending(timer) || !timer->function); 1153 1154 new_base = get_timer_cpu_base(timer->flags, cpu); 1155 1156 /* 1157 * If @timer was on a different CPU, it should be migrated with the 1158 * old base locked to prevent other operations proceeding with the 1159 * wrong base locked. See lock_timer_base(). 1160 */ 1161 base = lock_timer_base(timer, &flags); 1162 if (base != new_base) { 1163 timer->flags |= TIMER_MIGRATING; 1164 1165 raw_spin_unlock(&base->lock); 1166 base = new_base; 1167 raw_spin_lock(&base->lock); 1168 WRITE_ONCE(timer->flags, 1169 (timer->flags & ~TIMER_BASEMASK) | cpu); 1170 } 1171 forward_timer_base(base); 1172 1173 debug_timer_activate(timer); 1174 internal_add_timer(base, timer); 1175 raw_spin_unlock_irqrestore(&base->lock, flags); 1176 } 1177 EXPORT_SYMBOL_GPL(add_timer_on); 1178 1179 /** 1180 * del_timer - deactivate a timer. 1181 * @timer: the timer to be deactivated 1182 * 1183 * del_timer() deactivates a timer - this works on both active and inactive 1184 * timers. 1185 * 1186 * The function returns whether it has deactivated a pending timer or not. 1187 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an 1188 * active timer returns 1.) 1189 */ 1190 int del_timer(struct timer_list *timer) 1191 { 1192 struct timer_base *base; 1193 unsigned long flags; 1194 int ret = 0; 1195 1196 debug_assert_init(timer); 1197 1198 if (timer_pending(timer)) { 1199 base = lock_timer_base(timer, &flags); 1200 ret = detach_if_pending(timer, base, true); 1201 raw_spin_unlock_irqrestore(&base->lock, flags); 1202 } 1203 1204 return ret; 1205 } 1206 EXPORT_SYMBOL(del_timer); 1207 1208 /** 1209 * try_to_del_timer_sync - Try to deactivate a timer 1210 * @timer: timer to delete 1211 * 1212 * This function tries to deactivate a timer. Upon successful (ret >= 0) 1213 * exit the timer is not queued and the handler is not running on any CPU. 1214 */ 1215 int try_to_del_timer_sync(struct timer_list *timer) 1216 { 1217 struct timer_base *base; 1218 unsigned long flags; 1219 int ret = -1; 1220 1221 debug_assert_init(timer); 1222 1223 base = lock_timer_base(timer, &flags); 1224 1225 if (base->running_timer != timer) 1226 ret = detach_if_pending(timer, base, true); 1227 1228 raw_spin_unlock_irqrestore(&base->lock, flags); 1229 1230 return ret; 1231 } 1232 EXPORT_SYMBOL(try_to_del_timer_sync); 1233 1234 #ifdef CONFIG_PREEMPT_RT 1235 static __init void timer_base_init_expiry_lock(struct timer_base *base) 1236 { 1237 spin_lock_init(&base->expiry_lock); 1238 } 1239 1240 static inline void timer_base_lock_expiry(struct timer_base *base) 1241 { 1242 spin_lock(&base->expiry_lock); 1243 } 1244 1245 static inline void timer_base_unlock_expiry(struct timer_base *base) 1246 { 1247 spin_unlock(&base->expiry_lock); 1248 } 1249 1250 /* 1251 * The counterpart to del_timer_wait_running(). 
 *
 * If there is a waiter for base->expiry_lock, then it was waiting for the
 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
 * the waiter to acquire the lock and make progress.
 */
static void timer_sync_wait_running(struct timer_base *base)
{
	if (atomic_read(&base->timer_waiters)) {
		spin_unlock(&base->expiry_lock);
		spin_lock(&base->expiry_lock);
	}
}

/*
 * This function is called on PREEMPT_RT kernels when the fast path
 * deletion of a timer failed because the timer callback function was
 * running.
 *
 * This prevents priority inversion if the softirq thread on a remote CPU
 * got preempted, and it prevents a livelock when the task which tries to
 * delete a timer has preempted the softirq thread running the timer
 * callback function.
 */
static void del_timer_wait_running(struct timer_list *timer)
{
	u32 tf;

	tf = READ_ONCE(timer->flags);
	if (!(tf & TIMER_MIGRATING)) {
		struct timer_base *base = get_timer_base(tf);

		/*
		 * Mark the base as contended and grab the expiry lock,
		 * which is held by the softirq across the timer
		 * callback. Drop the lock immediately so the softirq can
		 * expire the next timer. In theory the timer could already
		 * be running again, but that's more than unlikely and just
		 * causes another wait loop.
		 */
		atomic_inc(&base->timer_waiters);
		spin_lock_bh(&base->expiry_lock);
		atomic_dec(&base->timer_waiters);
		spin_unlock_bh(&base->expiry_lock);
	}
}
#else
static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
static inline void timer_base_lock_expiry(struct timer_base *base) { }
static inline void timer_base_unlock_expiry(struct timer_base *base) { }
static inline void timer_sync_wait_running(struct timer_base *base) { }
static inline void del_timer_wait_running(struct timer_list *timer) { }
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 * interrupt context while calling this function. Even if the lock has
 * nothing to do with the timer in question. Here's why::
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *    while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
1337 * The interrupt on the other CPU is waiting to grab somelock but 1338 * it has interrupted the softirq that CPU0 is waiting to finish. 1339 * 1340 * The function returns whether it has deactivated a pending timer or not. 1341 */ 1342 int del_timer_sync(struct timer_list *timer) 1343 { 1344 int ret; 1345 1346 #ifdef CONFIG_LOCKDEP 1347 unsigned long flags; 1348 1349 /* 1350 * If lockdep gives a backtrace here, please reference 1351 * the synchronization rules above. 1352 */ 1353 local_irq_save(flags); 1354 lock_map_acquire(&timer->lockdep_map); 1355 lock_map_release(&timer->lockdep_map); 1356 local_irq_restore(flags); 1357 #endif 1358 /* 1359 * don't use it in hardirq context, because it 1360 * could lead to deadlock. 1361 */ 1362 WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE)); 1363 1364 do { 1365 ret = try_to_del_timer_sync(timer); 1366 1367 if (unlikely(ret < 0)) { 1368 del_timer_wait_running(timer); 1369 cpu_relax(); 1370 } 1371 } while (ret < 0); 1372 1373 return ret; 1374 } 1375 EXPORT_SYMBOL(del_timer_sync); 1376 #endif 1377 1378 static void call_timer_fn(struct timer_list *timer, 1379 void (*fn)(struct timer_list *), 1380 unsigned long baseclk) 1381 { 1382 int count = preempt_count(); 1383 1384 #ifdef CONFIG_LOCKDEP 1385 /* 1386 * It is permissible to free the timer from inside the 1387 * function that is called from it, this we need to take into 1388 * account for lockdep too. To avoid bogus "held lock freed" 1389 * warnings as well as problems when looking into 1390 * timer->lockdep_map, make a copy and use that here. 1391 */ 1392 struct lockdep_map lockdep_map; 1393 1394 lockdep_copy_map(&lockdep_map, &timer->lockdep_map); 1395 #endif 1396 /* 1397 * Couple the lock chain with the lock chain at 1398 * del_timer_sync() by acquiring the lock_map around the fn() 1399 * call here and in del_timer_sync(). 1400 */ 1401 lock_map_acquire(&lockdep_map); 1402 1403 trace_timer_expire_entry(timer, baseclk); 1404 fn(timer); 1405 trace_timer_expire_exit(timer); 1406 1407 lock_map_release(&lockdep_map); 1408 1409 if (count != preempt_count()) { 1410 WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n", 1411 fn, count, preempt_count()); 1412 /* 1413 * Restore the preempt count. That gives us a decent 1414 * chance to survive and extract information. If the 1415 * callback kept a lock held, bad luck, but not worse 1416 * than the BUG() we had. 1417 */ 1418 preempt_count_set(count); 1419 } 1420 } 1421 1422 static void expire_timers(struct timer_base *base, struct hlist_head *head) 1423 { 1424 /* 1425 * This value is required only for tracing. base->clk was 1426 * incremented directly before expire_timers was called. But expiry 1427 * is related to the old base->clk value. 
1428 */ 1429 unsigned long baseclk = base->clk - 1; 1430 1431 while (!hlist_empty(head)) { 1432 struct timer_list *timer; 1433 void (*fn)(struct timer_list *); 1434 1435 timer = hlist_entry(head->first, struct timer_list, entry); 1436 1437 base->running_timer = timer; 1438 detach_timer(timer, true); 1439 1440 fn = timer->function; 1441 1442 if (timer->flags & TIMER_IRQSAFE) { 1443 raw_spin_unlock(&base->lock); 1444 call_timer_fn(timer, fn, baseclk); 1445 base->running_timer = NULL; 1446 raw_spin_lock(&base->lock); 1447 } else { 1448 raw_spin_unlock_irq(&base->lock); 1449 call_timer_fn(timer, fn, baseclk); 1450 base->running_timer = NULL; 1451 timer_sync_wait_running(base); 1452 raw_spin_lock_irq(&base->lock); 1453 } 1454 } 1455 } 1456 1457 static int __collect_expired_timers(struct timer_base *base, 1458 struct hlist_head *heads) 1459 { 1460 unsigned long clk = base->clk; 1461 struct hlist_head *vec; 1462 int i, levels = 0; 1463 unsigned int idx; 1464 1465 for (i = 0; i < LVL_DEPTH; i++) { 1466 idx = (clk & LVL_MASK) + i * LVL_SIZE; 1467 1468 if (__test_and_clear_bit(idx, base->pending_map)) { 1469 vec = base->vectors + idx; 1470 hlist_move_list(vec, heads++); 1471 levels++; 1472 } 1473 /* Is it time to look at the next level? */ 1474 if (clk & LVL_CLK_MASK) 1475 break; 1476 /* Shift clock for the next level granularity */ 1477 clk >>= LVL_CLK_SHIFT; 1478 } 1479 return levels; 1480 } 1481 1482 #ifdef CONFIG_NO_HZ_COMMON 1483 /* 1484 * Find the next pending bucket of a level. Search from level start (@offset) 1485 * + @clk upwards and if nothing there, search from start of the level 1486 * (@offset) up to @offset + clk. 1487 */ 1488 static int next_pending_bucket(struct timer_base *base, unsigned offset, 1489 unsigned clk) 1490 { 1491 unsigned pos, start = offset + clk; 1492 unsigned end = offset + LVL_SIZE; 1493 1494 pos = find_next_bit(base->pending_map, end, start); 1495 if (pos < end) 1496 return pos - start; 1497 1498 pos = find_next_bit(base->pending_map, start, offset); 1499 return pos < start ? pos + LVL_SIZE - start : -1; 1500 } 1501 1502 /* 1503 * Search the first expiring timer in the various clock levels. Caller must 1504 * hold base->lock. 1505 */ 1506 static unsigned long __next_timer_interrupt(struct timer_base *base) 1507 { 1508 unsigned long clk, next, adj; 1509 unsigned lvl, offset = 0; 1510 1511 next = base->clk + NEXT_TIMER_MAX_DELTA; 1512 clk = base->clk; 1513 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { 1514 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); 1515 1516 if (pos >= 0) { 1517 unsigned long tmp = clk + (unsigned long) pos; 1518 1519 tmp <<= LVL_SHIFT(lvl); 1520 if (time_before(tmp, next)) 1521 next = tmp; 1522 } 1523 /* 1524 * Clock for the next level. If the current level clock lower 1525 * bits are zero, we look at the next level as is. If not we 1526 * need to advance it by one because that's going to be the 1527 * next expiring bucket in that level. base->clk is the next 1528 * expiring jiffie. So in case of: 1529 * 1530 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1531 * 0 0 0 0 0 0 1532 * 1533 * we have to look at all levels @index 0. With 1534 * 1535 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1536 * 0 0 0 0 0 2 1537 * 1538 * LVL0 has the next expiring bucket @index 2. The upper 1539 * levels have the next expiring bucket @index 1. 
1540 * 1541 * In case that the propagation wraps the next level the same 1542 * rules apply: 1543 * 1544 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1545 * 0 0 0 0 F 2 1546 * 1547 * So after looking at LVL0 we get: 1548 * 1549 * LVL5 LVL4 LVL3 LVL2 LVL1 1550 * 0 0 0 1 0 1551 * 1552 * So no propagation from LVL1 to LVL2 because that happened 1553 * with the add already, but then we need to propagate further 1554 * from LVL2 to LVL3. 1555 * 1556 * So the simple check whether the lower bits of the current 1557 * level are 0 or not is sufficient for all cases. 1558 */ 1559 adj = clk & LVL_CLK_MASK ? 1 : 0; 1560 clk >>= LVL_CLK_SHIFT; 1561 clk += adj; 1562 } 1563 return next; 1564 } 1565 1566 /* 1567 * Check, if the next hrtimer event is before the next timer wheel 1568 * event: 1569 */ 1570 static u64 cmp_next_hrtimer_event(u64 basem, u64 expires) 1571 { 1572 u64 nextevt = hrtimer_get_next_event(); 1573 1574 /* 1575 * If high resolution timers are enabled 1576 * hrtimer_get_next_event() returns KTIME_MAX. 1577 */ 1578 if (expires <= nextevt) 1579 return expires; 1580 1581 /* 1582 * If the next timer is already expired, return the tick base 1583 * time so the tick is fired immediately. 1584 */ 1585 if (nextevt <= basem) 1586 return basem; 1587 1588 /* 1589 * Round up to the next jiffie. High resolution timers are 1590 * off, so the hrtimers are expired in the tick and we need to 1591 * make sure that this tick really expires the timer to avoid 1592 * a ping pong of the nohz stop code. 1593 * 1594 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3 1595 */ 1596 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC; 1597 } 1598 1599 /** 1600 * get_next_timer_interrupt - return the time (clock mono) of the next timer 1601 * @basej: base time jiffies 1602 * @basem: base time clock monotonic 1603 * 1604 * Returns the tick aligned clock monotonic time of the next pending 1605 * timer or KTIME_MAX if no timer is pending. 1606 */ 1607 u64 get_next_timer_interrupt(unsigned long basej, u64 basem) 1608 { 1609 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1610 u64 expires = KTIME_MAX; 1611 unsigned long nextevt; 1612 bool is_max_delta; 1613 1614 /* 1615 * Pretend that there is no timer pending if the cpu is offline. 1616 * Possible pending timers will be migrated later to an active cpu. 1617 */ 1618 if (cpu_is_offline(smp_processor_id())) 1619 return expires; 1620 1621 raw_spin_lock(&base->lock); 1622 nextevt = __next_timer_interrupt(base); 1623 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); 1624 base->next_expiry = nextevt; 1625 /* 1626 * We have a fresh next event. Check whether we can forward the 1627 * base. We can only do that when @basej is past base->clk 1628 * otherwise we might rewind base->clk. 1629 */ 1630 if (time_after(basej, base->clk)) { 1631 if (time_after(nextevt, basej)) 1632 base->clk = basej; 1633 else if (time_after(nextevt, base->clk)) 1634 base->clk = nextevt; 1635 } 1636 1637 if (time_before_eq(nextevt, basej)) { 1638 expires = basem; 1639 base->is_idle = false; 1640 } else { 1641 if (!is_max_delta) 1642 expires = basem + (u64)(nextevt - basej) * TICK_NSEC; 1643 /* 1644 * If we expect to sleep more than a tick, mark the base idle. 1645 * Also the tick is stopped so any added timer must forward 1646 * the base clk itself to keep granularity small. This idle 1647 * logic is only maintained for the BASE_STD base, deferrable 1648 * timers may still see large granularity skew (by design). 
1649 */ 1650 if ((expires - basem) > TICK_NSEC) { 1651 base->must_forward_clk = true; 1652 base->is_idle = true; 1653 } 1654 } 1655 raw_spin_unlock(&base->lock); 1656 1657 return cmp_next_hrtimer_event(basem, expires); 1658 } 1659 1660 /** 1661 * timer_clear_idle - Clear the idle state of the timer base 1662 * 1663 * Called with interrupts disabled 1664 */ 1665 void timer_clear_idle(void) 1666 { 1667 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1668 1669 /* 1670 * We do this unlocked. The worst outcome is a remote enqueue sending 1671 * a pointless IPI, but taking the lock would just make the window for 1672 * sending the IPI a few instructions smaller for the cost of taking 1673 * the lock in the exit from idle path. 1674 */ 1675 base->is_idle = false; 1676 } 1677 1678 static int collect_expired_timers(struct timer_base *base, 1679 struct hlist_head *heads) 1680 { 1681 unsigned long now = READ_ONCE(jiffies); 1682 1683 /* 1684 * NOHZ optimization. After a long idle sleep we need to forward the 1685 * base to current jiffies. Avoid a loop by searching the bitfield for 1686 * the next expiring timer. 1687 */ 1688 if ((long)(now - base->clk) > 2) { 1689 unsigned long next = __next_timer_interrupt(base); 1690 1691 /* 1692 * If the next timer is ahead of time forward to current 1693 * jiffies, otherwise forward to the next expiry time: 1694 */ 1695 if (time_after(next, now)) { 1696 /* 1697 * The call site will increment base->clk and then 1698 * terminate the expiry loop immediately. 1699 */ 1700 base->clk = now; 1701 return 0; 1702 } 1703 base->clk = next; 1704 } 1705 return __collect_expired_timers(base, heads); 1706 } 1707 #else 1708 static inline int collect_expired_timers(struct timer_base *base, 1709 struct hlist_head *heads) 1710 { 1711 return __collect_expired_timers(base, heads); 1712 } 1713 #endif 1714 1715 /* 1716 * Called from the timer interrupt handler to charge one tick to the current 1717 * process. user_tick is 1 if the tick is user time, 0 for system. 1718 */ 1719 void update_process_times(int user_tick) 1720 { 1721 struct task_struct *p = current; 1722 1723 /* Note: this timer irq context must be accounted for as well. */ 1724 account_process_tick(p, user_tick); 1725 run_local_timers(); 1726 rcu_sched_clock_irq(user_tick); 1727 #ifdef CONFIG_IRQ_WORK 1728 if (in_irq()) 1729 irq_work_tick(); 1730 #endif 1731 scheduler_tick(); 1732 if (IS_ENABLED(CONFIG_POSIX_TIMERS)) 1733 run_posix_cpu_timers(); 1734 } 1735 1736 /** 1737 * __run_timers - run all expired timers (if any) on this CPU. 1738 * @base: the timer vector to be processed. 1739 */ 1740 static inline void __run_timers(struct timer_base *base) 1741 { 1742 struct hlist_head heads[LVL_DEPTH]; 1743 int levels; 1744 1745 if (!time_after_eq(jiffies, base->clk)) 1746 return; 1747 1748 timer_base_lock_expiry(base); 1749 raw_spin_lock_irq(&base->lock); 1750 1751 /* 1752 * timer_base::must_forward_clk must be cleared before running 1753 * timers so that any timer functions that call mod_timer() will 1754 * not try to forward the base. Idle tracking / clock forwarding 1755 * logic is only used with BASE_STD timers. 1756 * 1757 * The must_forward_clk flag is cleared unconditionally also for 1758 * the deferrable base. The deferrable base is not affected by idle 1759 * tracking and never forwarded, so clearing the flag is a NOOP. 

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	__run_timers(base);
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	hrtimer_run_queues();
	/* Raise the softirq only if required. */
	if (time_before(jiffies, base->clk)) {
		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
			return;
		/* CPU is awake, so check the deferrable base. */
		base++;
		if (time_before(jiffies, base->clk))
			return;
	}
	raise_softirq(TIMER_SOFTIRQ);
}

/*
 * Since schedule_timeout()'s timer is defined on the stack, it must store
 * the target task on the stack as well.
 */
struct process_timer {
	struct timer_list timer;
	struct task_struct *task;
};

static void process_timeout(struct timer_list *t)
{
	struct process_timer *timeout = from_timer(timeout, t, timer);

	wake_up_process(timeout->task);
}
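
/*
 * A minimal usage sketch (not part of this file; my_gc_timer and my_gc_fn
 * are hypothetical names): a timer armed with TIMER_DEFERRABLE lands on the
 * BASE_DEF base checked in run_local_timers() above and will not, by itself,
 * wake an idle NOHZ CPU; it is serviced when the CPU wakes up anyway:
 *
 *	static struct timer_list my_gc_timer;
 *
 *	static void my_gc_fn(struct timer_list *t)
 *	{
 *		// housekeeping that can wait until the CPU is busy anyway
 *	}
 *
 *	timer_setup(&my_gc_timer, my_gc_fn, TIMER_DEFERRABLE);
 *	mod_timer(&my_gc_timer, jiffies + 10 * HZ);
 */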

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have elapsed.
 * The function behavior depends on the current task state
 * (see also set_current_state() description):
 *
 * %TASK_RUNNING - the scheduler is called, but the task does not sleep
 * at all. That happens because sched_submit_work() does nothing for
 * tasks in %TASK_RUNNING state.
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be %TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * Returns 0 when the timer has expired, otherwise the remaining time in
 * jiffies will be returned. In all cases the return value is guaranteed
 * to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct process_timer timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases exist only for the caller's
		 * convenience. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
		 * but I'd like to return a valid offset (>= 0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of paranoia. Note that the retval will be
		 * 0 since no piece of the kernel is supposed to check
		 * for a negative retval of schedule_timeout() (it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	timer.task = current;
	timer_setup_on_stack(&timer.timer, process_timeout, 0);
	__mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
	schedule();
	del_singleshot_timer_sync(&timer.timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer.timer);

	timeout = expire - jiffies;

out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/*
 * Like schedule_timeout_uninterruptible(), except this task will not
 * contribute to the load average.
 */
signed long __sched schedule_timeout_idle(signed long timeout)
{
	__set_current_state(TASK_IDLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_idle);
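
/*
 * A minimal usage sketch (not part of this file; my_condition() is a
 * hypothetical predicate): the canonical pattern for the schedule_timeout()
 * family is to pick the task state via a wrapper, sleep, and treat the
 * non-negative return value as the time remaining:
 *
 *	signed long t = msecs_to_jiffies(500);
 *
 *	while (!my_condition() && t)
 *		t = schedule_timeout_interruptible(t);
 *	// t == 0: timed out, t > 0: woken early with t jiffies to spare
 *
 * Real code normally wraps such a check-then-sleep loop in
 * wait_event_timeout() or wait_event_interruptible_timeout() so that a
 * wakeup cannot slip in between the condition check and the sleep.
 */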

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
{
	struct timer_list *timer;
	int cpu = new_base->cpu;

	while (!hlist_empty(head)) {
		timer = hlist_entry(head->first, struct timer_list, entry);
		detach_timer(timer, false);
		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
		internal_add_timer(new_base, timer);
	}
}

int timers_prepare_cpu(unsigned int cpu)
{
	struct timer_base *base;
	int b;

	for (b = 0; b < NR_BASES; b++) {
		base = per_cpu_ptr(&timer_bases[b], cpu);
		base->clk = jiffies;
		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
		base->is_idle = false;
		base->must_forward_clk = true;
	}
	return 0;
}

int timers_dead_cpu(unsigned int cpu)
{
	struct timer_base *old_base;
	struct timer_base *new_base;
	int b, i;

	BUG_ON(cpu_online(cpu));

	for (b = 0; b < NR_BASES; b++) {
		old_base = per_cpu_ptr(&timer_bases[b], cpu);
		new_base = get_cpu_ptr(&timer_bases[b]);
		/*
		 * The caller is globally serialized and nobody else
		 * takes two locks at once, so deadlock is not possible.
		 */
		raw_spin_lock_irq(&new_base->lock);
		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

		/*
		 * The current CPU's base clock might be stale. Update it
		 * before moving the timers over.
		 */
		forward_timer_base(new_base);

		BUG_ON(old_base->running_timer);

		for (i = 0; i < WHEEL_SIZE; i++)
			migrate_timer_list(new_base, old_base->vectors + i);

		raw_spin_unlock(&old_base->lock);
		raw_spin_unlock_irq(&new_base->lock);
		put_cpu_ptr(&timer_bases);
	}
	return 0;
}

#endif /* CONFIG_HOTPLUG_CPU */

static void __init init_timer_cpu(int cpu)
{
	struct timer_base *base;
	int i;

	for (i = 0; i < NR_BASES; i++) {
		base = per_cpu_ptr(&timer_bases[i], cpu);
		base->cpu = cpu;
		raw_spin_lock_init(&base->lock);
		base->clk = jiffies;
		timer_base_init_expiry_lock(base);
	}
}

static void __init init_timer_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		init_timer_cpu(cpu);
}

void __init init_timers(void)
{
	init_timer_cpus();
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);
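
/*
 * A minimal usage sketch (not part of this file; my_fw_wait is a
 * hypothetical helper). msleep() converts to jiffies and adds one, so it
 * always sleeps at least the requested time; with HZ=100, msleep(1) may
 * sleep for as long as ~20ms. msleep_interruptible() returns the remaining
 * time in ms when a signal cuts the sleep short:
 *
 *	static int my_fw_wait(void)
 *	{
 *		msleep(20);		// e.g. a datasheet-mandated 20ms delay
 *
 *		if (msleep_interruptible(1000))
 *			return -ERESTARTSYS;	// signal arrived early
 *		return 0;
 *	}
 */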

/**
 * usleep_range - Sleep for an approximate time
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 *
 * In non-atomic context where the exact wakeup time is flexible, use
 * usleep_range() instead of udelay(). The sleep improves responsiveness
 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
 * power usage by allowing hrtimers to take advantage of an already-
 * scheduled interrupt instead of scheduling a new one just for this sleep.
 */
void __sched usleep_range(unsigned long min, unsigned long max)
{
	ktime_t exp = ktime_add_us(ktime_get(), min);
	u64 delta = (u64)(max - min) * NSEC_PER_USEC;

	for (;;) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		/* Do not return before the requested sleep time has elapsed */
		if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
			break;
	}
}
EXPORT_SYMBOL(usleep_range);
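
/*
 * A minimal selection sketch (not part of this file), following the same
 * reasoning as the usleep_range() comment above:
 *
 *	// atomic context (e.g. under a spinlock): busy-wait only
 *	udelay(10);
 *
 *	// process context, short settle time: give hrtimers a coalescing
 *	// window instead of demanding an exact wakeup
 *	usleep_range(100, 200);
 *
 *	// process context, ~10ms and up: jiffies granularity is plenty
 *	msleep(20);
 */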