// SPDX-License-Identifier: GPL-2.0
/*
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
 * level has a different granularity.
 *
 * The level granularity is:		LVL_CLK_DIV ^ lvl
 * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
 *
 * The array level of a newly armed timer depends on the relative expiry
 * time. The farther the expiry time is away the higher the array level and
 * therefore the granularity becomes.
 *
 * Contrary to the original timer wheel implementation, which aims for 'exact'
 * expiry of the timers, this implementation removes the need for recascading
 * the timers into the lower array levels. The previous 'classic' timer wheel
 * implementation of the kernel already violated the 'exact' expiry by adding
 * slack to the expiry time to provide batched expiration. The granularity
 * levels provide implicit batching.
 *
 * This is an optimization of the original timer wheel implementation for the
 * majority of the timer wheel use cases: timeouts. The vast majority of
 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
 * the timeout expires it indicates that normal operation is disturbed, so it
 * does not matter much whether the timeout comes with a slight delay.
 *
 * The only exception to this are networking timers with a small expiry
 * time. They rely on the granularity. Those fit into the first wheel level,
 * which has HZ granularity.
 *
 * We don't have cascading anymore. Timers with an expiry time above the
 * capacity of the last wheel level are force expired at the maximum
 * timeout value of the last wheel level. From data sampling we know that the
 * maximum value observed is 5 days (network connection tracking), so this
 * should not be an issue.
 *
 * The currently chosen array constants are a good compromise between
 * array size and granularity.
 *
 * This results in the following granularity and range levels:
 *
 * HZ 1000 steps
 * Level Offset  Granularity            Range
 *  0      0         1 ms                0 ms -         63 ms
 *  1     64         8 ms               64 ms -        511 ms
 *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
 *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
 *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
 *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
 *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
 *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
 *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
 *
 * HZ  300
 * Level Offset  Granularity            Range
 *  0      0         3 ms                0 ms -        210 ms
 *  1     64        26 ms              213 ms -       1703 ms (213ms - ~1s)
 *  2    128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
 *  3    192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
 *  4    256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
 *  5    320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
 *  6    384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
 *  7    448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
 *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 *
 * HZ  250
 * Level Offset  Granularity            Range
 *  0      0         4 ms                0 ms -        255 ms
 *  1     64        32 ms              256 ms -       2047 ms (256ms - ~2s)
 *  2    128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
 *  3    192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
 *  4    256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
 *  5    320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
 *  6    384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
 *  7    448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
 *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 *
 * HZ  100
 * Level Offset  Granularity            Range
 *  0      0        10 ms                0 ms -        630 ms
 *  1     64        80 ms              640 ms -       5110 ms (640ms - ~5s)
 *  2    128       640 ms             5120 ms -      40950 ms (~5s - ~40s)
 *  3    192      5120 ms (~5s)      40960 ms -     327670 ms (~40s - ~5m)
 *  4    256     40960 ms (~40s)    327680 ms -    2621430 ms (~5m - ~43m)
 *  5    320    327680 ms (~5m)    2621440 ms -   20971510 ms (~43m - ~5h)
 *  6    384   2621440 ms (~43m)  20971520 ms -  167772150 ms (~5h - ~1d)
 *  7    448  20971520 ms (~5h)  167772160 ms - 1342177270 ms (~1d - ~15d)
 */

/* Clock divisor for the next level */
#define LVL_CLK_SHIFT	3
#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))

/*
 * The time start value for each level to select the bucket at enqueue
 * time.
 */
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

/* Size of each clock level */
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_MASK	(LVL_SIZE - 1)
#define LVL_OFFS(n)	((n) * LVL_SIZE)

/* Level depth */
#if HZ > 100
# define LVL_DEPTH	9
# else
# define LVL_DEPTH	8
#endif
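
/*
 * A worked example of the geometry above (illustrative, assuming HZ=1000):
 * a timer armed 1000 ticks (1s) out has delta = 1000, and since
 * LVL_START(2) = 63 << 3 = 504 <= 1000 < LVL_START(3) = 63 << 6 = 4032,
 * it is queued in level 2 with LVL_GRAN(2) = 1 << 6 = 64 tick (64ms)
 * granularity, i.e. it may fire up to ~64ms late by design.
 */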

/* The cutoff (max. capacity of the wheel) */
#define WHEEL_TIMEOUT_CUTOFF	(LVL_START(LVL_DEPTH))
#define WHEEL_TIMEOUT_MAX	(WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))

/*
 * The resulting wheel size. If NOHZ is configured we allocate two
 * wheels so we have a separate storage for the deferrable timers.
 */
#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)

#ifdef CONFIG_NO_HZ_COMMON
# define NR_BASES	2
# define BASE_STD	0
# define BASE_DEF	1
#else
# define NR_BASES	1
# define BASE_STD	0
# define BASE_DEF	0
#endif

struct timer_base {
	raw_spinlock_t		lock;
	struct timer_list	*running_timer;
#ifdef CONFIG_PREEMPT_RT
	spinlock_t		expiry_lock;
	atomic_t		timer_waiters;
#endif
	unsigned long		clk;
	unsigned long		next_expiry;
	unsigned int		cpu;
	bool			is_idle;
	bool			must_forward_clk;
	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
	struct hlist_head	vectors[WHEEL_SIZE];
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);

#ifdef CONFIG_NO_HZ_COMMON

static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
static DEFINE_MUTEX(timer_keys_mutex);

static void timer_update_keys(struct work_struct *work);
static DECLARE_WORK(timer_update_work, timer_update_keys);

#ifdef CONFIG_SMP
unsigned int sysctl_timer_migration = 1;

DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);

static void timers_update_migration(void)
{
	if (sysctl_timer_migration && tick_nohz_active)
		static_branch_enable(&timers_migration_enabled);
	else
		static_branch_disable(&timers_migration_enabled);
}
#else
static inline void timers_update_migration(void) { }
#endif /* !CONFIG_SMP */

static void timer_update_keys(struct work_struct *work)
{
	mutex_lock(&timer_keys_mutex);
	timers_update_migration();
	static_branch_enable(&timers_nohz_active);
	mutex_unlock(&timer_keys_mutex);
}

void timers_update_nohz(void)
{
	schedule_work(&timer_update_work);
}

int timer_migration_handler(struct ctl_table *table, int write,
			    void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&timer_keys_mutex);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write)
		timers_update_migration();
	mutex_unlock(&timer_keys_mutex);
	return ret;
}

static inline bool is_timers_nohz_active(void)
{
	return static_branch_unlikely(&timers_nohz_active);
}
#else
static inline bool is_timers_nohz_active(void) { return false; }
#endif /* NO_HZ_COMMON */

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}
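
/*
 * Worked example of the skew logic above (illustrative, assuming HZ=1000):
 * rounding j = 5990 for cpu 1 first adds the skew (5990 + 3 = 5993), the
 * remainder 993 >= HZ/4 forces a round up to 6000, and removing the skew
 * again yields 5997. cpu 0 gets 6000 for the same input, so the two CPUs
 * fire 3 jiffies apart instead of hitting the same lock and cachelines
 * together.
 */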

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
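
/*
 * Illustrative use of the rounding helpers above (stats_timer is a
 * hypothetical timer): a housekeeping timer that should tick roughly once
 * per second, where batching wakeups on whole seconds beats accuracy:
 *
 *	mod_timer(&stats_timer, round_jiffies(jiffies + HZ));
 *
 * The _up variants suit timeouts that may fire late but never early.
 */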

static inline unsigned int timer_get_idx(struct timer_list *timer)
{
	return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
}

static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
{
	timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
			idx << TIMER_ARRAYSHIFT;
}

/*
 * Helper function to calculate the array index for a given expiry
 * time.
 */
static inline unsigned calc_index(unsigned expires, unsigned lvl)
{
	expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
	return LVL_OFFS(lvl) + (expires & LVL_MASK);
}

static int calc_wheel_index(unsigned long expires, unsigned long clk)
{
	unsigned long delta = expires - clk;
	unsigned int idx;

	if (delta < LVL_START(1)) {
		idx = calc_index(expires, 0);
	} else if (delta < LVL_START(2)) {
		idx = calc_index(expires, 1);
	} else if (delta < LVL_START(3)) {
		idx = calc_index(expires, 2);
	} else if (delta < LVL_START(4)) {
		idx = calc_index(expires, 3);
	} else if (delta < LVL_START(5)) {
		idx = calc_index(expires, 4);
	} else if (delta < LVL_START(6)) {
		idx = calc_index(expires, 5);
	} else if (delta < LVL_START(7)) {
		idx = calc_index(expires, 6);
	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
		idx = calc_index(expires, 7);
	} else if ((long) delta < 0) {
		idx = clk & LVL_MASK;
	} else {
		/*
		 * Force expire obscene large timeouts to expire at the
		 * capacity limit of the wheel.
		 */
		if (delta >= WHEEL_TIMEOUT_CUTOFF)
			expires = clk + WHEEL_TIMEOUT_MAX;

		idx = calc_index(expires, LVL_DEPTH - 1);
	}
	return idx;
}

/*
 * Enqueue the timer into the hash bucket, mark it pending in
 * the bitmap and store the index in the timer flags.
 */
static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
			  unsigned int idx)
{
	hlist_add_head(&timer->entry, base->vectors + idx);
	__set_bit(idx, base->pending_map);
	timer_set_idx(timer, idx);

	trace_timer_start(timer, timer->expires, timer->flags);
}

static void
__internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	unsigned int idx;

	idx = calc_wheel_index(timer->expires, base->clk);
	enqueue_timer(base, timer, idx);
}
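
/*
 * Worked example for calc_wheel_index() (illustrative, assuming HZ=1000):
 * with clk = 10000 and expires = 11000, delta = 1000 falls between
 * LVL_START(2) = 504 and LVL_START(3) = 4032, so calc_index(11000, 2) is
 * used: (11000 + 64) >> 6 = 172, and 172 & 63 = 44, giving bucket
 * LVL_OFFS(2) + 44 = 172. Note that adding one full granularity unit in
 * calc_index() rounds the expiry up, so the timer can never fire early.
 */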

static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
	if (!is_timers_nohz_active())
		return;

	/*
	 * TODO: This wants some optimizing similar to the code below, but we
	 * will do that when we switch from push to pull for deferrable timers.
	 */
	if (timer->flags & TIMER_DEFERRABLE) {
		if (tick_nohz_full_cpu(base->cpu))
			wake_up_nohz_cpu(base->cpu);
		return;
	}

	/*
	 * We might have to IPI the remote CPU if the base is idle and the
	 * timer is not deferrable. If the other CPU is on the way to idle
	 * then it can't set base->is_idle as we hold the base lock:
	 */
	if (!base->is_idle)
		return;

	/* Check whether this is the new first expiring timer: */
	if (time_after_eq(timer->expires, base->next_expiry))
		return;

	/*
	 * Set the next expiry time and kick the CPU so it can reevaluate the
	 * wheel:
	 */
	if (time_before(timer->expires, base->clk)) {
		/*
		 * Prevent from forward_timer_base() moving the base->clk
		 * backward
		 */
		base->next_expiry = base->clk;
	} else {
		base->next_expiry = timer->expires;
	}
	wake_up_nohz_cpu(base->cpu);
}

static void
internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	__internal_add_timer(base, timer);
	trigger_dyntick_cpu(base, timer);
}

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

static bool timer_is_static_object(void *addr)
{
	struct timer_list *timer = addr;

	return (timer->entry.pprev == NULL &&
		timer->entry.next == TIMER_ENTRY_STATIC);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(struct timer_list *unused)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		timer_setup(timer, stub_timer, 0);
		return true;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		/* fall through */
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		timer_setup(timer, stub_timer, 0);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.is_static_object	= timer_is_static_object,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer,
			  void (*func)(struct timer_list *),
			  unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
			     void (*func)(struct timer_list *),
			     unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer,
			  void (*func)(struct timer_list *),
			  unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	timer->entry.pprev = NULL;
	timer->function = func;
	timer->flags = flags | raw_smp_processor_id();
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @func: timer callback function
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
		    void (*func)(struct timer_list *), unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
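
/*
 * Callers normally reach init_timer_key() through the timer_setup() macro
 * from <linux/timer.h>. An illustrative (hypothetical) user looks like:
 *
 *	struct foo {
 *		struct timer_list timer;
 *	};
 *
 *	static void foo_timeout(struct timer_list *t)
 *	{
 *		struct foo *foo = from_timer(foo, t, timer);
 *		// handle the timeout; foo is the enclosing object
 *	}
 *
 *	timer_setup(&foo->timer, foo_timeout, 0);
 *	mod_timer(&foo->timer, jiffies + msecs_to_jiffies(100));
 */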

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct hlist_node *entry = &timer->entry;

	debug_deactivate(timer);

	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}

static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
			     bool clear_pending)
{
	unsigned idx = timer_get_idx(timer);

	if (!timer_pending(timer))
		return 0;

	if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
		__clear_bit(idx, base->pending_map);

	detach_timer(timer, clear_pending);
	return 1;
}

static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
{
	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
	return base;
}

static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
	return base;
}

static inline struct timer_base *get_timer_base(u32 tflags)
{
	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}

static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	if (static_branch_likely(&timers_migration_enabled) &&
	    !(tflags & TIMER_PINNED))
		return get_timer_cpu_base(tflags, get_nohz_timer_target());
#endif
	return get_timer_this_cpu_base(tflags);
}

static inline void forward_timer_base(struct timer_base *base)
{
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long jnow;

	/*
	 * We only forward the base when we are idle or have just come out of
	 * idle (must_forward_clk logic), and have a delta between base clock
	 * and jiffies. In the common case, run_timers will take care of it.
	 */
	if (likely(!base->must_forward_clk))
		return;

	jnow = READ_ONCE(jiffies);
	base->must_forward_clk = base->is_idle;
	if ((long)(jnow - base->clk) < 2)
		return;

	/*
	 * If the next expiry value is > jiffies, then we fast forward to
	 * jiffies otherwise we forward to the next expiry value.
	 */
	if (time_after(base->next_expiry, jnow)) {
		base->clk = jnow;
	} else {
		if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
			return;
		base->clk = base->next_expiry;
	}
#endif
}

/*
 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
 * that all timers which are tied to this base are locked, and the base itself
 * is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found in the base->vectors array.
 *
 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
 * to wait until the migration is done.
 */
static struct timer_base *lock_timer_base(struct timer_list *timer,
					  unsigned long *flags)
	__acquires(timer->base->lock)
{
	for (;;) {
		struct timer_base *base;
		u32 tf;

		/*
		 * We need to use READ_ONCE() here, otherwise the compiler
		 * might re-read @tf between the check for TIMER_MIGRATING
		 * and spin_lock().
		 */
		tf = READ_ONCE(timer->flags);

		if (!(tf & TIMER_MIGRATING)) {
			base = get_timer_base(tf);
			raw_spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
			raw_spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

#define MOD_TIMER_PENDING_ONLY		0x01
#define MOD_TIMER_REDUCE		0x02
#define MOD_TIMER_NOTPENDING		0x04

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
{
	struct timer_base *base, *new_base;
	unsigned int idx = UINT_MAX;
	unsigned long clk = 0, flags;
	int ret = 0;

	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the networking code - if
	 * the timer is re-modified to have the same timeout or ends up in the
	 * same array bucket then just return:
	 */
	if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) {
		/*
		 * The downside of this optimization is that it can result in
		 * larger granularity than you would get from adding a new
		 * timer with this expiry.
		 */
		long diff = timer->expires - expires;

		if (!diff)
			return 1;
		if (options & MOD_TIMER_REDUCE && diff <= 0)
			return 1;

		/*
		 * We lock timer base and calculate the bucket index right
		 * here. If the timer ends up in the same bucket, then we
		 * just update the expiry time and avoid the whole
		 * dequeue/enqueue dance.
		 */
		base = lock_timer_base(timer, &flags);
		forward_timer_base(base);

		if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
		    time_before_eq(timer->expires, expires)) {
			ret = 1;
			goto out_unlock;
		}

		clk = base->clk;
		idx = calc_wheel_index(expires, clk);

		/*
		 * Retrieve and compare the array index of the pending
		 * timer. If it matches set the expiry to the new value so a
		 * subsequent call will exit in the expires check above.
		 */
		if (idx == timer_get_idx(timer)) {
			if (!(options & MOD_TIMER_REDUCE))
				timer->expires = expires;
			else if (time_after(timer->expires, expires))
				timer->expires = expires;
			ret = 1;
			goto out_unlock;
		}
	} else {
		base = lock_timer_base(timer, &flags);
		forward_timer_base(base);
	}

	ret = detach_if_pending(timer, base, false);
	if (!ret && (options & MOD_TIMER_PENDING_ONLY))
		goto out_unlock;

	new_base = get_target_base(base, timer->flags);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the new base.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not finished yet. This also guarantees that the
		 * timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->flags |= TIMER_MIGRATING;

			raw_spin_unlock(&base->lock);
			base = new_base;
			raw_spin_lock(&base->lock);
			WRITE_ONCE(timer->flags,
				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
			forward_timer_base(base);
		}
	}

	debug_timer_activate(timer);

	timer->expires = expires;
	/*
	 * If 'idx' was calculated above and the base time did not advance
	 * between calculating 'idx' and possibly switching the base, only
	 * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise
	 * we need to (re)calculate the wheel index via
	 * internal_add_timer().
	 */
	if (idx != UINT_MAX && clk == base->clk) {
		enqueue_timer(base, timer, idx);
		trigger_dyntick_cpu(base, timer);
	} else {
		internal_add_timer(base, timer);
	}

out_unlock:
	raw_spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
}
EXPORT_SYMBOL(mod_timer_pending);

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_timer);

/**
 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
 * @timer: The timer to be modified
 * @expires: New timeout in jiffies
 *
 * timer_reduce() is very similar to mod_timer(), except that it will only
 * modify a running timer if that would reduce the expiration time (it will
 * start a timer that isn't running).
 */
int timer_reduce(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
}
EXPORT_SYMBOL(timer_reduce);
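
/*
 * Illustrative contrast of the two interfaces above (t is a hypothetical
 * timer):
 *
 *	mod_timer(&t, jiffies + 2 * HZ);	// (re)arm ~2s out
 *	timer_reduce(&t, jiffies + HZ);		// pulled in to ~1s
 *	timer_reduce(&t, jiffies + 3 * HZ);	// no-op, ~1s is earlier
 *	mod_timer(&t, jiffies + 3 * HZ);	// pushed out to ~3s
 */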

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(@timer) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function fields must be set prior to calling
 * this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	__mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct timer_base *new_base, *base;
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);

	new_base = get_timer_cpu_base(timer->flags, cpu);

	/*
	 * If @timer was on a different CPU, it should be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked. See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		raw_spin_unlock(&base->lock);
		base = new_base;
		raw_spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}
	forward_timer_base(base);

	debug_timer_activate(timer);
	internal_add_timer(base, timer);
	raw_spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		raw_spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer)
		ret = detach_if_pending(timer, base, true);

	raw_spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_PREEMPT_RT
static __init void timer_base_init_expiry_lock(struct timer_base *base)
{
	spin_lock_init(&base->expiry_lock);
}

static inline void timer_base_lock_expiry(struct timer_base *base)
{
	spin_lock(&base->expiry_lock);
}

static inline void timer_base_unlock_expiry(struct timer_base *base)
{
	spin_unlock(&base->expiry_lock);
}

/*
 * The counterpart to del_timer_wait_running().
 *
 * If there is a waiter for base->expiry_lock, then it was waiting for the
 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
 * the waiter to acquire the lock and make progress.
 */
static void timer_sync_wait_running(struct timer_base *base)
{
	if (atomic_read(&base->timer_waiters)) {
		spin_unlock(&base->expiry_lock);
		spin_lock(&base->expiry_lock);
	}
}

/*
 * This function is called on PREEMPT_RT kernels when the fast path
 * deletion of a timer failed because the timer callback function was
 * running.
 *
 * This prevents priority inversion, if the softirq thread on a remote CPU
 * got preempted, and it prevents a live lock when the task which tries to
 * delete a timer preempted the softirq thread running the timer callback
 * function.
 */
static void del_timer_wait_running(struct timer_list *timer)
{
	u32 tf;

	tf = READ_ONCE(timer->flags);
	if (!(tf & TIMER_MIGRATING)) {
		struct timer_base *base = get_timer_base(tf);

		/*
		 * Mark the base as contended and grab the expiry lock,
		 * which is held by the softirq across the timer
		 * callback. Drop the lock immediately so the softirq can
		 * expire the next timer. In theory the timer could already
		 * be running again, but that's more than unlikely and just
		 * causes another wait loop.
		 */
		atomic_inc(&base->timer_waiters);
		spin_lock_bh(&base->expiry_lock);
		atomic_dec(&base->timer_waiters);
		spin_unlock_bh(&base->expiry_lock);
	}
}
#else
static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
static inline void timer_base_lock_expiry(struct timer_base *base) { }
static inline void timer_base_unlock_expiry(struct timer_base *base) { }
static inline void timer_sync_wait_running(struct timer_base *base) { }
static inline void del_timer_wait_running(struct timer_list *timer) { }
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 * interrupt context while calling this function. Even if the lock has
 * nothing to do with the timer in question. Here's why::
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                        spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *    while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	int ret;

#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));

	do {
		ret = try_to_del_timer_sync(timer);

		if (unlikely(ret < 0)) {
			del_timer_wait_running(timer);
			cpu_relax();
		}
	} while (ret < 0);

	return ret;
}
EXPORT_SYMBOL(del_timer_sync);
#endif
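
/*
 * Illustrative teardown sequence using del_timer_sync() (foo is a
 * hypothetical driver object): stop new arming first, then wait out a
 * possibly running callback, and only then free the enclosing object:
 *
 *	foo->shutting_down = true;	// callback checks this, won't rearm
 *	del_timer_sync(&foo->timer);	// also waits for a running handler
 *	kfree(foo);			// now safe, handler can't run again
 */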

static void call_timer_fn(struct timer_list *timer,
			  void (*fn)(struct timer_list *),
			  unsigned long baseclk)
{
	int count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer, baseclk);
	fn(timer);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (count != preempt_count()) {
		WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n",
			  fn, count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count_set(count);
	}
}
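
/*
 * The preempt_count() check above catches callbacks that leak atomic
 * context. An illustrative (hypothetical) buggy callback:
 *
 *	static void bad_timeout(struct timer_list *t)
 *	{
 *		spin_lock(&some_lock);
 *		return;			// missing spin_unlock()!
 *	}
 *
 * The unbalanced lock leaves preempt_count() elevated, which triggers the
 * WARN_ONCE() and the forced restore of the entry count.
 */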

static void expire_timers(struct timer_base *base, struct hlist_head *head)
{
	/*
	 * This value is required only for tracing. base->clk was
	 * incremented directly before expire_timers was called. But expiry
	 * is related to the old base->clk value.
	 */
	unsigned long baseclk = base->clk - 1;

	while (!hlist_empty(head)) {
		struct timer_list *timer;
		void (*fn)(struct timer_list *);

		timer = hlist_entry(head->first, struct timer_list, entry);

		base->running_timer = timer;
		detach_timer(timer, true);

		fn = timer->function;

		if (timer->flags & TIMER_IRQSAFE) {
			raw_spin_unlock(&base->lock);
			call_timer_fn(timer, fn, baseclk);
			base->running_timer = NULL;
			raw_spin_lock(&base->lock);
		} else {
			raw_spin_unlock_irq(&base->lock);
			call_timer_fn(timer, fn, baseclk);
			base->running_timer = NULL;
			timer_sync_wait_running(base);
			raw_spin_lock_irq(&base->lock);
		}
	}
}

static int __collect_expired_timers(struct timer_base *base,
				    struct hlist_head *heads)
{
	unsigned long clk = base->clk;
	struct hlist_head *vec;
	int i, levels = 0;
	unsigned int idx;

	for (i = 0; i < LVL_DEPTH; i++) {
		idx = (clk & LVL_MASK) + i * LVL_SIZE;

		if (__test_and_clear_bit(idx, base->pending_map)) {
			vec = base->vectors + idx;
			hlist_move_list(vec, heads++);
			levels++;
		}
		/* Is it time to look at the next level? */
		if (clk & LVL_CLK_MASK)
			break;
		/* Shift clock for the next level granularity */
		clk >>= LVL_CLK_SHIFT;
	}
	return levels;
}
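
/*
 * Worked example for __collect_expired_timers(): with base->clk = 64 the
 * loop checks level 0 bucket (64 & 63) = 0, then since 64 & LVL_CLK_MASK
 * == 0 shifts to clk = 8 and checks level 1 bucket 8 + 64 = 72, then
 * shifts to clk = 1 and checks level 2 bucket 1 + 128 = 129, where
 * 1 & LVL_CLK_MASK != 0 ends the scan. Levels above 0 are thus only
 * looked at every LVL_CLK_DIV^level ticks.
 */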

#ifdef CONFIG_NO_HZ_COMMON
/*
 * Find the next pending bucket of a level. Search from level start (@offset)
 * + @clk upwards and if nothing there, search from start of the level
 * (@offset) up to @offset + clk.
 */
static int next_pending_bucket(struct timer_base *base, unsigned offset,
			       unsigned clk)
{
	unsigned pos, start = offset + clk;
	unsigned end = offset + LVL_SIZE;

	pos = find_next_bit(base->pending_map, end, start);
	if (pos < end)
		return pos - start;

	pos = find_next_bit(base->pending_map, start, offset);
	return pos < start ? pos + LVL_SIZE - start : -1;
}

/*
 * Search the first expiring timer in the various clock levels. Caller must
 * hold base->lock.
 */
static unsigned long __next_timer_interrupt(struct timer_base *base)
{
	unsigned long clk, next, adj;
	unsigned lvl, offset = 0;

	next = base->clk + NEXT_TIMER_MAX_DELTA;
	clk = base->clk;
	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
		int pos = next_pending_bucket(base, offset, clk & LVL_MASK);

		if (pos >= 0) {
			unsigned long tmp = clk + (unsigned long) pos;

			tmp <<= LVL_SHIFT(lvl);
			if (time_before(tmp, next))
				next = tmp;
		}
		/*
		 * Clock for the next level. If the current level clock lower
		 * bits are zero, we look at the next level as is. If not we
		 * need to advance it by one because that's going to be the
		 * next expiring bucket in that level. base->clk is the next
		 * expiring jiffie. So in case of:
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
		 *  0    0    0    0    0    0
		 *
		 * we have to look at all levels @index 0. With
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
		 *  0    0    0    0    0    2
		 *
		 * LVL0 has the next expiring bucket @index 2. The upper
		 * levels have the next expiring bucket @index 1.
		 *
		 * In case that the propagation wraps the next level the same
		 * rules apply:
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
		 *  0    0    0    0    F    2
		 *
		 * So after looking at LVL0 we get:
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1
		 *  0    0    0    1    0
		 *
		 * So no propagation from LVL1 to LVL2 because that happened
		 * with the add already, but then we need to propagate further
		 * from LVL2 to LVL3.
		 *
		 * So the simple check whether the lower bits of the current
		 * level are 0 or not is sufficient for all cases.
		 */
		adj = clk & LVL_CLK_MASK ? 1 : 0;
		clk >>= LVL_CLK_SHIFT;
		clk += adj;
	}
	return next;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
{
	u64 nextevt = hrtimer_get_next_event();

	/*
	 * If high resolution timers are enabled
	 * hrtimer_get_next_event() returns KTIME_MAX.
	 */
	if (expires <= nextevt)
		return expires;

	/*
	 * If the next timer is already expired, return the tick base
	 * time so the tick is fired immediately.
	 */
	if (nextevt <= basem)
		return basem;

	/*
	 * Round up to the next jiffie. High resolution timers are
	 * off, so the hrtimers are expired in the tick and we need to
	 * make sure that this tick really expires the timer to avoid
	 * a ping pong of the nohz stop code.
	 *
	 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
	 */
	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
}
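
/*
 * Worked example for the round up above (illustrative, assuming HZ=1000,
 * i.e. TICK_NSEC = 1000000): a low resolution hrtimer due at nextevt =
 * 2500000 ns is rounded to DIV_ROUND_UP_ULL(2500000, 1000000) * 1000000 =
 * 3000000 ns, the first tick boundary that is guaranteed to actually
 * expire it.
 */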

/**
 * get_next_timer_interrupt - return the time (clock mono) of the next timer
 * @basej: base time jiffies
 * @basem: base time clock monotonic
 *
 * Returns the tick aligned clock monotonic time of the next pending
 * timer or KTIME_MAX if no timer is pending.
 */
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
	u64 expires = KTIME_MAX;
	unsigned long nextevt;
	bool is_max_delta;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	raw_spin_lock(&base->lock);
	nextevt = __next_timer_interrupt(base);
	is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
	base->next_expiry = nextevt;
	/*
	 * We have a fresh next event. Check whether we can forward the
	 * base. We can only do that when @basej is past base->clk
	 * otherwise we might rewind base->clk.
	 */
	if (time_after(basej, base->clk)) {
		if (time_after(nextevt, basej))
			base->clk = basej;
		else if (time_after(nextevt, base->clk))
			base->clk = nextevt;
	}

	if (time_before_eq(nextevt, basej)) {
		expires = basem;
		base->is_idle = false;
	} else {
		if (!is_max_delta)
			expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
		/*
		 * If we expect to sleep more than a tick, mark the base idle.
		 * Also the tick is stopped so any added timer must forward
		 * the base clk itself to keep granularity small. This idle
		 * logic is only maintained for the BASE_STD base, deferrable
		 * timers may still see large granularity skew (by design).
		 */
		if ((expires - basem) > TICK_NSEC) {
			base->must_forward_clk = true;
			base->is_idle = true;
		}
	}
	raw_spin_unlock(&base->lock);

	return cmp_next_hrtimer_event(basem, expires);
}

/**
 * timer_clear_idle - Clear the idle state of the timer base
 *
 * Called with interrupts disabled
 */
void timer_clear_idle(void)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	/*
	 * We do this unlocked. The worst outcome is a remote enqueue sending
	 * a pointless IPI, but taking the lock would just make the window for
	 * sending the IPI a few instructions smaller for the cost of taking
	 * the lock in the exit from idle path.
	 */
	base->is_idle = false;
}

static int collect_expired_timers(struct timer_base *base,
				  struct hlist_head *heads)
{
	unsigned long now = READ_ONCE(jiffies);

	/*
	 * NOHZ optimization. After a long idle sleep we need to forward the
	 * base to current jiffies. Avoid a loop by searching the bitfield for
	 * the next expiring timer.
	 */
	if ((long)(now - base->clk) > 2) {
		unsigned long next = __next_timer_interrupt(base);

		/*
		 * If the next timer is ahead of time forward to current
		 * jiffies, otherwise forward to the next expiry time:
		 */
		if (time_after(next, now)) {
			/*
			 * The call site will increment base->clk and then
			 * terminate the expiry loop immediately.
			 */
			base->clk = now;
			return 0;
		}
		base->clk = next;
	}
	return __collect_expired_timers(base, heads);
}
#else
static inline int collect_expired_timers(struct timer_base *base,
					 struct hlist_head *heads)
{
	return __collect_expired_timers(base, heads);
}
#endif
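
/*
 * Worked example for the NOHZ fast forward above (illustrative, assuming
 * HZ=1000): after 10s of idle base->clk lags jiffies by 10000 ticks.
 * Instead of stepping the wheel through 10000 empty positions,
 * __next_timer_interrupt() finds the first pending expiry directly; if it
 * is still in the future, base->clk jumps straight to now and the expiry
 * loop in __run_timers() terminates after a single pass.
 */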
1771	 *
1772	 * The fact that the deferrable base is never forwarded can cause
1773	 * large variations in granularity for deferrable timers, but they
1774	 * can be deferred for long periods due to idle anyway.
1775	 */
1776	base->must_forward_clk = false;
1777
1778	while (time_after_eq(jiffies, base->clk)) {
1779
1780		levels = collect_expired_timers(base, heads);
1781		base->clk++;
1782
1783		while (levels--)
1784			expire_timers(base, heads + levels);
1785	}
1786	raw_spin_unlock_irq(&base->lock);
1787	timer_base_unlock_expiry(base);
1788 }
1789
1790 /*
1791  * This function runs expired timers in bottom half context (the timer softirq).
1792  */
1793 static __latent_entropy void run_timer_softirq(struct softirq_action *h)
1794 {
1795	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1796
1797	__run_timers(base);
1798	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
1799		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
1800 }
1801
1802 /*
1803  * Called by the local, per-CPU timer interrupt on SMP.
1804  */
1805 void run_local_timers(void)
1806 {
1807	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1808
1809	hrtimer_run_queues();
1810	/* Raise the softirq only if required. */
1811	if (time_before(jiffies, base->clk)) {
1812		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
1813			return;
1814		/* CPU is awake, so check the deferrable base. */
1815		base++;
1816		if (time_before(jiffies, base->clk))
1817			return;
1818	}
1819	raise_softirq(TIMER_SOFTIRQ);
1820 }
1821
1822 /*
1823  * Since schedule_timeout()'s timer is defined on the stack, it must store
1824  * the target task on the stack as well.
1825  */
1826 struct process_timer {
1827	struct timer_list timer;
1828	struct task_struct *task;
1829 };
1830
1831 static void process_timeout(struct timer_list *t)
1832 {
1833	struct process_timer *timeout = from_timer(timeout, t, timer);
1834
1835	wake_up_process(timeout->task);
1836 }
1837
1838 /**
1839  * schedule_timeout - sleep until timeout
1840  * @timeout: timeout value in jiffies
1841  *
1842  * Make the current task sleep until @timeout jiffies have elapsed.
1843  * The function behavior depends on the current task state
1844  * (see also set_current_state() description):
1845  *
1846  * %TASK_RUNNING - the scheduler is called, but the task does not sleep
1847  * at all. That happens because sched_submit_work() does nothing for
1848  * tasks in %TASK_RUNNING state.
1849  *
1850  * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1851  * pass before the routine returns unless the current task is explicitly
1852  * woken up (e.g. by wake_up_process()).
1853  *
1854  * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1855  * delivered to the current task or the current task is explicitly woken
1856  * up.
1857  *
1858  * The current task state is guaranteed to be %TASK_RUNNING when this
1859  * routine returns.
1860  *
1861  * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1862  * the CPU away without a bound on the timeout. In this case the return
1863  * value will be %MAX_SCHEDULE_TIMEOUT.
1864  *
1865  * Returns 0 when the timer has expired; otherwise the remaining time in
1866  * jiffies is returned. In all cases the return value is guaranteed
1867  * to be non-negative.
1868  */
1869 signed long __sched schedule_timeout(signed long timeout)
1870 {
1871	struct process_timer timer;
1872	unsigned long expire;
1873
1874	switch (timeout)
1875	{
1876	case MAX_SCHEDULE_TIMEOUT:
1877		/*
1878		 * These special cases are here purely for the caller's
1879		 * convenience. Nothing more.
We could have made
1880		 * MAX_SCHEDULE_TIMEOUT one of the negative values instead,
1881		 * but I'd like to return a valid offset (>= 0) to allow
1882		 * the caller to do everything it wants with the retval.
1883		 */
1884		schedule();
1885		goto out;
1886	default:
1887		/*
1888		 * Another bit of paranoia. Note that the retval will be
1889		 * 0, since no piece of the kernel is supposed to check
1890		 * for a negative retval of schedule_timeout() (it should
1891		 * never happen anyway). The printk() just tells you if
1892		 * something has gone wrong, and where.
1893		 */
1894		if (timeout < 0) {
1895			printk(KERN_ERR "schedule_timeout: wrong timeout value %lx\n",
1896			       timeout);
1897			dump_stack();
1898			current->state = TASK_RUNNING;
1899			goto out;
1900		}
1901	}
1902
1903	expire = timeout + jiffies;
1904
1905	timer.task = current;
1906	timer_setup_on_stack(&timer.timer, process_timeout, 0);
1907	__mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
1908	schedule();
1909	del_singleshot_timer_sync(&timer.timer);
1910
1911	/* Remove the timer from the object tracker */
1912	destroy_timer_on_stack(&timer.timer);
1913
1914	timeout = expire - jiffies;
1915
1916  out:
1917	return timeout < 0 ? 0 : timeout;
1918 }
1919 EXPORT_SYMBOL(schedule_timeout);
1920
1921 /*
1922  * We can use __set_current_state() here because schedule_timeout() calls
1923  * schedule() unconditionally.
1924  */
1925 signed long __sched schedule_timeout_interruptible(signed long timeout)
1926 {
1927	__set_current_state(TASK_INTERRUPTIBLE);
1928	return schedule_timeout(timeout);
1929 }
1930 EXPORT_SYMBOL(schedule_timeout_interruptible);
1931
1932 signed long __sched schedule_timeout_killable(signed long timeout)
1933 {
1934	__set_current_state(TASK_KILLABLE);
1935	return schedule_timeout(timeout);
1936 }
1937 EXPORT_SYMBOL(schedule_timeout_killable);
1938
1939 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1940 {
1941	__set_current_state(TASK_UNINTERRUPTIBLE);
1942	return schedule_timeout(timeout);
1943 }
1944 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1945
1946 /*
1947  * Like schedule_timeout_uninterruptible(), except this task will not
1948  * contribute to load average.
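 *
 * An illustrative polling loop (flush_done() is a hypothetical helper
 * used only for this sketch, not part of the kernel):
 *
 *	while (!flush_done(dev))
 *		schedule_timeout_idle(msecs_to_jiffies(20));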
1949  */
1950 signed long __sched schedule_timeout_idle(signed long timeout)
1951 {
1952	__set_current_state(TASK_IDLE);
1953	return schedule_timeout(timeout);
1954 }
1955 EXPORT_SYMBOL(schedule_timeout_idle);
1956
1957 #ifdef CONFIG_HOTPLUG_CPU
1958 static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
1959 {
1960	struct timer_list *timer;
1961	int cpu = new_base->cpu;
1962
1963	while (!hlist_empty(head)) {
1964		timer = hlist_entry(head->first, struct timer_list, entry);
1965		detach_timer(timer, false);
1966		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
1967		internal_add_timer(new_base, timer);
1968	}
1969 }
1970
1971 int timers_prepare_cpu(unsigned int cpu)
1972 {
1973	struct timer_base *base;
1974	int b;
1975
1976	for (b = 0; b < NR_BASES; b++) {
1977		base = per_cpu_ptr(&timer_bases[b], cpu);
1978		base->clk = jiffies;
1979		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
1980		base->is_idle = false;
1981		base->must_forward_clk = true;
1982	}
1983	return 0;
1984 }
1985
1986 int timers_dead_cpu(unsigned int cpu)
1987 {
1988	struct timer_base *old_base;
1989	struct timer_base *new_base;
1990	int b, i;
1991
1992	BUG_ON(cpu_online(cpu));
1993
1994	for (b = 0; b < NR_BASES; b++) {
1995		old_base = per_cpu_ptr(&timer_bases[b], cpu);
1996		new_base = get_cpu_ptr(&timer_bases[b]);
1997		/*
1998		 * The caller is globally serialized and nobody else
1999		 * takes two locks at once, so deadlock is not possible.
2000		 */
2001		raw_spin_lock_irq(&new_base->lock);
2002		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
2003
2004		/*
2005		 * The current CPU's base clock might be stale. Update it
2006		 * before moving the timers over.
2007		 */
2008		forward_timer_base(new_base);
2009
2010		BUG_ON(old_base->running_timer);
2011
2012		for (i = 0; i < WHEEL_SIZE; i++)
2013			migrate_timer_list(new_base, old_base->vectors + i);
2014
2015		raw_spin_unlock(&old_base->lock);
2016		raw_spin_unlock_irq(&new_base->lock);
2017		put_cpu_ptr(&timer_bases);
2018	}
2019	return 0;
2020 }
2021
2022 #endif /* CONFIG_HOTPLUG_CPU */
2023
2024 static void __init init_timer_cpu(int cpu)
2025 {
2026	struct timer_base *base;
2027	int i;
2028
2029	for (i = 0; i < NR_BASES; i++) {
2030		base = per_cpu_ptr(&timer_bases[i], cpu);
2031		base->cpu = cpu;
2032		raw_spin_lock_init(&base->lock);
2033		base->clk = jiffies;
2034		timer_base_init_expiry_lock(base);
2035	}
2036 }
2037
2038 static void __init init_timer_cpus(void)
2039 {
2040	int cpu;
2041
2042	for_each_possible_cpu(cpu)
2043		init_timer_cpu(cpu);
2044 }
2045
2046 void __init init_timers(void)
2047 {
2048	init_timer_cpus();
2049	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
2050 }
2051
2052 /**
2053  * msleep - sleep safely even with waitqueue interruptions
2054  * @msecs: Time in milliseconds to sleep for
2055  */
2056 void msleep(unsigned int msecs)
2057 {
2058	/* The +1 guarantees a minimum sleep: the current jiffy may be nearly over. */
2059	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
2060
2061	while (timeout)
2062		timeout = schedule_timeout_uninterruptible(timeout);
2063 }
2064 EXPORT_SYMBOL(msleep);
2065
2066 /**
2067  * msleep_interruptible - sleep waiting for signals
2068  * @msecs: Time in milliseconds to sleep for
2069  */
2070 unsigned long msleep_interruptible(unsigned int msecs)
2071 {
2072	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
2073
2074	while (timeout && !signal_pending(current))
2075		timeout = schedule_timeout_interruptible(timeout);
2076	return jiffies_to_msecs(timeout);
2077 }
2078
2079 EXPORT_SYMBOL(msleep_interruptible);
2080
2081 /**
2082  * usleep_range - Sleep for an approximate time
2083
* @min: Minimum time in usecs to sleep 2084 * @max: Maximum time in usecs to sleep 2085 * 2086 * In non-atomic context where the exact wakeup time is flexible, use 2087 * usleep_range() instead of udelay(). The sleep improves responsiveness 2088 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces 2089 * power usage by allowing hrtimers to take advantage of an already- 2090 * scheduled interrupt instead of scheduling a new one just for this sleep. 2091 */ 2092 void __sched usleep_range(unsigned long min, unsigned long max) 2093 { 2094 ktime_t exp = ktime_add_us(ktime_get(), min); 2095 u64 delta = (u64)(max - min) * NSEC_PER_USEC; 2096 2097 for (;;) { 2098 __set_current_state(TASK_UNINTERRUPTIBLE); 2099 /* Do not return before the requested sleep time has elapsed */ 2100 if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS)) 2101 break; 2102 } 2103 } 2104 EXPORT_SYMBOL(usleep_range); 2105
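/*
 * Illustrative sketch of choosing between the sleep helpers above;
 * dev_ready() is a hypothetical helper used only for this example.
 *
 * For waits below roughly 10ms, usleep_range() is hrtimer based: it
 * avoids the busy-wait of udelay() and lets the wakeup coalesce with
 * other events anywhere inside [min, max]:
 *
 *	while (!dev_ready(dev))
 *		usleep_range(100, 200);
 *
 * For waits of ~10ms and up, msleep() is backed by the timer wheel;
 * jiffy granularity is coarse, but adequate at that scale:
 *
 *	msleep(50);
 */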