// SPDX-License-Identifier: GPL-2.0
/*
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/random.h>
#include <linux/sysctl.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
 * level has a different granularity.
 *
 * The level granularity is:		LVL_CLK_DIV ^ level
 * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
 *
 * The array level of a newly armed timer depends on the relative expiry
 * time. The farther away the expiry time is, the higher the array level and
 * therefore the coarser the granularity becomes.
 *
 * Contrary to the original timer wheel implementation, which aims for 'exact'
 * expiry of the timers, this implementation removes the need for recascading
 * the timers into the lower array levels. The previous 'classic' timer wheel
 * implementation of the kernel already violated the 'exact' expiry by adding
 * slack to the expiry time to provide batched expiration. The granularity
 * levels provide implicit batching.
 *
 * This is an optimization of the original timer wheel implementation for the
 * majority of the timer wheel use cases: timeouts. The vast majority of
 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
 * the timeout expires, it indicates that normal operation is disturbed, so it
 * does not matter much whether the timeout comes with a slight delay.
 *
 * The only exception to this are networking timers with a small expiry
 * time. They rely on the granularity. Those fit into the first wheel level,
 * which has HZ granularity.
 *
 * We don't have cascading anymore. Timers with an expiry time above the
 * capacity of the last wheel level are force expired at the maximum timeout
 * value of the last wheel level. From data sampling we know that the maximum
 * value observed is 5 days (network connection tracking), so this should not
 * be an issue.
 *
 * The currently chosen array constants are a good compromise between array
 * size and granularity.
 *
 * This results in the following granularity and range levels:
 *
 * HZ 1000 steps
 * Level Offset  Granularity            Range
 *  0      0         1 ms                0 ms -         63 ms
 *  1     64         8 ms               64 ms -        511 ms
 *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
 *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
 *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
 *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
 *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
 *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
 *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
 *
 * HZ  300
 * Level Offset  Granularity            Range
 *  0      0         3 ms                0 ms -        210 ms
 *  1     64        26 ms              213 ms -       1703 ms (213ms - ~1s)
 *  2    128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
 *  3    192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
 *  4    256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
 *  5    320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
 *  6    384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
 *  7    448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
 *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 *
 * HZ  250
 * Level Offset  Granularity            Range
 *  0      0         4 ms                0 ms -        255 ms
 *  1     64        32 ms              256 ms -       2047 ms (256ms - ~2s)
 *  2    128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
 *  3    192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
 *  4    256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
 *  5    320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
 *  6    384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
 *  7    448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
 *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 *
 * HZ  100
 * Level Offset  Granularity            Range
 *  0      0        10 ms                0 ms -        630 ms
 *  1     64        80 ms              640 ms -       5110 ms (640ms - ~5s)
 *  2    128       640 ms             5120 ms -      40950 ms (~5s - ~40s)
 *  3    192      5120 ms (~5s)      40960 ms -     327670 ms (~40s - ~5m)
 *  4    256     40960 ms (~40s)    327680 ms -    2621430 ms (~5m - ~43m)
 *  5    320    327680 ms (~5m)    2621440 ms -   20971510 ms (~43m - ~5h)
 *  6    384   2621440 ms (~43m)  20971520 ms -  167772150 ms (~5h - ~1d)
 *  7    448  20971520 ms (~5h)  167772160 ms - 1342177270 ms (~1d - ~15d)
 */

/* Clock divisor for the next level */
#define LVL_CLK_SHIFT	3
#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))

/*
 * The time start value for each level to select the bucket at enqueue
 * time. We start from the last possible delta of the previous level
 * so that we can later add an extra LVL_GRAN(n) to n (see calc_index()).
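 *
 * An illustrative worked example, using the constants defined below
 * (LVL_BITS = 6, so LVL_SIZE = 64, and LVL_CLK_SHIFT = 3) at HZ=1000:
 * a timer armed 1000 jiffies ahead satisfies LVL_START(2) = 63 << 3 = 504
 * <= 1000 < LVL_START(3) = 63 << 6 = 4032, so it is queued in level 2 with
 * LVL_GRAN(2) = 1 << 6 = 64 jiffies (64 ms) granularity, which matches the
 * HZ 1000 table above.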
163 */ 164 #define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT)) 165 166 /* Size of each clock level */ 167 #define LVL_BITS 6 168 #define LVL_SIZE (1UL << LVL_BITS) 169 #define LVL_MASK (LVL_SIZE - 1) 170 #define LVL_OFFS(n) ((n) * LVL_SIZE) 171 172 /* Level depth */ 173 #if HZ > 100 174 # define LVL_DEPTH 9 175 # else 176 # define LVL_DEPTH 8 177 #endif 178 179 /* The cutoff (max. capacity of the wheel) */ 180 #define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH)) 181 #define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1)) 182 183 /* 184 * The resulting wheel size. If NOHZ is configured we allocate two 185 * wheels so we have a separate storage for the deferrable timers. 186 */ 187 #define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH) 188 189 #ifdef CONFIG_NO_HZ_COMMON 190 # define NR_BASES 2 191 # define BASE_STD 0 192 # define BASE_DEF 1 193 #else 194 # define NR_BASES 1 195 # define BASE_STD 0 196 # define BASE_DEF 0 197 #endif 198 199 struct timer_base { 200 raw_spinlock_t lock; 201 struct timer_list *running_timer; 202 #ifdef CONFIG_PREEMPT_RT 203 spinlock_t expiry_lock; 204 atomic_t timer_waiters; 205 #endif 206 unsigned long clk; 207 unsigned long next_expiry; 208 unsigned int cpu; 209 bool next_expiry_recalc; 210 bool is_idle; 211 bool timers_pending; 212 DECLARE_BITMAP(pending_map, WHEEL_SIZE); 213 struct hlist_head vectors[WHEEL_SIZE]; 214 } ____cacheline_aligned; 215 216 static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]); 217 218 #ifdef CONFIG_NO_HZ_COMMON 219 220 static DEFINE_STATIC_KEY_FALSE(timers_nohz_active); 221 static DEFINE_MUTEX(timer_keys_mutex); 222 223 static void timer_update_keys(struct work_struct *work); 224 static DECLARE_WORK(timer_update_work, timer_update_keys); 225 226 #ifdef CONFIG_SMP 227 static unsigned int sysctl_timer_migration = 1; 228 229 DEFINE_STATIC_KEY_FALSE(timers_migration_enabled); 230 231 static void timers_update_migration(void) 232 { 233 if (sysctl_timer_migration && tick_nohz_active) 234 static_branch_enable(&timers_migration_enabled); 235 else 236 static_branch_disable(&timers_migration_enabled); 237 } 238 239 #ifdef CONFIG_SYSCTL 240 static int timer_migration_handler(struct ctl_table *table, int write, 241 void *buffer, size_t *lenp, loff_t *ppos) 242 { 243 int ret; 244 245 mutex_lock(&timer_keys_mutex); 246 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 247 if (!ret && write) 248 timers_update_migration(); 249 mutex_unlock(&timer_keys_mutex); 250 return ret; 251 } 252 253 static struct ctl_table timer_sysctl[] = { 254 { 255 .procname = "timer_migration", 256 .data = &sysctl_timer_migration, 257 .maxlen = sizeof(unsigned int), 258 .mode = 0644, 259 .proc_handler = timer_migration_handler, 260 .extra1 = SYSCTL_ZERO, 261 .extra2 = SYSCTL_ONE, 262 }, 263 {} 264 }; 265 266 static int __init timer_sysctl_init(void) 267 { 268 register_sysctl("kernel", timer_sysctl); 269 return 0; 270 } 271 device_initcall(timer_sysctl_init); 272 #endif /* CONFIG_SYSCTL */ 273 #else /* CONFIG_SMP */ 274 static inline void timers_update_migration(void) { } 275 #endif /* !CONFIG_SMP */ 276 277 static void timer_update_keys(struct work_struct *work) 278 { 279 mutex_lock(&timer_keys_mutex); 280 timers_update_migration(); 281 static_branch_enable(&timers_nohz_active); 282 mutex_unlock(&timer_keys_mutex); 283 } 284 285 void timers_update_nohz(void) 286 { 287 schedule_work(&timer_update_work); 288 } 289 290 static inline bool is_timers_nohz_active(void) 291 { 292 return static_branch_unlikely(&timers_nohz_active); 293 
} 294 #else 295 static inline bool is_timers_nohz_active(void) { return false; } 296 #endif /* NO_HZ_COMMON */ 297 298 static unsigned long round_jiffies_common(unsigned long j, int cpu, 299 bool force_up) 300 { 301 int rem; 302 unsigned long original = j; 303 304 /* 305 * We don't want all cpus firing their timers at once hitting the 306 * same lock or cachelines, so we skew each extra cpu with an extra 307 * 3 jiffies. This 3 jiffies came originally from the mm/ code which 308 * already did this. 309 * The skew is done by adding 3*cpunr, then round, then subtract this 310 * extra offset again. 311 */ 312 j += cpu * 3; 313 314 rem = j % HZ; 315 316 /* 317 * If the target jiffie is just after a whole second (which can happen 318 * due to delays of the timer irq, long irq off times etc etc) then 319 * we should round down to the whole second, not up. Use 1/4th second 320 * as cutoff for this rounding as an extreme upper bound for this. 321 * But never round down if @force_up is set. 322 */ 323 if (rem < HZ/4 && !force_up) /* round down */ 324 j = j - rem; 325 else /* round up */ 326 j = j - rem + HZ; 327 328 /* now that we have rounded, subtract the extra skew again */ 329 j -= cpu * 3; 330 331 /* 332 * Make sure j is still in the future. Otherwise return the 333 * unmodified value. 334 */ 335 return time_is_after_jiffies(j) ? j : original; 336 } 337 338 /** 339 * __round_jiffies - function to round jiffies to a full second 340 * @j: the time in (absolute) jiffies that should be rounded 341 * @cpu: the processor number on which the timeout will happen 342 * 343 * __round_jiffies() rounds an absolute time in the future (in jiffies) 344 * up or down to (approximately) full seconds. This is useful for timers 345 * for which the exact time they fire does not matter too much, as long as 346 * they fire approximately every X seconds. 347 * 348 * By rounding these timers to whole seconds, all such timers will fire 349 * at the same time, rather than at various times spread out. The goal 350 * of this is to have the CPU wake up less, which saves power. 351 * 352 * The exact rounding is skewed for each processor to avoid all 353 * processors firing at the exact same time, which could lead 354 * to lock contention or spurious cache line bouncing. 355 * 356 * The return value is the rounded version of the @j parameter. 357 */ 358 unsigned long __round_jiffies(unsigned long j, int cpu) 359 { 360 return round_jiffies_common(j, cpu, false); 361 } 362 EXPORT_SYMBOL_GPL(__round_jiffies); 363 364 /** 365 * __round_jiffies_relative - function to round jiffies to a full second 366 * @j: the time in (relative) jiffies that should be rounded 367 * @cpu: the processor number on which the timeout will happen 368 * 369 * __round_jiffies_relative() rounds a time delta in the future (in jiffies) 370 * up or down to (approximately) full seconds. This is useful for timers 371 * for which the exact time they fire does not matter too much, as long as 372 * they fire approximately every X seconds. 373 * 374 * By rounding these timers to whole seconds, all such timers will fire 375 * at the same time, rather than at various times spread out. The goal 376 * of this is to have the CPU wake up less, which saves power. 377 * 378 * The exact rounding is skewed for each processor to avoid all 379 * processors firing at the exact same time, which could lead 380 * to lock contention or spurious cache line bouncing. 381 * 382 * The return value is the rounded version of the @j parameter. 
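 *
 * A minimal usage sketch (illustrative only; "poll_timer" is a made-up
 * example timer): a driver polling roughly every two seconds can batch
 * its wakeups with other second-aligned timers via
 *
 *	mod_timer(&poll_timer, jiffies + __round_jiffies_relative(2 * HZ, cpu));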
383 */ 384 unsigned long __round_jiffies_relative(unsigned long j, int cpu) 385 { 386 unsigned long j0 = jiffies; 387 388 /* Use j0 because jiffies might change while we run */ 389 return round_jiffies_common(j + j0, cpu, false) - j0; 390 } 391 EXPORT_SYMBOL_GPL(__round_jiffies_relative); 392 393 /** 394 * round_jiffies - function to round jiffies to a full second 395 * @j: the time in (absolute) jiffies that should be rounded 396 * 397 * round_jiffies() rounds an absolute time in the future (in jiffies) 398 * up or down to (approximately) full seconds. This is useful for timers 399 * for which the exact time they fire does not matter too much, as long as 400 * they fire approximately every X seconds. 401 * 402 * By rounding these timers to whole seconds, all such timers will fire 403 * at the same time, rather than at various times spread out. The goal 404 * of this is to have the CPU wake up less, which saves power. 405 * 406 * The return value is the rounded version of the @j parameter. 407 */ 408 unsigned long round_jiffies(unsigned long j) 409 { 410 return round_jiffies_common(j, raw_smp_processor_id(), false); 411 } 412 EXPORT_SYMBOL_GPL(round_jiffies); 413 414 /** 415 * round_jiffies_relative - function to round jiffies to a full second 416 * @j: the time in (relative) jiffies that should be rounded 417 * 418 * round_jiffies_relative() rounds a time delta in the future (in jiffies) 419 * up or down to (approximately) full seconds. This is useful for timers 420 * for which the exact time they fire does not matter too much, as long as 421 * they fire approximately every X seconds. 422 * 423 * By rounding these timers to whole seconds, all such timers will fire 424 * at the same time, rather than at various times spread out. The goal 425 * of this is to have the CPU wake up less, which saves power. 426 * 427 * The return value is the rounded version of the @j parameter. 428 */ 429 unsigned long round_jiffies_relative(unsigned long j) 430 { 431 return __round_jiffies_relative(j, raw_smp_processor_id()); 432 } 433 EXPORT_SYMBOL_GPL(round_jiffies_relative); 434 435 /** 436 * __round_jiffies_up - function to round jiffies up to a full second 437 * @j: the time in (absolute) jiffies that should be rounded 438 * @cpu: the processor number on which the timeout will happen 439 * 440 * This is the same as __round_jiffies() except that it will never 441 * round down. This is useful for timeouts for which the exact time 442 * of firing does not matter too much, as long as they don't fire too 443 * early. 444 */ 445 unsigned long __round_jiffies_up(unsigned long j, int cpu) 446 { 447 return round_jiffies_common(j, cpu, true); 448 } 449 EXPORT_SYMBOL_GPL(__round_jiffies_up); 450 451 /** 452 * __round_jiffies_up_relative - function to round jiffies up to a full second 453 * @j: the time in (relative) jiffies that should be rounded 454 * @cpu: the processor number on which the timeout will happen 455 * 456 * This is the same as __round_jiffies_relative() except that it will never 457 * round down. This is useful for timeouts for which the exact time 458 * of firing does not matter too much, as long as they don't fire too 459 * early. 
460 */ 461 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu) 462 { 463 unsigned long j0 = jiffies; 464 465 /* Use j0 because jiffies might change while we run */ 466 return round_jiffies_common(j + j0, cpu, true) - j0; 467 } 468 EXPORT_SYMBOL_GPL(__round_jiffies_up_relative); 469 470 /** 471 * round_jiffies_up - function to round jiffies up to a full second 472 * @j: the time in (absolute) jiffies that should be rounded 473 * 474 * This is the same as round_jiffies() except that it will never 475 * round down. This is useful for timeouts for which the exact time 476 * of firing does not matter too much, as long as they don't fire too 477 * early. 478 */ 479 unsigned long round_jiffies_up(unsigned long j) 480 { 481 return round_jiffies_common(j, raw_smp_processor_id(), true); 482 } 483 EXPORT_SYMBOL_GPL(round_jiffies_up); 484 485 /** 486 * round_jiffies_up_relative - function to round jiffies up to a full second 487 * @j: the time in (relative) jiffies that should be rounded 488 * 489 * This is the same as round_jiffies_relative() except that it will never 490 * round down. This is useful for timeouts for which the exact time 491 * of firing does not matter too much, as long as they don't fire too 492 * early. 493 */ 494 unsigned long round_jiffies_up_relative(unsigned long j) 495 { 496 return __round_jiffies_up_relative(j, raw_smp_processor_id()); 497 } 498 EXPORT_SYMBOL_GPL(round_jiffies_up_relative); 499 500 501 static inline unsigned int timer_get_idx(struct timer_list *timer) 502 { 503 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT; 504 } 505 506 static inline void timer_set_idx(struct timer_list *timer, unsigned int idx) 507 { 508 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) | 509 idx << TIMER_ARRAYSHIFT; 510 } 511 512 /* 513 * Helper function to calculate the array index for a given expiry 514 * time. 515 */ 516 static inline unsigned calc_index(unsigned long expires, unsigned lvl, 517 unsigned long *bucket_expiry) 518 { 519 520 /* 521 * The timer wheel has to guarantee that a timer does not fire 522 * early. Early expiry can happen due to: 523 * - Timer is armed at the edge of a tick 524 * - Truncation of the expiry time in the outer wheel levels 525 * 526 * Round up with level granularity to prevent this. 527 */ 528 expires = (expires >> LVL_SHIFT(lvl)) + 1; 529 *bucket_expiry = expires << LVL_SHIFT(lvl); 530 return LVL_OFFS(lvl) + (expires & LVL_MASK); 531 } 532 533 static int calc_wheel_index(unsigned long expires, unsigned long clk, 534 unsigned long *bucket_expiry) 535 { 536 unsigned long delta = expires - clk; 537 unsigned int idx; 538 539 if (delta < LVL_START(1)) { 540 idx = calc_index(expires, 0, bucket_expiry); 541 } else if (delta < LVL_START(2)) { 542 idx = calc_index(expires, 1, bucket_expiry); 543 } else if (delta < LVL_START(3)) { 544 idx = calc_index(expires, 2, bucket_expiry); 545 } else if (delta < LVL_START(4)) { 546 idx = calc_index(expires, 3, bucket_expiry); 547 } else if (delta < LVL_START(5)) { 548 idx = calc_index(expires, 4, bucket_expiry); 549 } else if (delta < LVL_START(6)) { 550 idx = calc_index(expires, 5, bucket_expiry); 551 } else if (delta < LVL_START(7)) { 552 idx = calc_index(expires, 6, bucket_expiry); 553 } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) { 554 idx = calc_index(expires, 7, bucket_expiry); 555 } else if ((long) delta < 0) { 556 idx = clk & LVL_MASK; 557 *bucket_expiry = clk; 558 } else { 559 /* 560 * Force expire obscene large timeouts to expire at the 561 * capacity limit of the wheel. 
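		 *
		 * For example (illustrative, HZ=1000): a 30 day timeout is
		 * roughly 2.6e9 jiffies, which exceeds WHEEL_TIMEOUT_CUTOFF
		 * (LVL_START(9) = 63 << 24, i.e. ~12.2 days worth of
		 * jiffies), so it is queued to expire at clk +
		 * WHEEL_TIMEOUT_MAX instead.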
562 */ 563 if (delta >= WHEEL_TIMEOUT_CUTOFF) 564 expires = clk + WHEEL_TIMEOUT_MAX; 565 566 idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry); 567 } 568 return idx; 569 } 570 571 static void 572 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer) 573 { 574 if (!is_timers_nohz_active()) 575 return; 576 577 /* 578 * TODO: This wants some optimizing similar to the code below, but we 579 * will do that when we switch from push to pull for deferrable timers. 580 */ 581 if (timer->flags & TIMER_DEFERRABLE) { 582 if (tick_nohz_full_cpu(base->cpu)) 583 wake_up_nohz_cpu(base->cpu); 584 return; 585 } 586 587 /* 588 * We might have to IPI the remote CPU if the base is idle and the 589 * timer is not deferrable. If the other CPU is on the way to idle 590 * then it can't set base->is_idle as we hold the base lock: 591 */ 592 if (base->is_idle) 593 wake_up_nohz_cpu(base->cpu); 594 } 595 596 /* 597 * Enqueue the timer into the hash bucket, mark it pending in 598 * the bitmap, store the index in the timer flags then wake up 599 * the target CPU if needed. 600 */ 601 static void enqueue_timer(struct timer_base *base, struct timer_list *timer, 602 unsigned int idx, unsigned long bucket_expiry) 603 { 604 605 hlist_add_head(&timer->entry, base->vectors + idx); 606 __set_bit(idx, base->pending_map); 607 timer_set_idx(timer, idx); 608 609 trace_timer_start(timer, timer->expires, timer->flags); 610 611 /* 612 * Check whether this is the new first expiring timer. The 613 * effective expiry time of the timer is required here 614 * (bucket_expiry) instead of timer->expires. 615 */ 616 if (time_before(bucket_expiry, base->next_expiry)) { 617 /* 618 * Set the next expiry time and kick the CPU so it 619 * can reevaluate the wheel: 620 */ 621 base->next_expiry = bucket_expiry; 622 base->timers_pending = true; 623 base->next_expiry_recalc = false; 624 trigger_dyntick_cpu(base, timer); 625 } 626 } 627 628 static void internal_add_timer(struct timer_base *base, struct timer_list *timer) 629 { 630 unsigned long bucket_expiry; 631 unsigned int idx; 632 633 idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry); 634 enqueue_timer(base, timer, idx, bucket_expiry); 635 } 636 637 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS 638 639 static const struct debug_obj_descr timer_debug_descr; 640 641 struct timer_hint { 642 void (*function)(struct timer_list *t); 643 long offset; 644 }; 645 646 #define TIMER_HINT(fn, container, timr, hintfn) \ 647 { \ 648 .function = fn, \ 649 .offset = offsetof(container, hintfn) - \ 650 offsetof(container, timr) \ 651 } 652 653 static const struct timer_hint timer_hints[] = { 654 TIMER_HINT(delayed_work_timer_fn, 655 struct delayed_work, timer, work.func), 656 TIMER_HINT(kthread_delayed_work_timer_fn, 657 struct kthread_delayed_work, timer, work.func), 658 }; 659 660 static void *timer_debug_hint(void *addr) 661 { 662 struct timer_list *timer = addr; 663 int i; 664 665 for (i = 0; i < ARRAY_SIZE(timer_hints); i++) { 666 if (timer_hints[i].function == timer->function) { 667 void (**fn)(void) = addr + timer_hints[i].offset; 668 669 return *fn; 670 } 671 } 672 673 return timer->function; 674 } 675 676 static bool timer_is_static_object(void *addr) 677 { 678 struct timer_list *timer = addr; 679 680 return (timer->entry.pprev == NULL && 681 timer->entry.next == TIMER_ENTRY_STATIC); 682 } 683 684 /* 685 * fixup_init is called when: 686 * - an active object is initialized 687 */ 688 static bool timer_fixup_init(void *addr, enum debug_obj_state state) 689 { 690 struct timer_list 
*timer = addr; 691 692 switch (state) { 693 case ODEBUG_STATE_ACTIVE: 694 del_timer_sync(timer); 695 debug_object_init(timer, &timer_debug_descr); 696 return true; 697 default: 698 return false; 699 } 700 } 701 702 /* Stub timer callback for improperly used timers. */ 703 static void stub_timer(struct timer_list *unused) 704 { 705 WARN_ON(1); 706 } 707 708 /* 709 * fixup_activate is called when: 710 * - an active object is activated 711 * - an unknown non-static object is activated 712 */ 713 static bool timer_fixup_activate(void *addr, enum debug_obj_state state) 714 { 715 struct timer_list *timer = addr; 716 717 switch (state) { 718 case ODEBUG_STATE_NOTAVAILABLE: 719 timer_setup(timer, stub_timer, 0); 720 return true; 721 722 case ODEBUG_STATE_ACTIVE: 723 WARN_ON(1); 724 fallthrough; 725 default: 726 return false; 727 } 728 } 729 730 /* 731 * fixup_free is called when: 732 * - an active object is freed 733 */ 734 static bool timer_fixup_free(void *addr, enum debug_obj_state state) 735 { 736 struct timer_list *timer = addr; 737 738 switch (state) { 739 case ODEBUG_STATE_ACTIVE: 740 del_timer_sync(timer); 741 debug_object_free(timer, &timer_debug_descr); 742 return true; 743 default: 744 return false; 745 } 746 } 747 748 /* 749 * fixup_assert_init is called when: 750 * - an untracked/uninit-ed object is found 751 */ 752 static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state) 753 { 754 struct timer_list *timer = addr; 755 756 switch (state) { 757 case ODEBUG_STATE_NOTAVAILABLE: 758 timer_setup(timer, stub_timer, 0); 759 return true; 760 default: 761 return false; 762 } 763 } 764 765 static const struct debug_obj_descr timer_debug_descr = { 766 .name = "timer_list", 767 .debug_hint = timer_debug_hint, 768 .is_static_object = timer_is_static_object, 769 .fixup_init = timer_fixup_init, 770 .fixup_activate = timer_fixup_activate, 771 .fixup_free = timer_fixup_free, 772 .fixup_assert_init = timer_fixup_assert_init, 773 }; 774 775 static inline void debug_timer_init(struct timer_list *timer) 776 { 777 debug_object_init(timer, &timer_debug_descr); 778 } 779 780 static inline void debug_timer_activate(struct timer_list *timer) 781 { 782 debug_object_activate(timer, &timer_debug_descr); 783 } 784 785 static inline void debug_timer_deactivate(struct timer_list *timer) 786 { 787 debug_object_deactivate(timer, &timer_debug_descr); 788 } 789 790 static inline void debug_timer_assert_init(struct timer_list *timer) 791 { 792 debug_object_assert_init(timer, &timer_debug_descr); 793 } 794 795 static void do_init_timer(struct timer_list *timer, 796 void (*func)(struct timer_list *), 797 unsigned int flags, 798 const char *name, struct lock_class_key *key); 799 800 void init_timer_on_stack_key(struct timer_list *timer, 801 void (*func)(struct timer_list *), 802 unsigned int flags, 803 const char *name, struct lock_class_key *key) 804 { 805 debug_object_init_on_stack(timer, &timer_debug_descr); 806 do_init_timer(timer, func, flags, name, key); 807 } 808 EXPORT_SYMBOL_GPL(init_timer_on_stack_key); 809 810 void destroy_timer_on_stack(struct timer_list *timer) 811 { 812 debug_object_free(timer, &timer_debug_descr); 813 } 814 EXPORT_SYMBOL_GPL(destroy_timer_on_stack); 815 816 #else 817 static inline void debug_timer_init(struct timer_list *timer) { } 818 static inline void debug_timer_activate(struct timer_list *timer) { } 819 static inline void debug_timer_deactivate(struct timer_list *timer) { } 820 static inline void debug_timer_assert_init(struct timer_list *timer) { } 821 #endif 822 823 
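/*
 * A minimal lifecycle sketch for an on-stack timer (illustrative only;
 * "my_func" is a made-up callback). The on-stack variants let the
 * debugobjects code above track the object's lifetime:
 *
 *	struct timer_list t;
 *
 *	timer_setup_on_stack(&t, my_func, 0);
 *	t.expires = jiffies + HZ;
 *	add_timer(&t);
 *	...
 *	timer_delete_sync(&t);
 *	destroy_timer_on_stack(&t);
 */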
static inline void debug_init(struct timer_list *timer) 824 { 825 debug_timer_init(timer); 826 trace_timer_init(timer); 827 } 828 829 static inline void debug_deactivate(struct timer_list *timer) 830 { 831 debug_timer_deactivate(timer); 832 trace_timer_cancel(timer); 833 } 834 835 static inline void debug_assert_init(struct timer_list *timer) 836 { 837 debug_timer_assert_init(timer); 838 } 839 840 static void do_init_timer(struct timer_list *timer, 841 void (*func)(struct timer_list *), 842 unsigned int flags, 843 const char *name, struct lock_class_key *key) 844 { 845 timer->entry.pprev = NULL; 846 timer->function = func; 847 if (WARN_ON_ONCE(flags & ~TIMER_INIT_FLAGS)) 848 flags &= TIMER_INIT_FLAGS; 849 timer->flags = flags | raw_smp_processor_id(); 850 lockdep_init_map(&timer->lockdep_map, name, key, 0); 851 } 852 853 /** 854 * init_timer_key - initialize a timer 855 * @timer: the timer to be initialized 856 * @func: timer callback function 857 * @flags: timer flags 858 * @name: name of the timer 859 * @key: lockdep class key of the fake lock used for tracking timer 860 * sync lock dependencies 861 * 862 * init_timer_key() must be done to a timer prior calling *any* of the 863 * other timer functions. 864 */ 865 void init_timer_key(struct timer_list *timer, 866 void (*func)(struct timer_list *), unsigned int flags, 867 const char *name, struct lock_class_key *key) 868 { 869 debug_init(timer); 870 do_init_timer(timer, func, flags, name, key); 871 } 872 EXPORT_SYMBOL(init_timer_key); 873 874 static inline void detach_timer(struct timer_list *timer, bool clear_pending) 875 { 876 struct hlist_node *entry = &timer->entry; 877 878 debug_deactivate(timer); 879 880 __hlist_del(entry); 881 if (clear_pending) 882 entry->pprev = NULL; 883 entry->next = LIST_POISON2; 884 } 885 886 static int detach_if_pending(struct timer_list *timer, struct timer_base *base, 887 bool clear_pending) 888 { 889 unsigned idx = timer_get_idx(timer); 890 891 if (!timer_pending(timer)) 892 return 0; 893 894 if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) { 895 __clear_bit(idx, base->pending_map); 896 base->next_expiry_recalc = true; 897 } 898 899 detach_timer(timer, clear_pending); 900 return 1; 901 } 902 903 static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu) 904 { 905 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu); 906 907 /* 908 * If the timer is deferrable and NO_HZ_COMMON is set then we need 909 * to use the deferrable base. 910 */ 911 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) 912 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu); 913 return base; 914 } 915 916 static inline struct timer_base *get_timer_this_cpu_base(u32 tflags) 917 { 918 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 919 920 /* 921 * If the timer is deferrable and NO_HZ_COMMON is set then we need 922 * to use the deferrable base. 
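	 *
	 * For example (illustrative; "my_func" is a made-up callback), a
	 * timer initialized with
	 *
	 *	timer_setup(&t, my_func, TIMER_DEFERRABLE);
	 *
	 * lands on timer_bases[BASE_DEF] and will not, on its own, bring an
	 * idle NOHZ CPU out of its sleep state just to service it.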
923 */ 924 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) 925 base = this_cpu_ptr(&timer_bases[BASE_DEF]); 926 return base; 927 } 928 929 static inline struct timer_base *get_timer_base(u32 tflags) 930 { 931 return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK); 932 } 933 934 static inline struct timer_base * 935 get_target_base(struct timer_base *base, unsigned tflags) 936 { 937 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 938 if (static_branch_likely(&timers_migration_enabled) && 939 !(tflags & TIMER_PINNED)) 940 return get_timer_cpu_base(tflags, get_nohz_timer_target()); 941 #endif 942 return get_timer_this_cpu_base(tflags); 943 } 944 945 static inline void forward_timer_base(struct timer_base *base) 946 { 947 unsigned long jnow = READ_ONCE(jiffies); 948 949 /* 950 * No need to forward if we are close enough below jiffies. 951 * Also while executing timers, base->clk is 1 offset ahead 952 * of jiffies to avoid endless requeuing to current jiffies. 953 */ 954 if ((long)(jnow - base->clk) < 1) 955 return; 956 957 /* 958 * If the next expiry value is > jiffies, then we fast forward to 959 * jiffies otherwise we forward to the next expiry value. 960 */ 961 if (time_after(base->next_expiry, jnow)) { 962 base->clk = jnow; 963 } else { 964 if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk))) 965 return; 966 base->clk = base->next_expiry; 967 } 968 } 969 970 971 /* 972 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means 973 * that all timers which are tied to this base are locked, and the base itself 974 * is locked too. 975 * 976 * So __run_timers/migrate_timers can safely modify all timers which could 977 * be found in the base->vectors array. 978 * 979 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need 980 * to wait until the migration is done. 981 */ 982 static struct timer_base *lock_timer_base(struct timer_list *timer, 983 unsigned long *flags) 984 __acquires(timer->base->lock) 985 { 986 for (;;) { 987 struct timer_base *base; 988 u32 tf; 989 990 /* 991 * We need to use READ_ONCE() here, otherwise the compiler 992 * might re-read @tf between the check for TIMER_MIGRATING 993 * and spin_lock(). 994 */ 995 tf = READ_ONCE(timer->flags); 996 997 if (!(tf & TIMER_MIGRATING)) { 998 base = get_timer_base(tf); 999 raw_spin_lock_irqsave(&base->lock, *flags); 1000 if (timer->flags == tf) 1001 return base; 1002 raw_spin_unlock_irqrestore(&base->lock, *flags); 1003 } 1004 cpu_relax(); 1005 } 1006 } 1007 1008 #define MOD_TIMER_PENDING_ONLY 0x01 1009 #define MOD_TIMER_REDUCE 0x02 1010 #define MOD_TIMER_NOTPENDING 0x04 1011 1012 static inline int 1013 __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options) 1014 { 1015 unsigned long clk = 0, flags, bucket_expiry; 1016 struct timer_base *base, *new_base; 1017 unsigned int idx = UINT_MAX; 1018 int ret = 0; 1019 1020 debug_assert_init(timer); 1021 1022 /* 1023 * This is a common optimization triggered by the networking code - if 1024 * the timer is re-modified to have the same timeout or ends up in the 1025 * same array bucket then just return: 1026 */ 1027 if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) { 1028 /* 1029 * The downside of this optimization is that it can result in 1030 * larger granularity than you would get from adding a new 1031 * timer with this expiry. 
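		 *
		 * A worked example (illustrative, HZ=1000): a pending timer
		 * sitting in a level 2 bucket (64 ms granularity) that is
		 * re-armed from 600 ms to 610 ms out usually maps to the
		 * same bucket, so only timer->expires is updated below and
		 * the timer still fires at the old bucket's expiry.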
1032 */ 1033 long diff = timer->expires - expires; 1034 1035 if (!diff) 1036 return 1; 1037 if (options & MOD_TIMER_REDUCE && diff <= 0) 1038 return 1; 1039 1040 /* 1041 * We lock timer base and calculate the bucket index right 1042 * here. If the timer ends up in the same bucket, then we 1043 * just update the expiry time and avoid the whole 1044 * dequeue/enqueue dance. 1045 */ 1046 base = lock_timer_base(timer, &flags); 1047 /* 1048 * Has @timer been shutdown? This needs to be evaluated 1049 * while holding base lock to prevent a race against the 1050 * shutdown code. 1051 */ 1052 if (!timer->function) 1053 goto out_unlock; 1054 1055 forward_timer_base(base); 1056 1057 if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) && 1058 time_before_eq(timer->expires, expires)) { 1059 ret = 1; 1060 goto out_unlock; 1061 } 1062 1063 clk = base->clk; 1064 idx = calc_wheel_index(expires, clk, &bucket_expiry); 1065 1066 /* 1067 * Retrieve and compare the array index of the pending 1068 * timer. If it matches set the expiry to the new value so a 1069 * subsequent call will exit in the expires check above. 1070 */ 1071 if (idx == timer_get_idx(timer)) { 1072 if (!(options & MOD_TIMER_REDUCE)) 1073 timer->expires = expires; 1074 else if (time_after(timer->expires, expires)) 1075 timer->expires = expires; 1076 ret = 1; 1077 goto out_unlock; 1078 } 1079 } else { 1080 base = lock_timer_base(timer, &flags); 1081 /* 1082 * Has @timer been shutdown? This needs to be evaluated 1083 * while holding base lock to prevent a race against the 1084 * shutdown code. 1085 */ 1086 if (!timer->function) 1087 goto out_unlock; 1088 1089 forward_timer_base(base); 1090 } 1091 1092 ret = detach_if_pending(timer, base, false); 1093 if (!ret && (options & MOD_TIMER_PENDING_ONLY)) 1094 goto out_unlock; 1095 1096 new_base = get_target_base(base, timer->flags); 1097 1098 if (base != new_base) { 1099 /* 1100 * We are trying to schedule the timer on the new base. 1101 * However we can't change timer's base while it is running, 1102 * otherwise timer_delete_sync() can't detect that the timer's 1103 * handler yet has not finished. This also guarantees that the 1104 * timer is serialized wrt itself. 1105 */ 1106 if (likely(base->running_timer != timer)) { 1107 /* See the comment in lock_timer_base() */ 1108 timer->flags |= TIMER_MIGRATING; 1109 1110 raw_spin_unlock(&base->lock); 1111 base = new_base; 1112 raw_spin_lock(&base->lock); 1113 WRITE_ONCE(timer->flags, 1114 (timer->flags & ~TIMER_BASEMASK) | base->cpu); 1115 forward_timer_base(base); 1116 } 1117 } 1118 1119 debug_timer_activate(timer); 1120 1121 timer->expires = expires; 1122 /* 1123 * If 'idx' was calculated above and the base time did not advance 1124 * between calculating 'idx' and possibly switching the base, only 1125 * enqueue_timer() is required. Otherwise we need to (re)calculate 1126 * the wheel index via internal_add_timer(). 1127 */ 1128 if (idx != UINT_MAX && clk == base->clk) 1129 enqueue_timer(base, timer, idx, bucket_expiry); 1130 else 1131 internal_add_timer(base, timer); 1132 1133 out_unlock: 1134 raw_spin_unlock_irqrestore(&base->lock, flags); 1135 1136 return ret; 1137 } 1138 1139 /** 1140 * mod_timer_pending - Modify a pending timer's timeout 1141 * @timer: The pending timer to be modified 1142 * @expires: New absolute timeout in jiffies 1143 * 1144 * mod_timer_pending() is the same for pending timers as mod_timer(), but 1145 * will not activate inactive timers. 
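 *
 * A minimal usage sketch (illustrative; "idle_timer" is a made-up
 * example): push out an inactivity timeout only while it is still
 * queued, e.g. on activity:
 *
 *	mod_timer_pending(&idle_timer, jiffies + 30 * HZ);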
1146 * 1147 * If @timer->function == NULL then the start operation is silently 1148 * discarded. 1149 * 1150 * Return: 1151 * * %0 - The timer was inactive and not modified or was in 1152 * shutdown state and the operation was discarded 1153 * * %1 - The timer was active and requeued to expire at @expires 1154 */ 1155 int mod_timer_pending(struct timer_list *timer, unsigned long expires) 1156 { 1157 return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY); 1158 } 1159 EXPORT_SYMBOL(mod_timer_pending); 1160 1161 /** 1162 * mod_timer - Modify a timer's timeout 1163 * @timer: The timer to be modified 1164 * @expires: New absolute timeout in jiffies 1165 * 1166 * mod_timer(timer, expires) is equivalent to: 1167 * 1168 * del_timer(timer); timer->expires = expires; add_timer(timer); 1169 * 1170 * mod_timer() is more efficient than the above open coded sequence. In 1171 * case that the timer is inactive, the del_timer() part is a NOP. The 1172 * timer is in any case activated with the new expiry time @expires. 1173 * 1174 * Note that if there are multiple unserialized concurrent users of the 1175 * same timer, then mod_timer() is the only safe way to modify the timeout, 1176 * since add_timer() cannot modify an already running timer. 1177 * 1178 * If @timer->function == NULL then the start operation is silently 1179 * discarded. In this case the return value is 0 and meaningless. 1180 * 1181 * Return: 1182 * * %0 - The timer was inactive and started or was in shutdown 1183 * state and the operation was discarded 1184 * * %1 - The timer was active and requeued to expire at @expires or 1185 * the timer was active and not modified because @expires did 1186 * not change the effective expiry time 1187 */ 1188 int mod_timer(struct timer_list *timer, unsigned long expires) 1189 { 1190 return __mod_timer(timer, expires, 0); 1191 } 1192 EXPORT_SYMBOL(mod_timer); 1193 1194 /** 1195 * timer_reduce - Modify a timer's timeout if it would reduce the timeout 1196 * @timer: The timer to be modified 1197 * @expires: New absolute timeout in jiffies 1198 * 1199 * timer_reduce() is very similar to mod_timer(), except that it will only 1200 * modify an enqueued timer if that would reduce the expiration time. If 1201 * @timer is not enqueued it starts the timer. 1202 * 1203 * If @timer->function == NULL then the start operation is silently 1204 * discarded. 1205 * 1206 * Return: 1207 * * %0 - The timer was inactive and started or was in shutdown 1208 * state and the operation was discarded 1209 * * %1 - The timer was active and requeued to expire at @expires or 1210 * the timer was active and not modified because @expires 1211 * did not change the effective expiry time such that the 1212 * timer would expire earlier than already scheduled 1213 */ 1214 int timer_reduce(struct timer_list *timer, unsigned long expires) 1215 { 1216 return __mod_timer(timer, expires, MOD_TIMER_REDUCE); 1217 } 1218 EXPORT_SYMBOL(timer_reduce); 1219 1220 /** 1221 * add_timer - Start a timer 1222 * @timer: The timer to be started 1223 * 1224 * Start @timer to expire at @timer->expires in the future. @timer->expires 1225 * is the absolute expiry time measured in 'jiffies'. When the timer expires 1226 * timer->function(timer) will be invoked from soft interrupt context. 1227 * 1228 * The @timer->expires and @timer->function fields must be set prior 1229 * to calling this function. 1230 * 1231 * If @timer->function == NULL then the start operation is silently 1232 * discarded. 
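 *
 * A minimal usage sketch (illustrative; "my_func" is a made-up callback):
 *
 *	struct timer_list t;
 *
 *	timer_setup(&t, my_func, 0);
 *	t.expires = jiffies + 2 * HZ;
 *	add_timer(&t);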
1233 * 1234 * If @timer->expires is already in the past @timer will be queued to 1235 * expire at the next timer tick. 1236 * 1237 * This can only operate on an inactive timer. Attempts to invoke this on 1238 * an active timer are rejected with a warning. 1239 */ 1240 void add_timer(struct timer_list *timer) 1241 { 1242 if (WARN_ON_ONCE(timer_pending(timer))) 1243 return; 1244 __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING); 1245 } 1246 EXPORT_SYMBOL(add_timer); 1247 1248 /** 1249 * add_timer_on - Start a timer on a particular CPU 1250 * @timer: The timer to be started 1251 * @cpu: The CPU to start it on 1252 * 1253 * Same as add_timer() except that it starts the timer on the given CPU. 1254 * 1255 * See add_timer() for further details. 1256 */ 1257 void add_timer_on(struct timer_list *timer, int cpu) 1258 { 1259 struct timer_base *new_base, *base; 1260 unsigned long flags; 1261 1262 debug_assert_init(timer); 1263 1264 if (WARN_ON_ONCE(timer_pending(timer))) 1265 return; 1266 1267 new_base = get_timer_cpu_base(timer->flags, cpu); 1268 1269 /* 1270 * If @timer was on a different CPU, it should be migrated with the 1271 * old base locked to prevent other operations proceeding with the 1272 * wrong base locked. See lock_timer_base(). 1273 */ 1274 base = lock_timer_base(timer, &flags); 1275 /* 1276 * Has @timer been shutdown? This needs to be evaluated while 1277 * holding base lock to prevent a race against the shutdown code. 1278 */ 1279 if (!timer->function) 1280 goto out_unlock; 1281 1282 if (base != new_base) { 1283 timer->flags |= TIMER_MIGRATING; 1284 1285 raw_spin_unlock(&base->lock); 1286 base = new_base; 1287 raw_spin_lock(&base->lock); 1288 WRITE_ONCE(timer->flags, 1289 (timer->flags & ~TIMER_BASEMASK) | cpu); 1290 } 1291 forward_timer_base(base); 1292 1293 debug_timer_activate(timer); 1294 internal_add_timer(base, timer); 1295 out_unlock: 1296 raw_spin_unlock_irqrestore(&base->lock, flags); 1297 } 1298 EXPORT_SYMBOL_GPL(add_timer_on); 1299 1300 /** 1301 * timer_delete - Deactivate a timer 1302 * @timer: The timer to be deactivated 1303 * 1304 * The function only deactivates a pending timer, but contrary to 1305 * timer_delete_sync() it does not take into account whether the timer's 1306 * callback function is concurrently executed on a different CPU or not. 1307 * It neither prevents rearming of the timer. If @timer can be rearmed 1308 * concurrently then the return value of this function is meaningless. 1309 * 1310 * Return: 1311 * * %0 - The timer was not pending 1312 * * %1 - The timer was pending and deactivated 1313 */ 1314 int timer_delete(struct timer_list *timer) 1315 { 1316 struct timer_base *base; 1317 unsigned long flags; 1318 int ret = 0; 1319 1320 debug_assert_init(timer); 1321 1322 if (timer_pending(timer)) { 1323 base = lock_timer_base(timer, &flags); 1324 ret = detach_if_pending(timer, base, true); 1325 raw_spin_unlock_irqrestore(&base->lock, flags); 1326 } 1327 1328 return ret; 1329 } 1330 EXPORT_SYMBOL(timer_delete); 1331 1332 /** 1333 * try_to_del_timer_sync - Try to deactivate a timer 1334 * @timer: Timer to deactivate 1335 * 1336 * This function tries to deactivate a timer. On success the timer is not 1337 * queued and the timer callback function is not running on any CPU. 1338 * 1339 * This function does not guarantee that the timer cannot be rearmed right 1340 * after dropping the base lock. That needs to be prevented by the calling 1341 * code if necessary. 
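 *
 * A sketch of the usual caller-side pattern (illustrative; it is
 * essentially the retry loop used by timer_delete_sync() below):
 *
 *	while (try_to_del_timer_sync(&t) < 0)
 *		cpu_relax();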
 *
 * Return:
 * * %0  - The timer was not pending
 * * %1  - The timer was pending and deactivated
 * * %-1 - The timer callback function is running on a different CPU
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer)
		ret = detach_if_pending(timer, base, true);

	raw_spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_PREEMPT_RT
static __init void timer_base_init_expiry_lock(struct timer_base *base)
{
	spin_lock_init(&base->expiry_lock);
}

static inline void timer_base_lock_expiry(struct timer_base *base)
{
	spin_lock(&base->expiry_lock);
}

static inline void timer_base_unlock_expiry(struct timer_base *base)
{
	spin_unlock(&base->expiry_lock);
}

/*
 * The counterpart to del_timer_wait_running().
 *
 * If there is a waiter for base->expiry_lock, then it was waiting for the
 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
 * the waiter to acquire the lock and make progress.
 */
static void timer_sync_wait_running(struct timer_base *base)
{
	if (atomic_read(&base->timer_waiters)) {
		raw_spin_unlock_irq(&base->lock);
		spin_unlock(&base->expiry_lock);
		spin_lock(&base->expiry_lock);
		raw_spin_lock_irq(&base->lock);
	}
}

/*
 * This function is called on PREEMPT_RT kernels when the fast path
 * deletion of a timer failed because the timer callback function was
 * running.
 *
 * This prevents priority inversion if the softirq thread on a remote CPU
 * got preempted, and it prevents a livelock when the task which tries to
 * delete a timer has preempted the softirq thread running the timer
 * callback function.
 */
static void del_timer_wait_running(struct timer_list *timer)
{
	u32 tf;

	tf = READ_ONCE(timer->flags);
	if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
		struct timer_base *base = get_timer_base(tf);

		/*
		 * Mark the base as contended and grab the expiry lock,
		 * which is held by the softirq across the timer
		 * callback. Drop the lock immediately so the softirq can
		 * expire the next timer. In theory the timer could already
		 * be running again, but that's more than unlikely and just
		 * causes another wait loop.
		 */
		atomic_inc(&base->timer_waiters);
		spin_lock_bh(&base->expiry_lock);
		atomic_dec(&base->timer_waiters);
		spin_unlock_bh(&base->expiry_lock);
	}
}
#else
static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
static inline void timer_base_lock_expiry(struct timer_base *base) { }
static inline void timer_base_unlock_expiry(struct timer_base *base) { }
static inline void timer_sync_wait_running(struct timer_base *base) { }
static inline void del_timer_wait_running(struct timer_list *timer) { }
#endif

/**
 * timer_delete_sync - Deactivate a timer and wait for the handler to finish.
 * @timer: The timer to be deactivated
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless.
It must not be called from 1446 * interrupt contexts unless the timer is an irqsafe one. The caller must 1447 * not hold locks which would prevent completion of the timer's callback 1448 * function. The timer's handler must not call add_timer_on(). Upon exit 1449 * the timer is not queued and the handler is not running on any CPU. 1450 * 1451 * For !irqsafe timers, the caller must not hold locks that are held in 1452 * interrupt context. Even if the lock has nothing to do with the timer in 1453 * question. Here's why:: 1454 * 1455 * CPU0 CPU1 1456 * ---- ---- 1457 * <SOFTIRQ> 1458 * call_timer_fn(); 1459 * base->running_timer = mytimer; 1460 * spin_lock_irq(somelock); 1461 * <IRQ> 1462 * spin_lock(somelock); 1463 * timer_delete_sync(mytimer); 1464 * while (base->running_timer == mytimer); 1465 * 1466 * Now timer_delete_sync() will never return and never release somelock. 1467 * The interrupt on the other CPU is waiting to grab somelock but it has 1468 * interrupted the softirq that CPU0 is waiting to finish. 1469 * 1470 * This function cannot guarantee that the timer is not rearmed again by 1471 * some concurrent or preempting code, right after it dropped the base 1472 * lock. If there is the possibility of a concurrent rearm then the return 1473 * value of the function is meaningless. 1474 * 1475 * Return: 1476 * * %0 - The timer was not pending 1477 * * %1 - The timer was pending and deactivated 1478 */ 1479 int timer_delete_sync(struct timer_list *timer) 1480 { 1481 int ret; 1482 1483 #ifdef CONFIG_LOCKDEP 1484 unsigned long flags; 1485 1486 /* 1487 * If lockdep gives a backtrace here, please reference 1488 * the synchronization rules above. 1489 */ 1490 local_irq_save(flags); 1491 lock_map_acquire(&timer->lockdep_map); 1492 lock_map_release(&timer->lockdep_map); 1493 local_irq_restore(flags); 1494 #endif 1495 /* 1496 * don't use it in hardirq context, because it 1497 * could lead to deadlock. 1498 */ 1499 WARN_ON(in_hardirq() && !(timer->flags & TIMER_IRQSAFE)); 1500 1501 /* 1502 * Must be able to sleep on PREEMPT_RT because of the slowpath in 1503 * del_timer_wait_running(). 1504 */ 1505 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE)) 1506 lockdep_assert_preemption_enabled(); 1507 1508 do { 1509 ret = try_to_del_timer_sync(timer); 1510 1511 if (unlikely(ret < 0)) { 1512 del_timer_wait_running(timer); 1513 cpu_relax(); 1514 } 1515 } while (ret < 0); 1516 1517 return ret; 1518 } 1519 EXPORT_SYMBOL(timer_delete_sync); 1520 1521 static void call_timer_fn(struct timer_list *timer, 1522 void (*fn)(struct timer_list *), 1523 unsigned long baseclk) 1524 { 1525 int count = preempt_count(); 1526 1527 #ifdef CONFIG_LOCKDEP 1528 /* 1529 * It is permissible to free the timer from inside the 1530 * function that is called from it, this we need to take into 1531 * account for lockdep too. To avoid bogus "held lock freed" 1532 * warnings as well as problems when looking into 1533 * timer->lockdep_map, make a copy and use that here. 1534 */ 1535 struct lockdep_map lockdep_map; 1536 1537 lockdep_copy_map(&lockdep_map, &timer->lockdep_map); 1538 #endif 1539 /* 1540 * Couple the lock chain with the lock chain at 1541 * timer_delete_sync() by acquiring the lock_map around the fn() 1542 * call here and in timer_delete_sync(). 
1543 */ 1544 lock_map_acquire(&lockdep_map); 1545 1546 trace_timer_expire_entry(timer, baseclk); 1547 fn(timer); 1548 trace_timer_expire_exit(timer); 1549 1550 lock_map_release(&lockdep_map); 1551 1552 if (count != preempt_count()) { 1553 WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n", 1554 fn, count, preempt_count()); 1555 /* 1556 * Restore the preempt count. That gives us a decent 1557 * chance to survive and extract information. If the 1558 * callback kept a lock held, bad luck, but not worse 1559 * than the BUG() we had. 1560 */ 1561 preempt_count_set(count); 1562 } 1563 } 1564 1565 static void expire_timers(struct timer_base *base, struct hlist_head *head) 1566 { 1567 /* 1568 * This value is required only for tracing. base->clk was 1569 * incremented directly before expire_timers was called. But expiry 1570 * is related to the old base->clk value. 1571 */ 1572 unsigned long baseclk = base->clk - 1; 1573 1574 while (!hlist_empty(head)) { 1575 struct timer_list *timer; 1576 void (*fn)(struct timer_list *); 1577 1578 timer = hlist_entry(head->first, struct timer_list, entry); 1579 1580 base->running_timer = timer; 1581 detach_timer(timer, true); 1582 1583 fn = timer->function; 1584 1585 if (WARN_ON_ONCE(!fn)) { 1586 /* Should never happen. Emphasis on should! */ 1587 base->running_timer = NULL; 1588 continue; 1589 } 1590 1591 if (timer->flags & TIMER_IRQSAFE) { 1592 raw_spin_unlock(&base->lock); 1593 call_timer_fn(timer, fn, baseclk); 1594 raw_spin_lock(&base->lock); 1595 base->running_timer = NULL; 1596 } else { 1597 raw_spin_unlock_irq(&base->lock); 1598 call_timer_fn(timer, fn, baseclk); 1599 raw_spin_lock_irq(&base->lock); 1600 base->running_timer = NULL; 1601 timer_sync_wait_running(base); 1602 } 1603 } 1604 } 1605 1606 static int collect_expired_timers(struct timer_base *base, 1607 struct hlist_head *heads) 1608 { 1609 unsigned long clk = base->clk = base->next_expiry; 1610 struct hlist_head *vec; 1611 int i, levels = 0; 1612 unsigned int idx; 1613 1614 for (i = 0; i < LVL_DEPTH; i++) { 1615 idx = (clk & LVL_MASK) + i * LVL_SIZE; 1616 1617 if (__test_and_clear_bit(idx, base->pending_map)) { 1618 vec = base->vectors + idx; 1619 hlist_move_list(vec, heads++); 1620 levels++; 1621 } 1622 /* Is it time to look at the next level? */ 1623 if (clk & LVL_CLK_MASK) 1624 break; 1625 /* Shift clock for the next level granularity */ 1626 clk >>= LVL_CLK_SHIFT; 1627 } 1628 return levels; 1629 } 1630 1631 /* 1632 * Find the next pending bucket of a level. Search from level start (@offset) 1633 * + @clk upwards and if nothing there, search from start of the level 1634 * (@offset) up to @offset + clk. 1635 */ 1636 static int next_pending_bucket(struct timer_base *base, unsigned offset, 1637 unsigned clk) 1638 { 1639 unsigned pos, start = offset + clk; 1640 unsigned end = offset + LVL_SIZE; 1641 1642 pos = find_next_bit(base->pending_map, end, start); 1643 if (pos < end) 1644 return pos - start; 1645 1646 pos = find_next_bit(base->pending_map, start, offset); 1647 return pos < start ? pos + LVL_SIZE - start : -1; 1648 } 1649 1650 /* 1651 * Search the first expiring timer in the various clock levels. Caller must 1652 * hold base->lock. 
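 *
 * An illustrative example of the per-level scan in next_pending_bucket():
 * with offset = 64 (level 1), clk = 60 and the only pending bit at
 * offset + 5, the forward search over [124, 128) finds nothing, the
 * wrap-around search from offset finds bit 69, and the function returns
 * 69 + 64 - 124 = 9, i.e. the bucket is 9 level-1 ticks after clk.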
1653 */ 1654 static unsigned long __next_timer_interrupt(struct timer_base *base) 1655 { 1656 unsigned long clk, next, adj; 1657 unsigned lvl, offset = 0; 1658 1659 next = base->clk + NEXT_TIMER_MAX_DELTA; 1660 clk = base->clk; 1661 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { 1662 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); 1663 unsigned long lvl_clk = clk & LVL_CLK_MASK; 1664 1665 if (pos >= 0) { 1666 unsigned long tmp = clk + (unsigned long) pos; 1667 1668 tmp <<= LVL_SHIFT(lvl); 1669 if (time_before(tmp, next)) 1670 next = tmp; 1671 1672 /* 1673 * If the next expiration happens before we reach 1674 * the next level, no need to check further. 1675 */ 1676 if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK)) 1677 break; 1678 } 1679 /* 1680 * Clock for the next level. If the current level clock lower 1681 * bits are zero, we look at the next level as is. If not we 1682 * need to advance it by one because that's going to be the 1683 * next expiring bucket in that level. base->clk is the next 1684 * expiring jiffie. So in case of: 1685 * 1686 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1687 * 0 0 0 0 0 0 1688 * 1689 * we have to look at all levels @index 0. With 1690 * 1691 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1692 * 0 0 0 0 0 2 1693 * 1694 * LVL0 has the next expiring bucket @index 2. The upper 1695 * levels have the next expiring bucket @index 1. 1696 * 1697 * In case that the propagation wraps the next level the same 1698 * rules apply: 1699 * 1700 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1701 * 0 0 0 0 F 2 1702 * 1703 * So after looking at LVL0 we get: 1704 * 1705 * LVL5 LVL4 LVL3 LVL2 LVL1 1706 * 0 0 0 1 0 1707 * 1708 * So no propagation from LVL1 to LVL2 because that happened 1709 * with the add already, but then we need to propagate further 1710 * from LVL2 to LVL3. 1711 * 1712 * So the simple check whether the lower bits of the current 1713 * level are 0 or not is sufficient for all cases. 1714 */ 1715 adj = lvl_clk ? 1 : 0; 1716 clk >>= LVL_CLK_SHIFT; 1717 clk += adj; 1718 } 1719 1720 base->next_expiry_recalc = false; 1721 base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA); 1722 1723 return next; 1724 } 1725 1726 #ifdef CONFIG_NO_HZ_COMMON 1727 /* 1728 * Check, if the next hrtimer event is before the next timer wheel 1729 * event: 1730 */ 1731 static u64 cmp_next_hrtimer_event(u64 basem, u64 expires) 1732 { 1733 u64 nextevt = hrtimer_get_next_event(); 1734 1735 /* 1736 * If high resolution timers are enabled 1737 * hrtimer_get_next_event() returns KTIME_MAX. 1738 */ 1739 if (expires <= nextevt) 1740 return expires; 1741 1742 /* 1743 * If the next timer is already expired, return the tick base 1744 * time so the tick is fired immediately. 1745 */ 1746 if (nextevt <= basem) 1747 return basem; 1748 1749 /* 1750 * Round up to the next jiffie. High resolution timers are 1751 * off, so the hrtimers are expired in the tick and we need to 1752 * make sure that this tick really expires the timer to avoid 1753 * a ping pong of the nohz stop code. 1754 * 1755 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3 1756 */ 1757 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC; 1758 } 1759 1760 /** 1761 * get_next_timer_interrupt - return the time (clock mono) of the next timer 1762 * @basej: base time jiffies 1763 * @basem: base time clock monotonic 1764 * 1765 * Returns the tick aligned clock monotonic time of the next pending 1766 * timer or KTIME_MAX if no timer is pending. 
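 *
 * For example (illustrative, HZ=1000): if @basej is 1000 and the wheel's
 * next event is at jiffy 1250, the function returns roughly
 * @basem + (1250 - 1000) * TICK_NSEC, i.e. 250 ms after @basem, possibly
 * clamped against an earlier hrtimer event.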

/**
 * get_next_timer_interrupt - return the time (clock mono) of the next timer
 * @basej: base time jiffies
 * @basem: base time clock monotonic
 *
 * Returns the tick aligned clock monotonic time of the next pending
 * timer or KTIME_MAX if no timer is pending.
 */
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
	u64 expires = KTIME_MAX;
	unsigned long nextevt;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	raw_spin_lock(&base->lock);
	if (base->next_expiry_recalc)
		base->next_expiry = __next_timer_interrupt(base);
	nextevt = base->next_expiry;

	/*
	 * We have a fresh next event. Check whether we can forward the
	 * base. We can only do that when @basej is past base->clk
	 * otherwise we might rewind base->clk.
	 */
	if (time_after(basej, base->clk)) {
		if (time_after(nextevt, basej))
			base->clk = basej;
		else if (time_after(nextevt, base->clk))
			base->clk = nextevt;
	}

	if (time_before_eq(nextevt, basej)) {
		expires = basem;
		base->is_idle = false;
	} else {
		if (base->timers_pending)
			expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
		/*
		 * If we expect to sleep more than a tick, mark the base idle.
		 * Also the tick is stopped so any added timer must forward
		 * the base clk itself to keep granularity small. This idle
		 * logic is only maintained for the BASE_STD base, deferrable
		 * timers may still see large granularity skew (by design).
		 */
		if ((expires - basem) > TICK_NSEC)
			base->is_idle = true;
	}
	raw_spin_unlock(&base->lock);

	return cmp_next_hrtimer_event(basem, expires);
}

/**
 * timer_clear_idle - Clear the idle state of the timer base
 *
 * Called with interrupts disabled
 */
void timer_clear_idle(void)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	/*
	 * We do this unlocked. The worst outcome is a remote enqueue sending
	 * a pointless IPI, but taking the lock would just make the window for
	 * sending the IPI a few instructions smaller for the cost of taking
	 * the lock in the exit from idle path.
	 */
	base->is_idle = false;
}
#endif

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 */
static inline void __run_timers(struct timer_base *base)
{
	struct hlist_head heads[LVL_DEPTH];
	int levels;

	if (time_before(jiffies, base->next_expiry))
		return;

	timer_base_lock_expiry(base);
	raw_spin_lock_irq(&base->lock);

	while (time_after_eq(jiffies, base->clk) &&
	       time_after_eq(jiffies, base->next_expiry)) {
		levels = collect_expired_timers(base, heads);
		/*
		 * The two possible reasons for not finding any expired
		 * timer at this clk are that all matching timers have been
		 * dequeued or no timer has been queued since
		 * base::next_expiry was set to base::clk +
		 * NEXT_TIMER_MAX_DELTA.
		 */
		WARN_ON_ONCE(!levels && !base->next_expiry_recalc
			     && base->timers_pending);
		base->clk++;
		base->next_expiry = __next_timer_interrupt(base);

		while (levels--)
			expire_timers(base, heads + levels);
	}
	raw_spin_unlock_irq(&base->lock);
	timer_base_unlock_expiry(base);
}
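
/*
 * Expiry path overview (summary of the code in this file): the per-CPU tick
 * calls update_process_times() -> run_local_timers(), which raises
 * TIMER_SOFTIRQ only once base->next_expiry of the standard (or, with
 * CONFIG_NO_HZ_COMMON, the deferrable) base has been reached. The softirq
 * handler run_timer_softirq() then calls __run_timers(), which collects all
 * expired buckets via collect_expired_timers() and hands them to
 * expire_timers().
 */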

/*
 * This function runs expired timers in bottom half (softirq) context.
 */
static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	__run_timers(base);
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
static void run_local_timers(void)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	hrtimer_run_queues();
	/* Raise the softirq only if required. */
	if (time_before(jiffies, base->next_expiry)) {
		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
			return;
		/* CPU is awake, so check the deferrable base. */
		base++;
		if (time_before(jiffies, base->next_expiry))
			return;
	}
	raise_softirq(TIMER_SOFTIRQ);
}

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_sched_clock_irq(user_tick);
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_tick();
#endif
	scheduler_tick();
	if (IS_ENABLED(CONFIG_POSIX_TIMERS))
		run_posix_cpu_timers();
}

/*
 * Since schedule_timeout()'s timer is defined on the stack, it must store
 * the target task on the stack as well.
 */
struct process_timer {
	struct timer_list timer;
	struct task_struct *task;
};

static void process_timeout(struct timer_list *t)
{
	struct process_timer *timeout = from_timer(timeout, t, timer);

	wake_up_process(timeout->task);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have elapsed.
 * The function behavior depends on the current task state
 * (see also set_current_state() description):
 *
 * %TASK_RUNNING - the scheduler is called, but the task does not sleep
 * at all. That happens because sched_submit_work() does nothing for
 * tasks in %TASK_RUNNING state.
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be %TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * Returns 0 when the timer has expired, otherwise the remaining time in
 * jiffies will be returned. In all cases the return value is guaranteed
 * to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct process_timer timer;
	unsigned long expire;

	switch (timeout) {
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			__set_current_state(TASK_RUNNING);
			goto out;
		}
	}

	expire = timeout + jiffies;

	timer.task = current;
	timer_setup_on_stack(&timer.timer, process_timeout, 0);
	__mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
	schedule();
	del_timer_sync(&timer.timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer.timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
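
/*
 * Usage sketch (illustrative only): wait up to one second for a condition,
 * returning early when woken or signalled. wait_condition() is a stand-in
 * for the caller's own check, not a real helper.
 *
 *	signed long remaining = HZ;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	while (!wait_condition() && remaining && !signal_pending(current)) {
 *		remaining = schedule_timeout(remaining);
 *		set_current_state(TASK_INTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * Callers that do not need such a condition loop can use the
 * schedule_timeout_*() wrappers above, which set the task state first.
 */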

/*
 * Like schedule_timeout_uninterruptible(), except this task will not
 * contribute to load average.
 */
signed long __sched schedule_timeout_idle(signed long timeout)
{
	__set_current_state(TASK_IDLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_idle);

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
{
	struct timer_list *timer;
	int cpu = new_base->cpu;

	while (!hlist_empty(head)) {
		timer = hlist_entry(head->first, struct timer_list, entry);
		detach_timer(timer, false);
		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
		internal_add_timer(new_base, timer);
	}
}

int timers_prepare_cpu(unsigned int cpu)
{
	struct timer_base *base;
	int b;

	for (b = 0; b < NR_BASES; b++) {
		base = per_cpu_ptr(&timer_bases[b], cpu);
		base->clk = jiffies;
		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
		base->next_expiry_recalc = false;
		base->timers_pending = false;
		base->is_idle = false;
	}
	return 0;
}

int timers_dead_cpu(unsigned int cpu)
{
	struct timer_base *old_base;
	struct timer_base *new_base;
	int b, i;

	for (b = 0; b < NR_BASES; b++) {
		old_base = per_cpu_ptr(&timer_bases[b], cpu);
		new_base = get_cpu_ptr(&timer_bases[b]);
		/*
		 * The caller is globally serialized and nobody else
		 * takes two locks at once, so deadlock is not possible.
		 */
		raw_spin_lock_irq(&new_base->lock);
		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

		/*
		 * The current CPU's base clock might be stale. Update it
		 * before moving the timers over.
		 */
		forward_timer_base(new_base);

		WARN_ON_ONCE(old_base->running_timer);
		old_base->running_timer = NULL;

		for (i = 0; i < WHEEL_SIZE; i++)
			migrate_timer_list(new_base, old_base->vectors + i);

		raw_spin_unlock(&old_base->lock);
		raw_spin_unlock_irq(&new_base->lock);
		put_cpu_ptr(&timer_bases);
	}
	return 0;
}

#endif /* CONFIG_HOTPLUG_CPU */

static void __init init_timer_cpu(int cpu)
{
	struct timer_base *base;
	int i;

	for (i = 0; i < NR_BASES; i++) {
		base = per_cpu_ptr(&timer_bases[i], cpu);
		base->cpu = cpu;
		raw_spin_lock_init(&base->lock);
		base->clk = jiffies;
		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
		timer_base_init_expiry_lock(base);
	}
}

static void __init init_timer_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		init_timer_cpu(cpu);
}

void __init init_timers(void)
{
	init_timer_cpus();
	posix_cputimers_init_work();
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);
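
/*
 * Example (illustrative only): a driver that needs to let hardware settle
 * for roughly 20 ms can simply call
 *
 *	msleep(20);
 *
 * Because msleep() is built on the timer wheel, the actual sleep may be
 * noticeably longer than requested, in particular for small values at low HZ
 * (the jiffy conversion above adds at least one extra tick). For short
 * delays in the range of tens of microseconds up to a few milliseconds, the
 * hrtimer-based usleep_range_state() below (usually reached via the
 * usleep_range() wrapper) is typically the better fit.
 */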

/**
 * usleep_range_state - Sleep for an approximate time in a given state
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 * @state: State the current task will be in while sleeping
 *
 * In non-atomic context where the exact wakeup time is flexible, use
 * usleep_range_state() instead of udelay(). The sleep improves responsiveness
 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
 * power usage by allowing hrtimers to take advantage of an already-scheduled
 * interrupt instead of scheduling a new one just for this sleep.
 */
void __sched usleep_range_state(unsigned long min, unsigned long max,
				unsigned int state)
{
	ktime_t exp = ktime_add_us(ktime_get(), min);
	u64 delta = (u64)(max - min) * NSEC_PER_USEC;

	for (;;) {
		__set_current_state(state);
		/* Do not return before the requested sleep time has elapsed */
		if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
			break;
	}
}
EXPORT_SYMBOL(usleep_range_state);
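
/*
 * Usage sketch (illustrative only): most callers reach this function through
 * the usleep_range() wrapper in <linux/delay.h>, which sleeps in
 * TASK_UNINTERRUPTIBLE, e.g. to wait 1-2 ms for a device register to become
 * valid:
 *
 *	usleep_range(1000, 2000);
 *
 * The slack between @min and @max lets the hrtimer code coalesce this wakeup
 * with an interrupt that is already scheduled.
 */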