/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
 * level has a different granularity.
 *
 * The level granularity is:		LVL_CLK_DIV ^ lvl
 * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
 *
 * The array level of a newly armed timer depends on the relative expiry
 * time. The farther the expiry time is away the higher the array level and
 * therefore the granularity becomes.
 *
 * Contrary to the original timer wheel implementation, which aims for 'exact'
 * expiry of the timers, this implementation removes the need for recascading
 * the timers into the lower array levels. The previous 'classic' timer wheel
 * implementation of the kernel already violated the 'exact' expiry by adding
 * slack to the expiry time to provide batched expiration. The granularity
 * levels provide implicit batching.
 *
 * This is an optimization of the original timer wheel implementation for the
 * majority of the timer wheel use cases: timeouts. The vast majority of
 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
 * the timeout expires it indicates that normal operation is disturbed, so it
 * does not matter much whether the timeout comes with a slight delay.
 *
 * The only exception to this is networking timers with a small expiry
 * time. They rely on the granularity. Those fit into the first wheel level,
 * which has HZ granularity.
 *
 * We don't have cascading anymore. Timers with an expiry time above the
 * capacity of the last wheel level are force expired at the maximum timeout
 * value of the last wheel level. From data sampling we know that the maximum
 * value observed is 5 days (network connection tracking), so this should not
 * be an issue.
 *
 * The currently chosen array constants are a good compromise between array
 * size and granularity.
 *
 * This results in the following granularity and range levels:
 *
 * HZ 1000 steps
 * Level Offset  Granularity            Range
 *  0      0         1 ms                0 ms -         63 ms
 *  1     64         8 ms               64 ms -        511 ms
 *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
 *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
 *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
 *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
 *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
 *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
 *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
 *
 * HZ  300
 * Level Offset  Granularity            Range
 *  0      0         3 ms                0 ms -        210 ms
 *  1     64        26 ms              213 ms -       1703 ms (213ms - ~1s)
 *  2    128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
 *  3    192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
 *  4    256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
 *  5    320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
 *  6    384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
 *  7    448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
 *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 *
 * HZ  250
 * Level Offset  Granularity            Range
 *  0      0         4 ms                0 ms -        255 ms
 *  1     64        32 ms              256 ms -       2047 ms (256ms - ~2s)
 *  2    128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
 *  3    192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
 *  4    256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
 *  5    320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
 *  6    384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
 *  7    448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
 *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 *
 * HZ  100
 * Level Offset  Granularity            Range
 *  0      0        10 ms                0 ms -        630 ms
 *  1     64        80 ms              640 ms -       5110 ms (640ms - ~5s)
 *  2    128       640 ms             5120 ms -      40950 ms (~5s - ~40s)
 *  3    192      5120 ms (~5s)      40960 ms -     327670 ms (~40s - ~5m)
 *  4    256     40960 ms (~40s)    327680 ms -    2621430 ms (~5m - ~43m)
 *  5    320    327680 ms (~5m)    2621440 ms -   20971510 ms (~43m - ~5h)
 *  6    384   2621440 ms (~43m)  20971520 ms -  167772150 ms (~5h - ~1d)
 *  7    448  20971520 ms (~5h)  167772160 ms - 1342177270 ms (~1d - ~15d)
 */
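/*
 * Editor-added illustration, not part of the original source: a minimal
 * sketch of the level selection described above, assuming HZ == 1000 and
 * the constants this file uses (LVL_CLK_SHIFT == 3, LVL_SIZE == 64, which
 * match the LVL_* macros defined below). example_wheel_level() is a
 * hypothetical helper, not a kernel function.
 */
static unsigned int example_wheel_level(unsigned long delta)
{
	unsigned int lvl;

	/* Level n starts at LVL_START(n) = (LVL_SIZE - 1) << ((n - 1) * LVL_CLK_SHIFT). */
	for (lvl = 1; lvl < 9; lvl++) {
		if (delta < (63UL << ((lvl - 1) * 3)))
			return lvl - 1;
	}
	/* Larger deltas are capped at the last level (see WHEEL_TIMEOUT_MAX below). */
	return 8;
}

/*
 * For example, example_wheel_level(100) returns 1: with HZ == 1000 a 100ms
 * timeout is queued with 8ms granularity, as in the first table above.
 */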
/* Clock divisor for the next level */
#define LVL_CLK_SHIFT	3
#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))

/*
 * The time start value for each level to select the bucket at enqueue
 * time.
 */
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

/* Size of each clock level */
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_MASK	(LVL_SIZE - 1)
#define LVL_OFFS(n)	((n) * LVL_SIZE)

/* Level depth */
#if HZ > 100
# define LVL_DEPTH	9
# else
# define LVL_DEPTH	8
#endif

/* The cutoff (max.
capacity of the wheel) */ 178 #define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH)) 179 #define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1)) 180 181 /* 182 * The resulting wheel size. If NOHZ is configured we allocate two 183 * wheels so we have a separate storage for the deferrable timers. 184 */ 185 #define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH) 186 187 #ifdef CONFIG_NO_HZ_COMMON 188 # define NR_BASES 2 189 # define BASE_STD 0 190 # define BASE_DEF 1 191 #else 192 # define NR_BASES 1 193 # define BASE_STD 0 194 # define BASE_DEF 0 195 #endif 196 197 struct timer_base { 198 raw_spinlock_t lock; 199 struct timer_list *running_timer; 200 unsigned long clk; 201 unsigned long next_expiry; 202 unsigned int cpu; 203 bool migration_enabled; 204 bool nohz_active; 205 bool is_idle; 206 bool must_forward_clk; 207 DECLARE_BITMAP(pending_map, WHEEL_SIZE); 208 struct hlist_head vectors[WHEEL_SIZE]; 209 } ____cacheline_aligned; 210 211 static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]); 212 213 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 214 unsigned int sysctl_timer_migration = 1; 215 216 void timers_update_migration(bool update_nohz) 217 { 218 bool on = sysctl_timer_migration && tick_nohz_active; 219 unsigned int cpu; 220 221 /* Avoid the loop, if nothing to update */ 222 if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on) 223 return; 224 225 for_each_possible_cpu(cpu) { 226 per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on; 227 per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on; 228 per_cpu(hrtimer_bases.migration_enabled, cpu) = on; 229 if (!update_nohz) 230 continue; 231 per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true; 232 per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true; 233 per_cpu(hrtimer_bases.nohz_active, cpu) = true; 234 } 235 } 236 237 int timer_migration_handler(struct ctl_table *table, int write, 238 void __user *buffer, size_t *lenp, 239 loff_t *ppos) 240 { 241 static DEFINE_MUTEX(mutex); 242 int ret; 243 244 mutex_lock(&mutex); 245 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 246 if (!ret && write) 247 timers_update_migration(false); 248 mutex_unlock(&mutex); 249 return ret; 250 } 251 #endif 252 253 static unsigned long round_jiffies_common(unsigned long j, int cpu, 254 bool force_up) 255 { 256 int rem; 257 unsigned long original = j; 258 259 /* 260 * We don't want all cpus firing their timers at once hitting the 261 * same lock or cachelines, so we skew each extra cpu with an extra 262 * 3 jiffies. This 3 jiffies came originally from the mm/ code which 263 * already did this. 264 * The skew is done by adding 3*cpunr, then round, then subtract this 265 * extra offset again. 266 */ 267 j += cpu * 3; 268 269 rem = j % HZ; 270 271 /* 272 * If the target jiffie is just after a whole second (which can happen 273 * due to delays of the timer irq, long irq off times etc etc) then 274 * we should round down to the whole second, not up. Use 1/4th second 275 * as cutoff for this rounding as an extreme upper bound for this. 276 * But never round down if @force_up is set. 277 */ 278 if (rem < HZ/4 && !force_up) /* round down */ 279 j = j - rem; 280 else /* round up */ 281 j = j - rem + HZ; 282 283 /* now that we have rounded, subtract the extra skew again */ 284 j -= cpu * 3; 285 286 /* 287 * Make sure j is still in the future. Otherwise return the 288 * unmodified value. 289 */ 290 return time_is_after_jiffies(j) ? 
j : original; 291 } 292 293 /** 294 * __round_jiffies - function to round jiffies to a full second 295 * @j: the time in (absolute) jiffies that should be rounded 296 * @cpu: the processor number on which the timeout will happen 297 * 298 * __round_jiffies() rounds an absolute time in the future (in jiffies) 299 * up or down to (approximately) full seconds. This is useful for timers 300 * for which the exact time they fire does not matter too much, as long as 301 * they fire approximately every X seconds. 302 * 303 * By rounding these timers to whole seconds, all such timers will fire 304 * at the same time, rather than at various times spread out. The goal 305 * of this is to have the CPU wake up less, which saves power. 306 * 307 * The exact rounding is skewed for each processor to avoid all 308 * processors firing at the exact same time, which could lead 309 * to lock contention or spurious cache line bouncing. 310 * 311 * The return value is the rounded version of the @j parameter. 312 */ 313 unsigned long __round_jiffies(unsigned long j, int cpu) 314 { 315 return round_jiffies_common(j, cpu, false); 316 } 317 EXPORT_SYMBOL_GPL(__round_jiffies); 318 319 /** 320 * __round_jiffies_relative - function to round jiffies to a full second 321 * @j: the time in (relative) jiffies that should be rounded 322 * @cpu: the processor number on which the timeout will happen 323 * 324 * __round_jiffies_relative() rounds a time delta in the future (in jiffies) 325 * up or down to (approximately) full seconds. This is useful for timers 326 * for which the exact time they fire does not matter too much, as long as 327 * they fire approximately every X seconds. 328 * 329 * By rounding these timers to whole seconds, all such timers will fire 330 * at the same time, rather than at various times spread out. The goal 331 * of this is to have the CPU wake up less, which saves power. 332 * 333 * The exact rounding is skewed for each processor to avoid all 334 * processors firing at the exact same time, which could lead 335 * to lock contention or spurious cache line bouncing. 336 * 337 * The return value is the rounded version of the @j parameter. 338 */ 339 unsigned long __round_jiffies_relative(unsigned long j, int cpu) 340 { 341 unsigned long j0 = jiffies; 342 343 /* Use j0 because jiffies might change while we run */ 344 return round_jiffies_common(j + j0, cpu, false) - j0; 345 } 346 EXPORT_SYMBOL_GPL(__round_jiffies_relative); 347 348 /** 349 * round_jiffies - function to round jiffies to a full second 350 * @j: the time in (absolute) jiffies that should be rounded 351 * 352 * round_jiffies() rounds an absolute time in the future (in jiffies) 353 * up or down to (approximately) full seconds. This is useful for timers 354 * for which the exact time they fire does not matter too much, as long as 355 * they fire approximately every X seconds. 356 * 357 * By rounding these timers to whole seconds, all such timers will fire 358 * at the same time, rather than at various times spread out. The goal 359 * of this is to have the CPU wake up less, which saves power. 360 * 361 * The return value is the rounded version of the @j parameter. 
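/*
 * Editor-added usage sketch, not part of the original source: a periodic
 * housekeeping timer that only needs roughly one-second resolution can be
 * batched onto whole seconds with round_jiffies(), as described above. The
 * my_status_* names are hypothetical; driver code would also need
 * <linux/timer.h> and <linux/jiffies.h>.
 */
static struct timer_list my_status_timer;

static void my_status_poll(unsigned long data)
{
	/* ... periodic work ... */

	/* Re-arm about a second out, rounded so wakeups can coalesce. */
	mod_timer(&my_status_timer, round_jiffies(jiffies + HZ));
}

static void my_status_start(void)
{
	setup_timer(&my_status_timer, my_status_poll, 0);
	mod_timer(&my_status_timer, round_jiffies(jiffies + HZ));
}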
362 */ 363 unsigned long round_jiffies(unsigned long j) 364 { 365 return round_jiffies_common(j, raw_smp_processor_id(), false); 366 } 367 EXPORT_SYMBOL_GPL(round_jiffies); 368 369 /** 370 * round_jiffies_relative - function to round jiffies to a full second 371 * @j: the time in (relative) jiffies that should be rounded 372 * 373 * round_jiffies_relative() rounds a time delta in the future (in jiffies) 374 * up or down to (approximately) full seconds. This is useful for timers 375 * for which the exact time they fire does not matter too much, as long as 376 * they fire approximately every X seconds. 377 * 378 * By rounding these timers to whole seconds, all such timers will fire 379 * at the same time, rather than at various times spread out. The goal 380 * of this is to have the CPU wake up less, which saves power. 381 * 382 * The return value is the rounded version of the @j parameter. 383 */ 384 unsigned long round_jiffies_relative(unsigned long j) 385 { 386 return __round_jiffies_relative(j, raw_smp_processor_id()); 387 } 388 EXPORT_SYMBOL_GPL(round_jiffies_relative); 389 390 /** 391 * __round_jiffies_up - function to round jiffies up to a full second 392 * @j: the time in (absolute) jiffies that should be rounded 393 * @cpu: the processor number on which the timeout will happen 394 * 395 * This is the same as __round_jiffies() except that it will never 396 * round down. This is useful for timeouts for which the exact time 397 * of firing does not matter too much, as long as they don't fire too 398 * early. 399 */ 400 unsigned long __round_jiffies_up(unsigned long j, int cpu) 401 { 402 return round_jiffies_common(j, cpu, true); 403 } 404 EXPORT_SYMBOL_GPL(__round_jiffies_up); 405 406 /** 407 * __round_jiffies_up_relative - function to round jiffies up to a full second 408 * @j: the time in (relative) jiffies that should be rounded 409 * @cpu: the processor number on which the timeout will happen 410 * 411 * This is the same as __round_jiffies_relative() except that it will never 412 * round down. This is useful for timeouts for which the exact time 413 * of firing does not matter too much, as long as they don't fire too 414 * early. 415 */ 416 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu) 417 { 418 unsigned long j0 = jiffies; 419 420 /* Use j0 because jiffies might change while we run */ 421 return round_jiffies_common(j + j0, cpu, true) - j0; 422 } 423 EXPORT_SYMBOL_GPL(__round_jiffies_up_relative); 424 425 /** 426 * round_jiffies_up - function to round jiffies up to a full second 427 * @j: the time in (absolute) jiffies that should be rounded 428 * 429 * This is the same as round_jiffies() except that it will never 430 * round down. This is useful for timeouts for which the exact time 431 * of firing does not matter too much, as long as they don't fire too 432 * early. 433 */ 434 unsigned long round_jiffies_up(unsigned long j) 435 { 436 return round_jiffies_common(j, raw_smp_processor_id(), true); 437 } 438 EXPORT_SYMBOL_GPL(round_jiffies_up); 439 440 /** 441 * round_jiffies_up_relative - function to round jiffies up to a full second 442 * @j: the time in (relative) jiffies that should be rounded 443 * 444 * This is the same as round_jiffies_relative() except that it will never 445 * round down. This is useful for timeouts for which the exact time 446 * of firing does not matter too much, as long as they don't fire too 447 * early. 
448 */ 449 unsigned long round_jiffies_up_relative(unsigned long j) 450 { 451 return __round_jiffies_up_relative(j, raw_smp_processor_id()); 452 } 453 EXPORT_SYMBOL_GPL(round_jiffies_up_relative); 454 455 456 static inline unsigned int timer_get_idx(struct timer_list *timer) 457 { 458 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT; 459 } 460 461 static inline void timer_set_idx(struct timer_list *timer, unsigned int idx) 462 { 463 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) | 464 idx << TIMER_ARRAYSHIFT; 465 } 466 467 /* 468 * Helper function to calculate the array index for a given expiry 469 * time. 470 */ 471 static inline unsigned calc_index(unsigned expires, unsigned lvl) 472 { 473 expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl); 474 return LVL_OFFS(lvl) + (expires & LVL_MASK); 475 } 476 477 static int calc_wheel_index(unsigned long expires, unsigned long clk) 478 { 479 unsigned long delta = expires - clk; 480 unsigned int idx; 481 482 if (delta < LVL_START(1)) { 483 idx = calc_index(expires, 0); 484 } else if (delta < LVL_START(2)) { 485 idx = calc_index(expires, 1); 486 } else if (delta < LVL_START(3)) { 487 idx = calc_index(expires, 2); 488 } else if (delta < LVL_START(4)) { 489 idx = calc_index(expires, 3); 490 } else if (delta < LVL_START(5)) { 491 idx = calc_index(expires, 4); 492 } else if (delta < LVL_START(6)) { 493 idx = calc_index(expires, 5); 494 } else if (delta < LVL_START(7)) { 495 idx = calc_index(expires, 6); 496 } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) { 497 idx = calc_index(expires, 7); 498 } else if ((long) delta < 0) { 499 idx = clk & LVL_MASK; 500 } else { 501 /* 502 * Force expire obscene large timeouts to expire at the 503 * capacity limit of the wheel. 504 */ 505 if (expires >= WHEEL_TIMEOUT_CUTOFF) 506 expires = WHEEL_TIMEOUT_MAX; 507 508 idx = calc_index(expires, LVL_DEPTH - 1); 509 } 510 return idx; 511 } 512 513 /* 514 * Enqueue the timer into the hash bucket, mark it pending in 515 * the bitmap and store the index in the timer flags. 516 */ 517 static void enqueue_timer(struct timer_base *base, struct timer_list *timer, 518 unsigned int idx) 519 { 520 hlist_add_head(&timer->entry, base->vectors + idx); 521 __set_bit(idx, base->pending_map); 522 timer_set_idx(timer, idx); 523 } 524 525 static void 526 __internal_add_timer(struct timer_base *base, struct timer_list *timer) 527 { 528 unsigned int idx; 529 530 idx = calc_wheel_index(timer->expires, base->clk); 531 enqueue_timer(base, timer, idx); 532 } 533 534 static void 535 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer) 536 { 537 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active) 538 return; 539 540 /* 541 * TODO: This wants some optimizing similar to the code below, but we 542 * will do that when we switch from push to pull for deferrable timers. 543 */ 544 if (timer->flags & TIMER_DEFERRABLE) { 545 if (tick_nohz_full_cpu(base->cpu)) 546 wake_up_nohz_cpu(base->cpu); 547 return; 548 } 549 550 /* 551 * We might have to IPI the remote CPU if the base is idle and the 552 * timer is not deferrable. 
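/*
 * Editor-added worked example, not part of the original source, assuming
 * HZ == 1000: a timer armed for jiffies + 100 while base->clk == 1000 has
 * delta == 100, which is >= LVL_START(1) (63) and < LVL_START(2) (504), so
 * calc_wheel_index() selects level 1. calc_index() then rounds the absolute
 * expiry up to the level granularity:
 *
 *   expires = (1100 + LVL_GRAN(1)) >> LVL_SHIFT(1) = 1108 >> 3 = 138
 *   idx     = LVL_OFFS(1) + (138 & LVL_MASK)       = 64 + 10   = 74
 *
 * The timer lands in bucket 74 and fires once the base clock reaches
 * 138 << 3 = 1104, i.e. 4 jiffies after the requested expiry, well within
 * the 8ms granularity of level 1.
 */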
If the other CPU is on the way to idle 553 * then it can't set base->is_idle as we hold the base lock: 554 */ 555 if (!base->is_idle) 556 return; 557 558 /* Check whether this is the new first expiring timer: */ 559 if (time_after_eq(timer->expires, base->next_expiry)) 560 return; 561 562 /* 563 * Set the next expiry time and kick the CPU so it can reevaluate the 564 * wheel: 565 */ 566 base->next_expiry = timer->expires; 567 wake_up_nohz_cpu(base->cpu); 568 } 569 570 static void 571 internal_add_timer(struct timer_base *base, struct timer_list *timer) 572 { 573 __internal_add_timer(base, timer); 574 trigger_dyntick_cpu(base, timer); 575 } 576 577 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS 578 579 static struct debug_obj_descr timer_debug_descr; 580 581 static void *timer_debug_hint(void *addr) 582 { 583 return ((struct timer_list *) addr)->function; 584 } 585 586 static bool timer_is_static_object(void *addr) 587 { 588 struct timer_list *timer = addr; 589 590 return (timer->entry.pprev == NULL && 591 timer->entry.next == TIMER_ENTRY_STATIC); 592 } 593 594 /* 595 * fixup_init is called when: 596 * - an active object is initialized 597 */ 598 static bool timer_fixup_init(void *addr, enum debug_obj_state state) 599 { 600 struct timer_list *timer = addr; 601 602 switch (state) { 603 case ODEBUG_STATE_ACTIVE: 604 del_timer_sync(timer); 605 debug_object_init(timer, &timer_debug_descr); 606 return true; 607 default: 608 return false; 609 } 610 } 611 612 /* Stub timer callback for improperly used timers. */ 613 static void stub_timer(unsigned long data) 614 { 615 WARN_ON(1); 616 } 617 618 /* 619 * fixup_activate is called when: 620 * - an active object is activated 621 * - an unknown non-static object is activated 622 */ 623 static bool timer_fixup_activate(void *addr, enum debug_obj_state state) 624 { 625 struct timer_list *timer = addr; 626 627 switch (state) { 628 case ODEBUG_STATE_NOTAVAILABLE: 629 setup_timer(timer, stub_timer, 0); 630 return true; 631 632 case ODEBUG_STATE_ACTIVE: 633 WARN_ON(1); 634 635 default: 636 return false; 637 } 638 } 639 640 /* 641 * fixup_free is called when: 642 * - an active object is freed 643 */ 644 static bool timer_fixup_free(void *addr, enum debug_obj_state state) 645 { 646 struct timer_list *timer = addr; 647 648 switch (state) { 649 case ODEBUG_STATE_ACTIVE: 650 del_timer_sync(timer); 651 debug_object_free(timer, &timer_debug_descr); 652 return true; 653 default: 654 return false; 655 } 656 } 657 658 /* 659 * fixup_assert_init is called when: 660 * - an untracked/uninit-ed object is found 661 */ 662 static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state) 663 { 664 struct timer_list *timer = addr; 665 666 switch (state) { 667 case ODEBUG_STATE_NOTAVAILABLE: 668 setup_timer(timer, stub_timer, 0); 669 return true; 670 default: 671 return false; 672 } 673 } 674 675 static struct debug_obj_descr timer_debug_descr = { 676 .name = "timer_list", 677 .debug_hint = timer_debug_hint, 678 .is_static_object = timer_is_static_object, 679 .fixup_init = timer_fixup_init, 680 .fixup_activate = timer_fixup_activate, 681 .fixup_free = timer_fixup_free, 682 .fixup_assert_init = timer_fixup_assert_init, 683 }; 684 685 static inline void debug_timer_init(struct timer_list *timer) 686 { 687 debug_object_init(timer, &timer_debug_descr); 688 } 689 690 static inline void debug_timer_activate(struct timer_list *timer) 691 { 692 debug_object_activate(timer, &timer_debug_descr); 693 } 694 695 static inline void debug_timer_deactivate(struct timer_list *timer) 696 { 697 
debug_object_deactivate(timer, &timer_debug_descr); 698 } 699 700 static inline void debug_timer_free(struct timer_list *timer) 701 { 702 debug_object_free(timer, &timer_debug_descr); 703 } 704 705 static inline void debug_timer_assert_init(struct timer_list *timer) 706 { 707 debug_object_assert_init(timer, &timer_debug_descr); 708 } 709 710 static void do_init_timer(struct timer_list *timer, unsigned int flags, 711 const char *name, struct lock_class_key *key); 712 713 void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags, 714 const char *name, struct lock_class_key *key) 715 { 716 debug_object_init_on_stack(timer, &timer_debug_descr); 717 do_init_timer(timer, flags, name, key); 718 } 719 EXPORT_SYMBOL_GPL(init_timer_on_stack_key); 720 721 void destroy_timer_on_stack(struct timer_list *timer) 722 { 723 debug_object_free(timer, &timer_debug_descr); 724 } 725 EXPORT_SYMBOL_GPL(destroy_timer_on_stack); 726 727 #else 728 static inline void debug_timer_init(struct timer_list *timer) { } 729 static inline void debug_timer_activate(struct timer_list *timer) { } 730 static inline void debug_timer_deactivate(struct timer_list *timer) { } 731 static inline void debug_timer_assert_init(struct timer_list *timer) { } 732 #endif 733 734 static inline void debug_init(struct timer_list *timer) 735 { 736 debug_timer_init(timer); 737 trace_timer_init(timer); 738 } 739 740 static inline void 741 debug_activate(struct timer_list *timer, unsigned long expires) 742 { 743 debug_timer_activate(timer); 744 trace_timer_start(timer, expires, timer->flags); 745 } 746 747 static inline void debug_deactivate(struct timer_list *timer) 748 { 749 debug_timer_deactivate(timer); 750 trace_timer_cancel(timer); 751 } 752 753 static inline void debug_assert_init(struct timer_list *timer) 754 { 755 debug_timer_assert_init(timer); 756 } 757 758 static void do_init_timer(struct timer_list *timer, unsigned int flags, 759 const char *name, struct lock_class_key *key) 760 { 761 timer->entry.pprev = NULL; 762 timer->flags = flags | raw_smp_processor_id(); 763 lockdep_init_map(&timer->lockdep_map, name, key, 0); 764 } 765 766 /** 767 * init_timer_key - initialize a timer 768 * @timer: the timer to be initialized 769 * @flags: timer flags 770 * @name: name of the timer 771 * @key: lockdep class key of the fake lock used for tracking timer 772 * sync lock dependencies 773 * 774 * init_timer_key() must be done to a timer prior calling *any* of the 775 * other timer functions. 
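/*
 * Editor-added usage sketch, not part of the original source: the typical
 * init-once / arm / cancel sequence built on the API documented above. The
 * struct mydrv layout, names and the 200ms timeout are hypothetical; real
 * driver code would include <linux/timer.h> and <linux/jiffies.h>.
 */
struct mydrv {
	struct device		*dev;
	struct timer_list	timeout_timer;
};

static void mydrv_timeout(unsigned long data)
{
	struct mydrv *drv = (struct mydrv *)data;

	dev_warn(drv->dev, "request timed out\n");
}

static void mydrv_init_timer(struct mydrv *drv)
{
	/* Must be done before any other timer call on this timer. */
	setup_timer(&drv->timeout_timer, mydrv_timeout, (unsigned long)drv);
}

static void mydrv_start_request(struct mydrv *drv)
{
	/* Arm (or re-arm) the timeout roughly 200ms from now. */
	mod_timer(&drv->timeout_timer, jiffies + msecs_to_jiffies(200));
}

static void mydrv_request_done(struct mydrv *drv)
{
	/* The common case: the timeout is canceled before it ever fires. */
	del_timer(&drv->timeout_timer);
}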
776 */ 777 void init_timer_key(struct timer_list *timer, unsigned int flags, 778 const char *name, struct lock_class_key *key) 779 { 780 debug_init(timer); 781 do_init_timer(timer, flags, name, key); 782 } 783 EXPORT_SYMBOL(init_timer_key); 784 785 static inline void detach_timer(struct timer_list *timer, bool clear_pending) 786 { 787 struct hlist_node *entry = &timer->entry; 788 789 debug_deactivate(timer); 790 791 __hlist_del(entry); 792 if (clear_pending) 793 entry->pprev = NULL; 794 entry->next = LIST_POISON2; 795 } 796 797 static int detach_if_pending(struct timer_list *timer, struct timer_base *base, 798 bool clear_pending) 799 { 800 unsigned idx = timer_get_idx(timer); 801 802 if (!timer_pending(timer)) 803 return 0; 804 805 if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) 806 __clear_bit(idx, base->pending_map); 807 808 detach_timer(timer, clear_pending); 809 return 1; 810 } 811 812 static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu) 813 { 814 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu); 815 816 /* 817 * If the timer is deferrable and nohz is active then we need to use 818 * the deferrable base. 819 */ 820 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active && 821 (tflags & TIMER_DEFERRABLE)) 822 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu); 823 return base; 824 } 825 826 static inline struct timer_base *get_timer_this_cpu_base(u32 tflags) 827 { 828 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 829 830 /* 831 * If the timer is deferrable and nohz is active then we need to use 832 * the deferrable base. 833 */ 834 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active && 835 (tflags & TIMER_DEFERRABLE)) 836 base = this_cpu_ptr(&timer_bases[BASE_DEF]); 837 return base; 838 } 839 840 static inline struct timer_base *get_timer_base(u32 tflags) 841 { 842 return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK); 843 } 844 845 #ifdef CONFIG_NO_HZ_COMMON 846 static inline struct timer_base * 847 get_target_base(struct timer_base *base, unsigned tflags) 848 { 849 #ifdef CONFIG_SMP 850 if ((tflags & TIMER_PINNED) || !base->migration_enabled) 851 return get_timer_this_cpu_base(tflags); 852 return get_timer_cpu_base(tflags, get_nohz_timer_target()); 853 #else 854 return get_timer_this_cpu_base(tflags); 855 #endif 856 } 857 858 static inline void forward_timer_base(struct timer_base *base) 859 { 860 unsigned long jnow; 861 862 /* 863 * We only forward the base when we are idle or have just come out of 864 * idle (must_forward_clk logic), and have a delta between base clock 865 * and jiffies. In the common case, run_timers will take care of it. 866 */ 867 if (likely(!base->must_forward_clk)) 868 return; 869 870 jnow = READ_ONCE(jiffies); 871 base->must_forward_clk = base->is_idle; 872 if ((long)(jnow - base->clk) < 2) 873 return; 874 875 /* 876 * If the next expiry value is > jiffies, then we fast forward to 877 * jiffies otherwise we forward to the next expiry value. 
878 */ 879 if (time_after(base->next_expiry, jnow)) 880 base->clk = jnow; 881 else 882 base->clk = base->next_expiry; 883 } 884 #else 885 static inline struct timer_base * 886 get_target_base(struct timer_base *base, unsigned tflags) 887 { 888 return get_timer_this_cpu_base(tflags); 889 } 890 891 static inline void forward_timer_base(struct timer_base *base) { } 892 #endif 893 894 895 /* 896 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means 897 * that all timers which are tied to this base are locked, and the base itself 898 * is locked too. 899 * 900 * So __run_timers/migrate_timers can safely modify all timers which could 901 * be found in the base->vectors array. 902 * 903 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need 904 * to wait until the migration is done. 905 */ 906 static struct timer_base *lock_timer_base(struct timer_list *timer, 907 unsigned long *flags) 908 __acquires(timer->base->lock) 909 { 910 for (;;) { 911 struct timer_base *base; 912 u32 tf; 913 914 /* 915 * We need to use READ_ONCE() here, otherwise the compiler 916 * might re-read @tf between the check for TIMER_MIGRATING 917 * and spin_lock(). 918 */ 919 tf = READ_ONCE(timer->flags); 920 921 if (!(tf & TIMER_MIGRATING)) { 922 base = get_timer_base(tf); 923 raw_spin_lock_irqsave(&base->lock, *flags); 924 if (timer->flags == tf) 925 return base; 926 raw_spin_unlock_irqrestore(&base->lock, *flags); 927 } 928 cpu_relax(); 929 } 930 } 931 932 static inline int 933 __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) 934 { 935 struct timer_base *base, *new_base; 936 unsigned int idx = UINT_MAX; 937 unsigned long clk = 0, flags; 938 int ret = 0; 939 940 BUG_ON(!timer->function); 941 942 /* 943 * This is a common optimization triggered by the networking code - if 944 * the timer is re-modified to have the same timeout or ends up in the 945 * same array bucket then just return: 946 */ 947 if (timer_pending(timer)) { 948 /* 949 * The downside of this optimization is that it can result in 950 * larger granularity than you would get from adding a new 951 * timer with this expiry. 952 */ 953 if (timer->expires == expires) 954 return 1; 955 956 /* 957 * We lock timer base and calculate the bucket index right 958 * here. If the timer ends up in the same bucket, then we 959 * just update the expiry time and avoid the whole 960 * dequeue/enqueue dance. 961 */ 962 base = lock_timer_base(timer, &flags); 963 forward_timer_base(base); 964 965 clk = base->clk; 966 idx = calc_wheel_index(expires, clk); 967 968 /* 969 * Retrieve and compare the array index of the pending 970 * timer. If it matches set the expiry to the new value so a 971 * subsequent call will exit in the expires check above. 972 */ 973 if (idx == timer_get_idx(timer)) { 974 timer->expires = expires; 975 ret = 1; 976 goto out_unlock; 977 } 978 } else { 979 base = lock_timer_base(timer, &flags); 980 forward_timer_base(base); 981 } 982 983 ret = detach_if_pending(timer, base, false); 984 if (!ret && pending_only) 985 goto out_unlock; 986 987 debug_activate(timer, expires); 988 989 new_base = get_target_base(base, timer->flags); 990 991 if (base != new_base) { 992 /* 993 * We are trying to schedule the timer on the new base. 994 * However we can't change timer's base while it is running, 995 * otherwise del_timer_sync() can't detect that the timer's 996 * handler yet has not finished. This also guarantees that the 997 * timer is serialized wrt itself. 
998 */ 999 if (likely(base->running_timer != timer)) { 1000 /* See the comment in lock_timer_base() */ 1001 timer->flags |= TIMER_MIGRATING; 1002 1003 raw_spin_unlock(&base->lock); 1004 base = new_base; 1005 raw_spin_lock(&base->lock); 1006 WRITE_ONCE(timer->flags, 1007 (timer->flags & ~TIMER_BASEMASK) | base->cpu); 1008 forward_timer_base(base); 1009 } 1010 } 1011 1012 timer->expires = expires; 1013 /* 1014 * If 'idx' was calculated above and the base time did not advance 1015 * between calculating 'idx' and possibly switching the base, only 1016 * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise 1017 * we need to (re)calculate the wheel index via 1018 * internal_add_timer(). 1019 */ 1020 if (idx != UINT_MAX && clk == base->clk) { 1021 enqueue_timer(base, timer, idx); 1022 trigger_dyntick_cpu(base, timer); 1023 } else { 1024 internal_add_timer(base, timer); 1025 } 1026 1027 out_unlock: 1028 raw_spin_unlock_irqrestore(&base->lock, flags); 1029 1030 return ret; 1031 } 1032 1033 /** 1034 * mod_timer_pending - modify a pending timer's timeout 1035 * @timer: the pending timer to be modified 1036 * @expires: new timeout in jiffies 1037 * 1038 * mod_timer_pending() is the same for pending timers as mod_timer(), 1039 * but will not re-activate and modify already deleted timers. 1040 * 1041 * It is useful for unserialized use of timers. 1042 */ 1043 int mod_timer_pending(struct timer_list *timer, unsigned long expires) 1044 { 1045 return __mod_timer(timer, expires, true); 1046 } 1047 EXPORT_SYMBOL(mod_timer_pending); 1048 1049 /** 1050 * mod_timer - modify a timer's timeout 1051 * @timer: the timer to be modified 1052 * @expires: new timeout in jiffies 1053 * 1054 * mod_timer() is a more efficient way to update the expire field of an 1055 * active timer (if the timer is inactive it will be activated) 1056 * 1057 * mod_timer(timer, expires) is equivalent to: 1058 * 1059 * del_timer(timer); timer->expires = expires; add_timer(timer); 1060 * 1061 * Note that if there are multiple unserialized concurrent users of the 1062 * same timer, then mod_timer() is the only safe way to modify the timeout, 1063 * since add_timer() cannot modify an already running timer. 1064 * 1065 * The function returns whether it has modified a pending timer or not. 1066 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an 1067 * active timer returns 1.) 1068 */ 1069 int mod_timer(struct timer_list *timer, unsigned long expires) 1070 { 1071 return __mod_timer(timer, expires, false); 1072 } 1073 EXPORT_SYMBOL(mod_timer); 1074 1075 /** 1076 * add_timer - start a timer 1077 * @timer: the timer to be added 1078 * 1079 * The kernel will do a ->function(->data) callback from the 1080 * timer interrupt at the ->expires point in the future. The 1081 * current time is 'jiffies'. 1082 * 1083 * The timer's ->expires, ->function (and if the handler uses it, ->data) 1084 * fields must be set prior calling this function. 1085 * 1086 * Timers with an ->expires field in the past will be executed in the next 1087 * timer tick. 1088 */ 1089 void add_timer(struct timer_list *timer) 1090 { 1091 BUG_ON(timer_pending(timer)); 1092 mod_timer(timer, timer->expires); 1093 } 1094 EXPORT_SYMBOL(add_timer); 1095 1096 /** 1097 * add_timer_on - start a timer on a particular CPU 1098 * @timer: the timer to be added 1099 * @cpu: the CPU to start it on 1100 * 1101 * This is not very scalable on SMP. Double adds are not possible. 
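/*
 * Editor-added sketch, not part of the original source: arming a timer on a
 * specific CPU with add_timer_on(). The percpu_watchdog naming and the
 * 10 second period are hypothetical.
 */
static DEFINE_PER_CPU(struct timer_list, percpu_watchdog);

static void percpu_watchdog_fn(unsigned long data)
{
	/* Runs from the timer softirq of the CPU the timer was added on. */
}

static void percpu_watchdog_arm(int cpu)
{
	struct timer_list *t = per_cpu_ptr(&percpu_watchdog, cpu);

	setup_timer(t, percpu_watchdog_fn, 0);
	t->expires = jiffies + 10 * HZ;
	/* Double adds are not allowed: @t must not already be pending here. */
	add_timer_on(t, cpu);
}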
1102 */ 1103 void add_timer_on(struct timer_list *timer, int cpu) 1104 { 1105 struct timer_base *new_base, *base; 1106 unsigned long flags; 1107 1108 BUG_ON(timer_pending(timer) || !timer->function); 1109 1110 new_base = get_timer_cpu_base(timer->flags, cpu); 1111 1112 /* 1113 * If @timer was on a different CPU, it should be migrated with the 1114 * old base locked to prevent other operations proceeding with the 1115 * wrong base locked. See lock_timer_base(). 1116 */ 1117 base = lock_timer_base(timer, &flags); 1118 if (base != new_base) { 1119 timer->flags |= TIMER_MIGRATING; 1120 1121 raw_spin_unlock(&base->lock); 1122 base = new_base; 1123 raw_spin_lock(&base->lock); 1124 WRITE_ONCE(timer->flags, 1125 (timer->flags & ~TIMER_BASEMASK) | cpu); 1126 } 1127 forward_timer_base(base); 1128 1129 debug_activate(timer, timer->expires); 1130 internal_add_timer(base, timer); 1131 raw_spin_unlock_irqrestore(&base->lock, flags); 1132 } 1133 EXPORT_SYMBOL_GPL(add_timer_on); 1134 1135 /** 1136 * del_timer - deactivate a timer. 1137 * @timer: the timer to be deactivated 1138 * 1139 * del_timer() deactivates a timer - this works on both active and inactive 1140 * timers. 1141 * 1142 * The function returns whether it has deactivated a pending timer or not. 1143 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an 1144 * active timer returns 1.) 1145 */ 1146 int del_timer(struct timer_list *timer) 1147 { 1148 struct timer_base *base; 1149 unsigned long flags; 1150 int ret = 0; 1151 1152 debug_assert_init(timer); 1153 1154 if (timer_pending(timer)) { 1155 base = lock_timer_base(timer, &flags); 1156 ret = detach_if_pending(timer, base, true); 1157 raw_spin_unlock_irqrestore(&base->lock, flags); 1158 } 1159 1160 return ret; 1161 } 1162 EXPORT_SYMBOL(del_timer); 1163 1164 /** 1165 * try_to_del_timer_sync - Try to deactivate a timer 1166 * @timer: timer to delete 1167 * 1168 * This function tries to deactivate a timer. Upon successful (ret >= 0) 1169 * exit the timer is not queued and the handler is not running on any CPU. 1170 */ 1171 int try_to_del_timer_sync(struct timer_list *timer) 1172 { 1173 struct timer_base *base; 1174 unsigned long flags; 1175 int ret = -1; 1176 1177 debug_assert_init(timer); 1178 1179 base = lock_timer_base(timer, &flags); 1180 1181 if (base->running_timer != timer) 1182 ret = detach_if_pending(timer, base, true); 1183 1184 raw_spin_unlock_irqrestore(&base->lock, flags); 1185 1186 return ret; 1187 } 1188 EXPORT_SYMBOL(try_to_del_timer_sync); 1189 1190 #ifdef CONFIG_SMP 1191 /** 1192 * del_timer_sync - deactivate a timer and wait for the handler to finish. 1193 * @timer: the timer to be deactivated 1194 * 1195 * This function only differs from del_timer() on SMP: besides deactivating 1196 * the timer it also makes sure the handler has finished executing on other 1197 * CPUs. 1198 * 1199 * Synchronization rules: Callers must prevent restarting of the timer, 1200 * otherwise this function is meaningless. It must not be called from 1201 * interrupt contexts unless the timer is an irqsafe one. The caller must 1202 * not hold locks which would prevent completion of the timer's 1203 * handler. The timer's handler must not call add_timer_on(). Upon exit the 1204 * timer is not queued and the handler is not running on any CPU. 1205 * 1206 * Note: For !irqsafe timers, you must not hold locks that are held in 1207 * interrupt context while calling this function. Even if the lock has 1208 * nothing to do with the timer in question. 
Here's why: 1209 * 1210 * CPU0 CPU1 1211 * ---- ---- 1212 * <SOFTIRQ> 1213 * call_timer_fn(); 1214 * base->running_timer = mytimer; 1215 * spin_lock_irq(somelock); 1216 * <IRQ> 1217 * spin_lock(somelock); 1218 * del_timer_sync(mytimer); 1219 * while (base->running_timer == mytimer); 1220 * 1221 * Now del_timer_sync() will never return and never release somelock. 1222 * The interrupt on the other CPU is waiting to grab somelock but 1223 * it has interrupted the softirq that CPU0 is waiting to finish. 1224 * 1225 * The function returns whether it has deactivated a pending timer or not. 1226 */ 1227 int del_timer_sync(struct timer_list *timer) 1228 { 1229 #ifdef CONFIG_LOCKDEP 1230 unsigned long flags; 1231 1232 /* 1233 * If lockdep gives a backtrace here, please reference 1234 * the synchronization rules above. 1235 */ 1236 local_irq_save(flags); 1237 lock_map_acquire(&timer->lockdep_map); 1238 lock_map_release(&timer->lockdep_map); 1239 local_irq_restore(flags); 1240 #endif 1241 /* 1242 * don't use it in hardirq context, because it 1243 * could lead to deadlock. 1244 */ 1245 WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE)); 1246 for (;;) { 1247 int ret = try_to_del_timer_sync(timer); 1248 if (ret >= 0) 1249 return ret; 1250 cpu_relax(); 1251 } 1252 } 1253 EXPORT_SYMBOL(del_timer_sync); 1254 #endif 1255 1256 static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long), 1257 unsigned long data) 1258 { 1259 int count = preempt_count(); 1260 1261 #ifdef CONFIG_LOCKDEP 1262 /* 1263 * It is permissible to free the timer from inside the 1264 * function that is called from it, this we need to take into 1265 * account for lockdep too. To avoid bogus "held lock freed" 1266 * warnings as well as problems when looking into 1267 * timer->lockdep_map, make a copy and use that here. 1268 */ 1269 struct lockdep_map lockdep_map; 1270 1271 lockdep_copy_map(&lockdep_map, &timer->lockdep_map); 1272 #endif 1273 /* 1274 * Couple the lock chain with the lock chain at 1275 * del_timer_sync() by acquiring the lock_map around the fn() 1276 * call here and in del_timer_sync(). 1277 */ 1278 lock_map_acquire(&lockdep_map); 1279 1280 trace_timer_expire_entry(timer); 1281 fn(data); 1282 trace_timer_expire_exit(timer); 1283 1284 lock_map_release(&lockdep_map); 1285 1286 if (count != preempt_count()) { 1287 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n", 1288 fn, count, preempt_count()); 1289 /* 1290 * Restore the preempt count. That gives us a decent 1291 * chance to survive and extract information. If the 1292 * callback kept a lock held, bad luck, but not worse 1293 * than the BUG() we had. 
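/*
 * Editor-added teardown sketch, not part of the original source, showing the
 * del_timer_sync() synchronization rules in practice: the lock which the
 * timer handler also takes is dropped before waiting for the handler. The
 * struct mydev layout and names are hypothetical.
 */
struct mydev {
	spinlock_t		lock;
	bool			shutting_down;
	struct timer_list	timer;
};

static void mydev_shutdown(struct mydev *dev)
{
	/*
	 * Stop re-arming first, but do NOT hold dev->lock across
	 * del_timer_sync() if the timer handler takes dev->lock too.
	 */
	spin_lock_irq(&dev->lock);
	dev->shutting_down = true;
	spin_unlock_irq(&dev->lock);

	/* Now it is safe to wait for a possibly running handler. */
	del_timer_sync(&dev->timer);
}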
1294 */ 1295 preempt_count_set(count); 1296 } 1297 } 1298 1299 static void expire_timers(struct timer_base *base, struct hlist_head *head) 1300 { 1301 while (!hlist_empty(head)) { 1302 struct timer_list *timer; 1303 void (*fn)(unsigned long); 1304 unsigned long data; 1305 1306 timer = hlist_entry(head->first, struct timer_list, entry); 1307 1308 base->running_timer = timer; 1309 detach_timer(timer, true); 1310 1311 fn = timer->function; 1312 data = timer->data; 1313 1314 if (timer->flags & TIMER_IRQSAFE) { 1315 raw_spin_unlock(&base->lock); 1316 call_timer_fn(timer, fn, data); 1317 raw_spin_lock(&base->lock); 1318 } else { 1319 raw_spin_unlock_irq(&base->lock); 1320 call_timer_fn(timer, fn, data); 1321 raw_spin_lock_irq(&base->lock); 1322 } 1323 } 1324 } 1325 1326 static int __collect_expired_timers(struct timer_base *base, 1327 struct hlist_head *heads) 1328 { 1329 unsigned long clk = base->clk; 1330 struct hlist_head *vec; 1331 int i, levels = 0; 1332 unsigned int idx; 1333 1334 for (i = 0; i < LVL_DEPTH; i++) { 1335 idx = (clk & LVL_MASK) + i * LVL_SIZE; 1336 1337 if (__test_and_clear_bit(idx, base->pending_map)) { 1338 vec = base->vectors + idx; 1339 hlist_move_list(vec, heads++); 1340 levels++; 1341 } 1342 /* Is it time to look at the next level? */ 1343 if (clk & LVL_CLK_MASK) 1344 break; 1345 /* Shift clock for the next level granularity */ 1346 clk >>= LVL_CLK_SHIFT; 1347 } 1348 return levels; 1349 } 1350 1351 #ifdef CONFIG_NO_HZ_COMMON 1352 /* 1353 * Find the next pending bucket of a level. Search from level start (@offset) 1354 * + @clk upwards and if nothing there, search from start of the level 1355 * (@offset) up to @offset + clk. 1356 */ 1357 static int next_pending_bucket(struct timer_base *base, unsigned offset, 1358 unsigned clk) 1359 { 1360 unsigned pos, start = offset + clk; 1361 unsigned end = offset + LVL_SIZE; 1362 1363 pos = find_next_bit(base->pending_map, end, start); 1364 if (pos < end) 1365 return pos - start; 1366 1367 pos = find_next_bit(base->pending_map, start, offset); 1368 return pos < start ? pos + LVL_SIZE - start : -1; 1369 } 1370 1371 /* 1372 * Search the first expiring timer in the various clock levels. Caller must 1373 * hold base->lock. 1374 */ 1375 static unsigned long __next_timer_interrupt(struct timer_base *base) 1376 { 1377 unsigned long clk, next, adj; 1378 unsigned lvl, offset = 0; 1379 1380 next = base->clk + NEXT_TIMER_MAX_DELTA; 1381 clk = base->clk; 1382 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { 1383 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); 1384 1385 if (pos >= 0) { 1386 unsigned long tmp = clk + (unsigned long) pos; 1387 1388 tmp <<= LVL_SHIFT(lvl); 1389 if (time_before(tmp, next)) 1390 next = tmp; 1391 } 1392 /* 1393 * Clock for the next level. If the current level clock lower 1394 * bits are zero, we look at the next level as is. If not we 1395 * need to advance it by one because that's going to be the 1396 * next expiring bucket in that level. base->clk is the next 1397 * expiring jiffie. So in case of: 1398 * 1399 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1400 * 0 0 0 0 0 0 1401 * 1402 * we have to look at all levels @index 0. With 1403 * 1404 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1405 * 0 0 0 0 0 2 1406 * 1407 * LVL0 has the next expiring bucket @index 2. The upper 1408 * levels have the next expiring bucket @index 1. 
1409 * 1410 * In case that the propagation wraps the next level the same 1411 * rules apply: 1412 * 1413 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0 1414 * 0 0 0 0 F 2 1415 * 1416 * So after looking at LVL0 we get: 1417 * 1418 * LVL5 LVL4 LVL3 LVL2 LVL1 1419 * 0 0 0 1 0 1420 * 1421 * So no propagation from LVL1 to LVL2 because that happened 1422 * with the add already, but then we need to propagate further 1423 * from LVL2 to LVL3. 1424 * 1425 * So the simple check whether the lower bits of the current 1426 * level are 0 or not is sufficient for all cases. 1427 */ 1428 adj = clk & LVL_CLK_MASK ? 1 : 0; 1429 clk >>= LVL_CLK_SHIFT; 1430 clk += adj; 1431 } 1432 return next; 1433 } 1434 1435 /* 1436 * Check, if the next hrtimer event is before the next timer wheel 1437 * event: 1438 */ 1439 static u64 cmp_next_hrtimer_event(u64 basem, u64 expires) 1440 { 1441 u64 nextevt = hrtimer_get_next_event(); 1442 1443 /* 1444 * If high resolution timers are enabled 1445 * hrtimer_get_next_event() returns KTIME_MAX. 1446 */ 1447 if (expires <= nextevt) 1448 return expires; 1449 1450 /* 1451 * If the next timer is already expired, return the tick base 1452 * time so the tick is fired immediately. 1453 */ 1454 if (nextevt <= basem) 1455 return basem; 1456 1457 /* 1458 * Round up to the next jiffie. High resolution timers are 1459 * off, so the hrtimers are expired in the tick and we need to 1460 * make sure that this tick really expires the timer to avoid 1461 * a ping pong of the nohz stop code. 1462 * 1463 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3 1464 */ 1465 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC; 1466 } 1467 1468 /** 1469 * get_next_timer_interrupt - return the time (clock mono) of the next timer 1470 * @basej: base time jiffies 1471 * @basem: base time clock monotonic 1472 * 1473 * Returns the tick aligned clock monotonic time of the next pending 1474 * timer or KTIME_MAX if no timer is pending. 1475 */ 1476 u64 get_next_timer_interrupt(unsigned long basej, u64 basem) 1477 { 1478 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1479 u64 expires = KTIME_MAX; 1480 unsigned long nextevt; 1481 bool is_max_delta; 1482 1483 /* 1484 * Pretend that there is no timer pending if the cpu is offline. 1485 * Possible pending timers will be migrated later to an active cpu. 1486 */ 1487 if (cpu_is_offline(smp_processor_id())) 1488 return expires; 1489 1490 raw_spin_lock(&base->lock); 1491 nextevt = __next_timer_interrupt(base); 1492 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); 1493 base->next_expiry = nextevt; 1494 /* 1495 * We have a fresh next event. Check whether we can forward the 1496 * base. We can only do that when @basej is past base->clk 1497 * otherwise we might rewind base->clk. 1498 */ 1499 if (time_after(basej, base->clk)) { 1500 if (time_after(nextevt, basej)) 1501 base->clk = basej; 1502 else if (time_after(nextevt, base->clk)) 1503 base->clk = nextevt; 1504 } 1505 1506 if (time_before_eq(nextevt, basej)) { 1507 expires = basem; 1508 base->is_idle = false; 1509 } else { 1510 if (!is_max_delta) 1511 expires = basem + (u64)(nextevt - basej) * TICK_NSEC; 1512 /* 1513 * If we expect to sleep more than a tick, mark the base idle. 1514 * Also the tick is stopped so any added timer must forward 1515 * the base clk itself to keep granularity small. This idle 1516 * logic is only maintained for the BASE_STD base, deferrable 1517 * timers may still see large granularity skew (by design). 
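/*
 * Editor-added worked example, not part of the original source, assuming
 * HZ == 1000 so TICK_NSEC == 1000000: if basej == 5000, basem == 5000000000
 * and the earliest pending timer expires at nextevt == 5250 (250 jiffies
 * away), then
 *
 *   expires = basem + (u64)(nextevt - basej) * TICK_NSEC
 *           = 5000000000 + 250 * 1000000 = 5250000000 ns
 *
 * which the NOHZ code can program as the next event, unless an hrtimer is
 * due earlier (see cmp_next_hrtimer_event()).
 */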
1518 */ 1519 if ((expires - basem) > TICK_NSEC) { 1520 base->must_forward_clk = true; 1521 base->is_idle = true; 1522 } 1523 } 1524 raw_spin_unlock(&base->lock); 1525 1526 return cmp_next_hrtimer_event(basem, expires); 1527 } 1528 1529 /** 1530 * timer_clear_idle - Clear the idle state of the timer base 1531 * 1532 * Called with interrupts disabled 1533 */ 1534 void timer_clear_idle(void) 1535 { 1536 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1537 1538 /* 1539 * We do this unlocked. The worst outcome is a remote enqueue sending 1540 * a pointless IPI, but taking the lock would just make the window for 1541 * sending the IPI a few instructions smaller for the cost of taking 1542 * the lock in the exit from idle path. 1543 */ 1544 base->is_idle = false; 1545 } 1546 1547 static int collect_expired_timers(struct timer_base *base, 1548 struct hlist_head *heads) 1549 { 1550 /* 1551 * NOHZ optimization. After a long idle sleep we need to forward the 1552 * base to current jiffies. Avoid a loop by searching the bitfield for 1553 * the next expiring timer. 1554 */ 1555 if ((long)(jiffies - base->clk) > 2) { 1556 unsigned long next = __next_timer_interrupt(base); 1557 1558 /* 1559 * If the next timer is ahead of time forward to current 1560 * jiffies, otherwise forward to the next expiry time: 1561 */ 1562 if (time_after(next, jiffies)) { 1563 /* The call site will increment clock! */ 1564 base->clk = jiffies - 1; 1565 return 0; 1566 } 1567 base->clk = next; 1568 } 1569 return __collect_expired_timers(base, heads); 1570 } 1571 #else 1572 static inline int collect_expired_timers(struct timer_base *base, 1573 struct hlist_head *heads) 1574 { 1575 return __collect_expired_timers(base, heads); 1576 } 1577 #endif 1578 1579 /* 1580 * Called from the timer interrupt handler to charge one tick to the current 1581 * process. user_tick is 1 if the tick is user time, 0 for system. 1582 */ 1583 void update_process_times(int user_tick) 1584 { 1585 struct task_struct *p = current; 1586 1587 /* Note: this timer irq context must be accounted for as well. */ 1588 account_process_tick(p, user_tick); 1589 run_local_timers(); 1590 rcu_check_callbacks(user_tick); 1591 #ifdef CONFIG_IRQ_WORK 1592 if (in_irq()) 1593 irq_work_tick(); 1594 #endif 1595 scheduler_tick(); 1596 if (IS_ENABLED(CONFIG_POSIX_TIMERS)) 1597 run_posix_cpu_timers(p); 1598 } 1599 1600 /** 1601 * __run_timers - run all expired timers (if any) on this CPU. 1602 * @base: the timer vector to be processed. 1603 */ 1604 static inline void __run_timers(struct timer_base *base) 1605 { 1606 struct hlist_head heads[LVL_DEPTH]; 1607 int levels; 1608 1609 if (!time_after_eq(jiffies, base->clk)) 1610 return; 1611 1612 raw_spin_lock_irq(&base->lock); 1613 1614 while (time_after_eq(jiffies, base->clk)) { 1615 1616 levels = collect_expired_timers(base, heads); 1617 base->clk++; 1618 1619 while (levels--) 1620 expire_timers(base, heads + levels); 1621 } 1622 base->running_timer = NULL; 1623 raw_spin_unlock_irq(&base->lock); 1624 } 1625 1626 /* 1627 * This function runs timers and the timer-tq in bottom half context. 1628 */ 1629 static __latent_entropy void run_timer_softirq(struct softirq_action *h) 1630 { 1631 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1632 1633 /* 1634 * must_forward_clk must be cleared before running timers so that any 1635 * timer functions that call mod_timer will not try to forward the 1636 * base. idle trcking / clock forwarding logic is only used with 1637 * BASE_STD timers. 
1638 * 1639 * The deferrable base does not do idle tracking at all, so we do 1640 * not forward it. This can result in very large variations in 1641 * granularity for deferrable timers, but they can be deferred for 1642 * long periods due to idle. 1643 */ 1644 base->must_forward_clk = false; 1645 1646 __run_timers(base); 1647 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) 1648 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); 1649 } 1650 1651 /* 1652 * Called by the local, per-CPU timer interrupt on SMP. 1653 */ 1654 void run_local_timers(void) 1655 { 1656 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1657 1658 hrtimer_run_queues(); 1659 /* Raise the softirq only if required. */ 1660 if (time_before(jiffies, base->clk)) { 1661 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active) 1662 return; 1663 /* CPU is awake, so check the deferrable base. */ 1664 base++; 1665 if (time_before(jiffies, base->clk)) 1666 return; 1667 } 1668 raise_softirq(TIMER_SOFTIRQ); 1669 } 1670 1671 static void process_timeout(unsigned long __data) 1672 { 1673 wake_up_process((struct task_struct *)__data); 1674 } 1675 1676 /** 1677 * schedule_timeout - sleep until timeout 1678 * @timeout: timeout value in jiffies 1679 * 1680 * Make the current task sleep until @timeout jiffies have 1681 * elapsed. The routine will return immediately unless 1682 * the current task state has been set (see set_current_state()). 1683 * 1684 * You can set the task state as follows - 1685 * 1686 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to 1687 * pass before the routine returns unless the current task is explicitly 1688 * woken up, (e.g. by wake_up_process())". 1689 * 1690 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is 1691 * delivered to the current task or the current task is explicitly woken 1692 * up. 1693 * 1694 * The current task state is guaranteed to be TASK_RUNNING when this 1695 * routine returns. 1696 * 1697 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule 1698 * the CPU away without a bound on the timeout. In this case the return 1699 * value will be %MAX_SCHEDULE_TIMEOUT. 1700 * 1701 * Returns 0 when the timer has expired otherwise the remaining time in 1702 * jiffies will be returned. In all cases the return value is guaranteed 1703 * to be non-negative. 1704 */ 1705 signed long __sched schedule_timeout(signed long timeout) 1706 { 1707 struct timer_list timer; 1708 unsigned long expire; 1709 1710 switch (timeout) 1711 { 1712 case MAX_SCHEDULE_TIMEOUT: 1713 /* 1714 * These two special cases are useful to be comfortable 1715 * in the caller. Nothing more. We could take 1716 * MAX_SCHEDULE_TIMEOUT from one of the negative value 1717 * but I' d like to return a valid offset (>=0) to allow 1718 * the caller to do everything it want with the retval. 1719 */ 1720 schedule(); 1721 goto out; 1722 default: 1723 /* 1724 * Another bit of PARANOID. Note that the retval will be 1725 * 0 since no piece of kernel is supposed to do a check 1726 * for a negative retval of schedule_timeout() (since it 1727 * should never happens anyway). You just have the printk() 1728 * that will tell you if something is gone wrong and where. 
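/*
 * Editor-added usage sketch, not part of the original source: the canonical
 * "set task state, then schedule_timeout()" wait loop that the documentation
 * above describes. done_flag is a hypothetical condition which some other
 * context sets before waking this task (e.g. with wake_up_process()).
 */
static bool done_flag;

static int wait_for_done_flag(void)
{
	signed long remaining = 2 * HZ;		/* wait up to two seconds */

	set_current_state(TASK_INTERRUPTIBLE);
	while (!done_flag && remaining && !signal_pending(current)) {
		remaining = schedule_timeout(remaining);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	if (done_flag)
		return 0;
	return signal_pending(current) ? -ERESTARTSYS : -ETIMEDOUT;
}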
1729 */ 1730 if (timeout < 0) { 1731 printk(KERN_ERR "schedule_timeout: wrong timeout " 1732 "value %lx\n", timeout); 1733 dump_stack(); 1734 current->state = TASK_RUNNING; 1735 goto out; 1736 } 1737 } 1738 1739 expire = timeout + jiffies; 1740 1741 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); 1742 __mod_timer(&timer, expire, false); 1743 schedule(); 1744 del_singleshot_timer_sync(&timer); 1745 1746 /* Remove the timer from the object tracker */ 1747 destroy_timer_on_stack(&timer); 1748 1749 timeout = expire - jiffies; 1750 1751 out: 1752 return timeout < 0 ? 0 : timeout; 1753 } 1754 EXPORT_SYMBOL(schedule_timeout); 1755 1756 /* 1757 * We can use __set_current_state() here because schedule_timeout() calls 1758 * schedule() unconditionally. 1759 */ 1760 signed long __sched schedule_timeout_interruptible(signed long timeout) 1761 { 1762 __set_current_state(TASK_INTERRUPTIBLE); 1763 return schedule_timeout(timeout); 1764 } 1765 EXPORT_SYMBOL(schedule_timeout_interruptible); 1766 1767 signed long __sched schedule_timeout_killable(signed long timeout) 1768 { 1769 __set_current_state(TASK_KILLABLE); 1770 return schedule_timeout(timeout); 1771 } 1772 EXPORT_SYMBOL(schedule_timeout_killable); 1773 1774 signed long __sched schedule_timeout_uninterruptible(signed long timeout) 1775 { 1776 __set_current_state(TASK_UNINTERRUPTIBLE); 1777 return schedule_timeout(timeout); 1778 } 1779 EXPORT_SYMBOL(schedule_timeout_uninterruptible); 1780 1781 /* 1782 * Like schedule_timeout_uninterruptible(), except this task will not contribute 1783 * to load average. 1784 */ 1785 signed long __sched schedule_timeout_idle(signed long timeout) 1786 { 1787 __set_current_state(TASK_IDLE); 1788 return schedule_timeout(timeout); 1789 } 1790 EXPORT_SYMBOL(schedule_timeout_idle); 1791 1792 #ifdef CONFIG_HOTPLUG_CPU 1793 static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head) 1794 { 1795 struct timer_list *timer; 1796 int cpu = new_base->cpu; 1797 1798 while (!hlist_empty(head)) { 1799 timer = hlist_entry(head->first, struct timer_list, entry); 1800 detach_timer(timer, false); 1801 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu; 1802 internal_add_timer(new_base, timer); 1803 } 1804 } 1805 1806 int timers_dead_cpu(unsigned int cpu) 1807 { 1808 struct timer_base *old_base; 1809 struct timer_base *new_base; 1810 int b, i; 1811 1812 BUG_ON(cpu_online(cpu)); 1813 1814 for (b = 0; b < NR_BASES; b++) { 1815 old_base = per_cpu_ptr(&timer_bases[b], cpu); 1816 new_base = get_cpu_ptr(&timer_bases[b]); 1817 /* 1818 * The caller is globally serialized and nobody else 1819 * takes two locks at once, deadlock is not possible. 
1820 */ 1821 raw_spin_lock_irq(&new_base->lock); 1822 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); 1823 1824 BUG_ON(old_base->running_timer); 1825 1826 for (i = 0; i < WHEEL_SIZE; i++) 1827 migrate_timer_list(new_base, old_base->vectors + i); 1828 1829 raw_spin_unlock(&old_base->lock); 1830 raw_spin_unlock_irq(&new_base->lock); 1831 put_cpu_ptr(&timer_bases); 1832 } 1833 return 0; 1834 } 1835 1836 #endif /* CONFIG_HOTPLUG_CPU */ 1837 1838 static void __init init_timer_cpu(int cpu) 1839 { 1840 struct timer_base *base; 1841 int i; 1842 1843 for (i = 0; i < NR_BASES; i++) { 1844 base = per_cpu_ptr(&timer_bases[i], cpu); 1845 base->cpu = cpu; 1846 raw_spin_lock_init(&base->lock); 1847 base->clk = jiffies; 1848 } 1849 } 1850 1851 static void __init init_timer_cpus(void) 1852 { 1853 int cpu; 1854 1855 for_each_possible_cpu(cpu) 1856 init_timer_cpu(cpu); 1857 } 1858 1859 void __init init_timers(void) 1860 { 1861 init_timer_cpus(); 1862 open_softirq(TIMER_SOFTIRQ, run_timer_softirq); 1863 } 1864 1865 /** 1866 * msleep - sleep safely even with waitqueue interruptions 1867 * @msecs: Time in milliseconds to sleep for 1868 */ 1869 void msleep(unsigned int msecs) 1870 { 1871 unsigned long timeout = msecs_to_jiffies(msecs) + 1; 1872 1873 while (timeout) 1874 timeout = schedule_timeout_uninterruptible(timeout); 1875 } 1876 1877 EXPORT_SYMBOL(msleep); 1878 1879 /** 1880 * msleep_interruptible - sleep waiting for signals 1881 * @msecs: Time in milliseconds to sleep for 1882 */ 1883 unsigned long msleep_interruptible(unsigned int msecs) 1884 { 1885 unsigned long timeout = msecs_to_jiffies(msecs) + 1; 1886 1887 while (timeout && !signal_pending(current)) 1888 timeout = schedule_timeout_interruptible(timeout); 1889 return jiffies_to_msecs(timeout); 1890 } 1891 1892 EXPORT_SYMBOL(msleep_interruptible); 1893 1894 /** 1895 * usleep_range - Sleep for an approximate time 1896 * @min: Minimum time in usecs to sleep 1897 * @max: Maximum time in usecs to sleep 1898 * 1899 * In non-atomic context where the exact wakeup time is flexible, use 1900 * usleep_range() instead of udelay(). The sleep improves responsiveness 1901 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces 1902 * power usage by allowing hrtimers to take advantage of an already- 1903 * scheduled interrupt instead of scheduling a new one just for this sleep. 1904 */ 1905 void __sched usleep_range(unsigned long min, unsigned long max) 1906 { 1907 ktime_t exp = ktime_add_us(ktime_get(), min); 1908 u64 delta = (u64)(max - min) * NSEC_PER_USEC; 1909 1910 for (;;) { 1911 __set_current_state(TASK_UNINTERRUPTIBLE); 1912 /* Do not return before the requested sleep time has elapsed */ 1913 if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS)) 1914 break; 1915 } 1916 } 1917 EXPORT_SYMBOL(usleep_range); 1918
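/*
 * Editor-added closing illustration, not part of the original source, of
 * choosing between the sleep primitives above; the concrete delay values are
 * arbitrary examples.
 */
static void example_delays(void)
{
	/* Tens of milliseconds or more: jiffies based, may oversleep by a tick. */
	msleep(20);

	/*
	 * Short delays where some slack is acceptable: hrtimer based, and the
	 * min/max range lets the wakeup coalesce with an already scheduled
	 * interrupt (here 1.5ms minimum, 2ms maximum).
	 */
	usleep_range(1500, 2000);
}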