// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
 * The time, when the last jiffy update happened. Protected by jiffies_lock.
 */
static ktime_t last_jiffies_update;

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding jiffies_lock:
	 * The READ_ONCE() pairs with two updates done later in this function.
	 */
	delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
	if (delta < tick_period)
		return;

	/* Reevaluate with jiffies_lock held */
	raw_spin_lock(&jiffies_lock);
	write_seqcount_begin(&jiffies_seq);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta >= tick_period) {

		delta = ktime_sub(delta, tick_period);
		/* Pairs with the lockless read in this function. */
		WRITE_ONCE(last_jiffies_update,
			   ktime_add(last_jiffies_update, tick_period));

		/* Slow path for long timeouts */
		if (unlikely(delta >= tick_period)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			/* Pairs with the lockless read in this function. */
			WRITE_ONCE(last_jiffies_update,
				   ktime_add_ns(last_jiffies_update,
						incr * ticks));
		}
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	} else {
		write_seqcount_end(&jiffies_seq);
		raw_spin_unlock(&jiffies_lock);
		return;
	}
	write_seqcount_end(&jiffies_seq);
	raw_spin_unlock(&jiffies_lock);
	update_wall_time();
}
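
/*
 * Worked example for the update above (illustrative, assuming HZ=250,
 * i.e. tick_period = 4 ms): a CPU calls tick_do_update_jiffies64() 10 ms
 * after last_jiffies_update. The first WRITE_ONCE() advances
 * last_jiffies_update by one period (delta becomes 6 ms), the slow path
 * then accounts ticks = 6 / 4 = 1 further period, and do_timer(++ticks)
 * advances jiffies by 2 in total, leaving last_jiffies_update 2 ms
 * behind 'now' for the next round.
 */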

/*
 * Initialize and retrieve the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	raw_spin_lock(&jiffies_lock);
	write_seqcount_begin(&jiffies_seq);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_seqcount_end(&jiffies_seq);
	raw_spin_unlock(&jiffies_lock);
	return period;
}

static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the CPU in charge went
	 * into a long sleep. If two CPUs happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 *
	 * If nohz_full is enabled, this should not happen because the
	 * tick_do_timer_cpu never relinquishes.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
#ifdef CONFIG_NO_HZ_FULL
		WARN_ON(tick_nohz_full_running);
#endif
		tick_do_timer_cpu = cpu;
	}
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	if (ts->inidle)
		ts->got_idle_tick = 1;
}

static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on completely idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog_sched();
		if (is_idle_task(current))
			ts->idle_jiffies++;
		/*
		 * In case the current tick fired too early past its expected
		 * expiration, make sure we don't bypass the next clock reprogramming
		 * to the same deadline.
		 */
		ts->next_tick = 0;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}
#endif

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
bool tick_nohz_full_running;
EXPORT_SYMBOL_GPL(tick_nohz_full_running);
static atomic_t tick_dep_mask;

static bool check_tick_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	if (val & TICK_DEP_MASK_POSIX_TIMER) {
		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
		return true;
	}

	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
		return true;
	}

	if (val & TICK_DEP_MASK_SCHED) {
		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
		return true;
	}

	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
		return true;
	}

	if (val & TICK_DEP_MASK_RCU) {
		trace_tick_stop(0, TICK_DEP_MASK_RCU);
		return true;
	}

	return false;
}

static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
	lockdep_assert_irqs_disabled();

	if (unlikely(!cpu_online(cpu)))
		return false;

	if (check_tick_dependency(&tick_dep_mask))
		return false;

	if (check_tick_dependency(&ts->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->signal->tick_dep_mask))
		return false;

	return true;
}

static void nohz_full_kick_func(struct irq_work *work)
{
	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
	.func = nohz_full_kick_func,
};

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
	int cpu;

	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

static void tick_nohz_dep_set_all(atomic_t *dep,
				  enum tick_dep_bits bit)
{
	int prev;

	prev = atomic_fetch_or(BIT(bit), dep);
	if (!prev)
		tick_nohz_full_kick_all();
}

/*
 * Set a global tick dependency. Used by perf events that rely on the
 * frequency and by the unstable clock.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&tick_dep_mask, bit);
}

void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tick_dep_mask);
}

/*
 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 * manage events throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	int prev;
	struct tick_sched *ts;

	ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
	if (!prev) {
		preempt_disable();
		/* Perf needs local kick that is NMI safe */
		if (cpu == smp_processor_id()) {
			tick_nohz_full_kick();
		} else {
			/* Remote irq work not NMI-safe */
			if (!WARN_ON_ONCE(in_nmi()))
				tick_nohz_full_kick_cpu(cpu);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);

void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);

/*
 * Set a per-task tick dependency. Posix CPU timers need this in order to elapse
 * per-task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	/*
	 * We could optimize this by kicking only the CPU running the task,
	 * if that noise matters for nohz full users.
	 */
	tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);

void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);

/*
 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to
 * elapse per-process timers.
 */
void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&sig->tick_dep_mask, bit);
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}
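
/*
 * Usage sketch for the dependency API above (illustrative, not taken
 * from a particular caller): a subsystem that must keep the tick alive
 * on CPU 3 while it throttles events would pair the operations like so:
 *
 *	tick_nohz_dep_set_cpu(3, TICK_DEP_BIT_PERF_EVENTS);
 *	...		// tick on CPU 3 is guaranteed to keep running
 *	tick_nohz_dep_clear_cpu(3, TICK_DEP_BIT_PERF_EVENTS);
 *
 * The set side kicks the target CPU so that a stopped tick gets
 * re-evaluated; the clear side is a plain atomic and takes effect at
 * the next re-evaluation point (e.g. irq exit).
 */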

/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
	unsigned long flags;
	struct tick_sched *ts;

	local_irq_save(flags);

	if (!tick_nohz_full_cpu(smp_processor_id()))
		goto out;

	ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		if (atomic_read(&current->tick_dep_mask) ||
		    atomic_read(&current->signal->tick_dep_mask))
			tick_nohz_full_kick();
	}
out:
	local_irq_restore(flags);
}

/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	cpumask_copy(tick_nohz_full_mask, cpumask);
	tick_nohz_full_running = true;
}
EXPORT_SYMBOL_GPL(tick_nohz_full_setup);
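
/*
 * For example, booting with "nohz_full=1-7" on an 8-CPU system hands
 * this function a cpumask covering CPUs 1-7; CPU 0 then stays behind as
 * a housekeeping CPU.
 */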

static int tick_nohz_cpu_down(unsigned int cpu)
{
	/*
	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
	 * CPUs. It must remain online when nohz full is enabled.
	 */
	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return -EBUSY;
	return 0;
}

void __init tick_nohz_init(void)
{
	int cpu, ret;

	if (!tick_nohz_full_running)
		return;

	/*
	 * Full dynticks uses irq work to drive the tick rescheduling on safe
	 * locking contexts. But then we need irq work to raise its own
	 * interrupts to avoid circular dependency on the tick
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
	    !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
		cpu = smp_processor_id();

		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
			pr_warn("NO_HZ: Clearing %d from nohz_full range for timekeeping\n",
				cpu);
			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
		}
	}

	for_each_cpu(cpu, tick_nohz_full_mask)
		context_tracking_cpu_set(cpu);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"kernel/nohz:predown", NULL,
					tick_nohz_cpu_down);
	WARN_ON(ret < 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));
}
#endif

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled ?
 */
bool tick_nohz_enabled __read_mostly = true;
unsigned long tick_nohz_active __read_mostly;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);
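
/*
 * Example: "nohz=off" on the kernel command line disables the dynamic
 * tick; "nohz=on" (the default) enables it. kstrtobool() also accepts
 * the usual 1/0 and y/n spellings.
 */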

bool tick_nohz_tick_stopped(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->tick_stopped;
}

bool tick_nohz_tick_stopped_cpu(int cpu)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	return ts->tick_stopped;
}

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog_sched();
}

/*
 * Updates the per-CPU time idle statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);
}

static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event();
}

static void tick_nohz_start_idle(struct tick_sched *ts)
{
	ts->idle_entrytime = ktime_get();
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
}

/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
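
/*
 * Usage sketch (illustrative): a consumer such as a cpufreq governor can
 * derive a per-CPU idle fraction from two samples of these counters:
 *
 *	u64 t0, t1, idle0, idle1;
 *
 *	idle0 = get_cpu_idle_time_us(cpu, &t0);
 *	...				// some sampling interval
 *	idle1 = get_cpu_idle_time_us(cpu, &t1);
 *	// idle fraction ~= (idle1 - idle0) / (t1 - t0)
 *
 * Passing a non-NULL last_update_time folds the currently running idle
 * period into the counters, so the two samples are directly comparable.
 */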

static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, tick_period);

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	}

	/*
	 * Reset to make sure next tick stop doesn't get fooled by past
	 * cached clock deadline.
	 */
	ts->next_tick = 0;
}

static inline bool local_timer_softirq_pending(void)
{
	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}

static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
	u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
	unsigned long basejiff;
	unsigned int seq;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqcount_begin(&jiffies_seq);
		basemono = last_jiffies_update;
		basejiff = jiffies;
	} while (read_seqcount_retry(&jiffies_seq, seq));
	ts->last_jiffies = basejiff;
	ts->timer_expires_base = basemono;

	/*
	 * Keep the periodic tick, when RCU, architecture or irq_work
	 * requests it.
	 * Aside of that check whether the local timer softirq is
	 * pending. If so it's a bad idea to call get_next_timer_interrupt()
	 * because there is an already expired timer, so it will request
	 * immediate expiry, which rearms the hardware timer with a
	 * minimal delta which brings us back to this place
	 * immediately. Lather, rinse and repeat...
	 */
	if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
		next_tick = basemono + TICK_NSEC;
	} else {
		/*
		 * Get the next pending timer. If high resolution
		 * timers are enabled this only takes the timer wheel
		 * timers into account. If high resolution timers are
		 * disabled this also looks at the next expiring
		 * hrtimer.
		 */
		next_tmr = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tmr;
		/* Take the next rcu event into account */
		next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
	}

	/*
	 * If the tick is due in the next period, keep it ticking or
	 * force prod the timer.
	 */
	delta = next_tick - basemono;
	if (delta <= (u64)TICK_NSEC) {
		/*
		 * Tell the timer code that the base is not idle, i.e. undo
		 * the effect of get_next_timer_interrupt():
		 */
		timer_clear_idle();
		/*
		 * We've not stopped the tick yet, and there's a timer in the
		 * next period, so no point in stopping it either, bail.
		 */
		if (!ts->tick_stopped) {
			ts->timer_expires = 0;
			goto out;
		}
	}

	/*
	 * If this CPU is the one which had the do_timer() duty last, we limit
	 * the sleep time to the timekeeping max_deferment value.
	 * Otherwise we can sleep as long as we want.
	 */
	delta = timekeeping_max_deferment();
	if (cpu != tick_do_timer_cpu &&
	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
		delta = KTIME_MAX;

	/* Calculate the next expiry time */
	if (delta < (KTIME_MAX - basemono))
		expires = basemono + delta;
	else
		expires = KTIME_MAX;

	ts->timer_expires = min_t(u64, expires, next_tick);

out:
	return ts->timer_expires;
}
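
/*
 * Illustrative summary of the decision above: with the next timer wheel
 * event 100 ms away and no RCU/arch/irq_work/softirq veto, next_tick
 * lands ~100 ms past basemono, delta exceeds TICK_NSEC and
 * ts->timer_expires is set that far out, so the caller may stop the
 * tick. If a timer softirq is already pending, next_tick is clamped to
 * basemono + TICK_NSEC and a not-yet-stopped tick is simply left
 * running (timer_expires = 0).
 */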

static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	u64 basemono = ts->timer_expires_base;
	u64 expires = ts->timer_expires;
	ktime_t tick = expires;

	/* Make sure we won't be trying to stop it twice in a row. */
	ts->timer_expires_base = 0;

	/*
	 * If this CPU is the one which updates jiffies, then give up
	 * the assignment and let it be taken by the CPU which runs
	 * the tick timer next, which might be this CPU as well. If we
	 * don't drop this here the jiffies might be stale and
	 * do_timer() never invoked. Keep track of the fact that it
	 * was the one which had the do_timer() duty last.
	 */
	if (cpu == tick_do_timer_cpu) {
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		ts->do_timer_last = 1;
	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
		ts->do_timer_last = 0;
	}

	/* Skip reprogram of event if it's not changed */
	if (ts->tick_stopped && (expires == ts->next_tick)) {
		/* Sanity check: make sure clockevent is actually programmed */
		if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
			return;

		WARN_ON_ONCE(1);
		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
			    basemono, ts->next_tick, dev->next_event,
			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
	}

	/*
	 * nohz_stop_sched_tick can be called several times before
	 * the nohz_restart_sched_tick is called. This happens when
	 * interrupts arrive which do not cause a reschedule. In the
	 * first call we save the current tick time, so we can restart
	 * the scheduler tick in nohz_restart_sched_tick.
	 */
	if (!ts->tick_stopped) {
		calc_load_nohz_start();
		quiet_vmstat();

		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
		ts->tick_stopped = 1;
		trace_tick_stop(1, TICK_DEP_MASK_NONE);
	}

	ts->next_tick = tick;

	/*
	 * If the expiration time == KTIME_MAX, then we simply stop
	 * the tick timer.
	 */
	if (unlikely(expires == KTIME_MAX)) {
		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
			hrtimer_cancel(&ts->sched_timer);
		return;
	}

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start(&ts->sched_timer, tick,
			      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		hrtimer_set_expires(&ts->sched_timer, tick);
		tick_program_event(tick, 1);
	}
}

static void tick_nohz_retain_tick(struct tick_sched *ts)
{
	ts->timer_expires_base = 0;
}

#ifdef CONFIG_NO_HZ_FULL
static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
{
	if (tick_nohz_next_event(ts, cpu))
		tick_nohz_stop_tick(ts, cpu);
	else
		tick_nohz_retain_tick(ts);
}
#endif /* CONFIG_NO_HZ_FULL */
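
/*
 * Note on the pairing above: tick_nohz_next_event() leaves
 * ts->timer_expires_base non-zero to flag a pending decision, and both
 * tick_nohz_stop_tick() and tick_nohz_retain_tick() clear it again.
 * Every evaluation must therefore be followed by exactly one of the
 * two, which is what the WARN_ON_ONCE(ts->timer_expires_base) checks
 * in the idle entry/exit paths below assert.
 */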

static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);

	/*
	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
	 * the clock forward checks in the enqueue path:
	 */
	timer_clear_idle();

	calc_load_nohz_stop();
	touch_softlockup_watchdog_sched();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);
}

static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
#ifdef CONFIG_NO_HZ_FULL
	int cpu = smp_processor_id();

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
		return;

	if (can_stop_full_tick(cpu, ts))
		tick_nohz_stop_sched_tick(ts, cpu);
	else if (ts->tick_stopped)
		tick_nohz_restart_sched_tick(ts, ktime_get());
#endif
}

static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this CPU is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the CPU which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		/*
		 * Make sure the CPU doesn't get fooled by obsolete tick
		 * deadline if it comes back online later.
		 */
		ts->next_tick = 0;
		return false;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return false;

	if (need_resched())
		return false;

	if (unlikely(local_softirq_pending())) {
		static int ratelimit;

		if (ratelimit < 10 &&
		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
			pr_warn("NOHZ: local_softirq_pending %02x\n",
				(unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return false;
	}

	if (tick_nohz_full_enabled()) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around
		 */
		if (tick_do_timer_cpu == cpu)
			return false;
		/*
		 * Boot safety: make sure the timekeeping duty has been
		 * assigned before entering dyntick-idle mode,
		 * tick_do_timer_cpu is TICK_DO_TIMER_BOOT
		 */
		if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT))
			return false;

		/* Should not happen for nohz-full */
		if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
			return false;
	}

	return true;
}

static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
{
	ktime_t expires;
	int cpu = smp_processor_id();

	/*
	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
	 * tick timer expiration time is known already.
	 */
	if (ts->timer_expires_base)
		expires = ts->timer_expires;
	else if (can_stop_idle_tick(cpu, ts))
		expires = tick_nohz_next_event(ts, cpu);
	else
		return;

	ts->idle_calls++;

	if (expires > 0LL) {
		int was_stopped = ts->tick_stopped;

		tick_nohz_stop_tick(ts, cpu);

		ts->idle_sleeps++;
		ts->idle_expires = expires;

		if (!was_stopped && ts->tick_stopped) {
			ts->idle_jiffies = ts->last_jiffies;
			nohz_balance_enter_idle(cpu);
		}
	} else {
		tick_nohz_retain_tick(ts);
	}
}

/**
 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 */
void tick_nohz_idle_stop_tick(void)
{
	__tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched));
}

void tick_nohz_idle_retain_tick(void)
{
	tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
	/*
	 * Undo the effect of get_next_timer_interrupt() called from
	 * tick_nohz_next_event().
	 */
	timer_clear_idle();
}

/**
 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
 *
 * Called when we start the idle loop.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	lockdep_assert_irqs_enabled();

	local_irq_disable();

	ts = this_cpu_ptr(&tick_cpu_sched);

	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 1;
	tick_nohz_start_idle(ts);

	local_irq_enable();
}
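
/*
 * Expected call sequence from the idle loop (sketch; the authoritative
 * flow lives in kernel/sched/idle.c):
 *
 *	tick_nohz_idle_enter();
 *	while (!need_resched()) {
 *		tick_nohz_idle_stop_tick();	// or ..._retain_tick()
 *		...				// arch idle / cpuidle
 *	}
 *	tick_nohz_idle_exit();
 */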

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->inidle)
		tick_nohz_start_idle(ts);
	else
		tick_nohz_full_update_tick(ts);
}

/**
 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
 */
bool tick_nohz_idle_got_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->got_idle_tick) {
		ts->got_idle_tick = 0;
		return true;
	}
	return false;
}

/**
 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
 * or the tick, whichever expires first. Note that, if the tick has been
 * stopped, it returns the next hrtimer.
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_next_hrtimer(void)
{
	return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
}

/**
 * tick_nohz_get_sleep_length - return the expected length of the current sleep
 * @delta_next: duration until the next event if the tick cannot be stopped
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	int cpu = smp_processor_id();
	/*
	 * The idle entry time is expected to be a sufficient approximation of
	 * the current time at this point.
	 */
	ktime_t now = ts->idle_entrytime;
	ktime_t next_event;

	WARN_ON_ONCE(!ts->inidle);

	*delta_next = ktime_sub(dev->next_event, now);

	if (!can_stop_idle_tick(cpu, ts))
		return *delta_next;

	next_event = tick_nohz_next_event(ts, cpu);
	if (!next_event)
		return *delta_next;

	/*
	 * If the next highres timer to expire is earlier than next_event, the
	 * idle governor needs to know that.
	 */
	next_event = min_t(u64, next_event,
			   hrtimer_next_event_without(&ts->sched_timer));

	return ktime_sub(next_event, now);
}
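
/*
 * Usage sketch (illustrative): a cpuidle governor typically does
 *
 *	ktime_t delta_next;
 *	ktime_t duration = tick_nohz_get_sleep_length(&delta_next);
 *
 * and then picks an idle state whose target residency fits within
 * 'duration', falling back to 'delta_next' as the horizon for states
 * that do not justify stopping the tick.
 */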

/**
 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
 * for a particular CPU.
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
{
	struct tick_sched *ts = tick_get_tick_sched(cpu);

	return ts->idle_calls;
}

/**
 * tick_nohz_get_idle_calls - return the current idle calls counter value
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->idle_calls;
}
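
/*
 * For instance, the schedutil governor can compare two successive
 * readings of this counter: if it has not moved between frequency
 * updates, the CPU never entered idle in between and can be considered
 * busy. (Illustrative; see the governor for the exact heuristic.)
 */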

static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	unsigned long ticks;

	if (vtime_accounting_enabled_this_cpu())
		return;
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it only does a single tick worth of
	 * accounting. Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif
}

static void __tick_nohz_idle_restart_tick(struct tick_sched *ts, ktime_t now)
{
	tick_nohz_restart_sched_tick(ts, now);
	tick_nohz_account_idle_ticks(ts);
}

void tick_nohz_idle_restart_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped)
		__tick_nohz_idle_restart_tick(ts, ktime_get());
}

/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	bool idle_active, tick_stopped;
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);
	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 0;
	idle_active = ts->idle_active;
	tick_stopped = ts->tick_stopped;

	if (idle_active || tick_stopped)
		now = ktime_get();

	if (idle_active)
		tick_nohz_stop_idle(ts, now);

	if (tick_stopped)
		__tick_nohz_idle_restart_tick(ts, now);

	local_irq_enable();
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	dev->next_event = KTIME_MAX;

	tick_sched_do_timer(ts, now);
	tick_sched_handle(ts, regs);

	/* No need to reprogram if we are running tickless */
	if (unlikely(ts->tick_stopped))
		return;

	hrtimer_forward(&ts->sched_timer, now, tick_period);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
{
	if (!tick_nohz_enabled)
		return;
	ts->nohz_mode = mode;
	/* One update is enough */
	if (!test_and_set_bit(0, &tick_nohz_active))
		timers_update_nohz();
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	if (tick_switch_to_oneshot(tick_nohz_handler))
		return;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	/* Get the next period */
	next = tick_init_jiffy_update();

	hrtimer_set_expires(&ts->sched_timer, next);
	hrtimer_forward_now(&ts->sched_timer, tick_period);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}

static inline void tick_nohz_irq_enter(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);
	if (ts->tick_stopped)
		tick_nohz_update_jiffies(now);
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }
static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_irq_enter(void)
{
	tick_check_oneshot_broadcast_this_cpu();
	tick_nohz_irq_enter();
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(ts, now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs)
		tick_sched_handle(ts, regs);
	else
		ts->next_tick = 0;

	/* No need to reprogram if we are in idle or full dynticks mode */
	if (unlikely(ts->tick_stopped))
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

static int sched_skew_tick;

static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);
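
/*
 * Example: booting with "skew_tick=1" enables the per-CPU tick offset
 * computed in tick_setup_sched_timer() below, which staggers tick
 * processing across CPUs to reduce jiffies_lock contention on larger
 * systems.
 */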

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per-CPU) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
	if (sched_skew_tick) {
		u64 offset = ktime_to_ns(tick_period) >> 1;
		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	hrtimer_forward(&ts->sched_timer, now, tick_period);
	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
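
/*
 * Worked example for the skew above (illustrative, assuming HZ=250 and
 * 4 possible CPUs): tick_period is 4,000,000 ns, so offset starts at
 * 2,000,000 ns, is divided by num_possible_cpus() to 500,000 ns, and
 * CPU 2 thus programs its tick 1,000,000 ns after CPU 0's, spreading
 * the per-CPU ticks across half a period.
 */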
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	memset(ts, 0, sizeof(*ts));
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/**
 * Check if a change happened which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime). Called with interrupts disabled.
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}