// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
 * The time, when the last jiffy update happened. Write access must hold
 * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
 * consistent view of jiffies and last_jiffies_update.
 */
static ktime_t last_jiffies_update;

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 1;
	ktime_t delta, nextp;

	/*
	 * 64bit can do a quick check without holding jiffies lock and
	 * without looking at the sequence count. The smp_load_acquire()
	 * pairs with the update done later in this function.
	 *
	 * 32bit cannot do that because the store of tick_next_period
	 * consists of two 32bit stores and the first store could move it
	 * to a random point in the future.
	 */
	if (IS_ENABLED(CONFIG_64BIT)) {
		if (ktime_before(now, smp_load_acquire(&tick_next_period)))
			return;
	} else {
		unsigned int seq;

		/*
		 * Avoid contention on jiffies_lock and protect the quick
		 * check with the sequence count.
		 */
		do {
			seq = read_seqcount_begin(&jiffies_seq);
			nextp = tick_next_period;
		} while (read_seqcount_retry(&jiffies_seq, seq));

		if (ktime_before(now, nextp))
			return;
	}

	/* Quick check failed, i.e. update is required. */
	raw_spin_lock(&jiffies_lock);
	/*
	 * Reevaluate with the lock held. Another CPU might have done the
	 * update already.
	 */
	if (ktime_before(now, tick_next_period)) {
		raw_spin_unlock(&jiffies_lock);
		return;
	}

	write_seqcount_begin(&jiffies_seq);

	delta = ktime_sub(now, tick_next_period);
	if (unlikely(delta >= TICK_NSEC)) {
		/* Slow path for long idle sleep times */
		s64 incr = TICK_NSEC;

		ticks += ktime_divns(delta, incr);

		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   incr * ticks);
	} else {
		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   TICK_NSEC);
	}

	/* Advance jiffies to complete the jiffies_seq protected job */
	jiffies_64 += ticks;

	/*
	 * Keep the tick_next_period variable up to date.
	 */
	nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);

	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * Pairs with smp_load_acquire() in the lockless quick
		 * check above and ensures that the update to jiffies_64 is
		 * not reordered vs. the store to tick_next_period, neither
		 * by the compiler nor by the CPU.
		 */
		smp_store_release(&tick_next_period, nextp);
	} else {
		/*
		 * A plain store is good enough on 32bit as the quick check
		 * above is protected by the sequence count.
		 */
		tick_next_period = nextp;
	}

	/*
	 * Release the sequence count. calc_global_load() below is not
	 * protected by it, but jiffies_lock needs to be held to prevent
	 * concurrent invocations.
	 */
	write_seqcount_end(&jiffies_seq);

	calc_global_load();

	raw_spin_unlock(&jiffies_lock);
	update_wall_time();
}

/*
 * Initialize and return the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	raw_spin_lock(&jiffies_lock);
	write_seqcount_begin(&jiffies_seq);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_seqcount_end(&jiffies_seq);
	raw_spin_unlock(&jiffies_lock);
	return period;
}

#define MAX_STALLED_JIFFIES 5

static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the CPU in charge went
	 * into a long sleep. If two CPUs happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 *
	 * If nohz_full is enabled, this should not happen because the
	 * tick_do_timer_cpu never relinquishes.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
#ifdef CONFIG_NO_HZ_FULL
		WARN_ON_ONCE(tick_nohz_full_running);
#endif
		tick_do_timer_cpu = cpu;
	}
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * If jiffies update stalled for too long (timekeeper in stop_machine()
	 * or VMEXIT'ed for several msecs), force an update.
	 */
	if (ts->last_tick_jiffies != jiffies) {
		ts->stalled_jiffies = 0;
		ts->last_tick_jiffies = READ_ONCE(jiffies);
	} else {
		if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) {
			tick_do_update_jiffies64(now);
			ts->stalled_jiffies = 0;
			ts->last_tick_jiffies = READ_ONCE(jiffies);
		}
	}

	if (ts->inidle)
		ts->got_idle_tick = 1;
}

static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog_sched();
		if (is_idle_task(current))
			ts->idle_jiffies++;
		/*
		 * In case the current tick fired too early past its expected
		 * expiration, make sure we don't bypass the next clock reprogramming
		 * to the same deadline.
		 */
		ts->next_tick = 0;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}
#endif

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
bool tick_nohz_full_running;
EXPORT_SYMBOL_GPL(tick_nohz_full_running);
static atomic_t tick_dep_mask;

static bool check_tick_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	if (val & TICK_DEP_MASK_POSIX_TIMER) {
		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
		return true;
	}

	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
		return true;
	}

	if (val & TICK_DEP_MASK_SCHED) {
		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
		return true;
	}

	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
		return true;
	}

	if (val & TICK_DEP_MASK_RCU) {
		trace_tick_stop(0, TICK_DEP_MASK_RCU);
		return true;
	}

	return false;
}

static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
	lockdep_assert_irqs_disabled();

	if (unlikely(!cpu_online(cpu)))
		return false;

	if (check_tick_dependency(&tick_dep_mask))
		return false;

	if (check_tick_dependency(&ts->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->signal->tick_dep_mask))
		return false;

	return true;
}

static void nohz_full_kick_func(struct irq_work *work)
{
	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
	IRQ_WORK_INIT_HARD(nohz_full_kick_func);

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}

static void tick_nohz_kick_task(struct task_struct *tsk)
{
	int cpu;

	/*
	 * If the task is not running, run_posix_cpu_timers()
	 * has nothing to elapse, IPI can then be spared.
	 *
	 * activate_task()                      STORE p->tick_dep_mask
	 *   STORE p->on_rq
	 * __schedule() (switch to task 'p')    smp_mb() (atomic_fetch_or())
	 *   LOCK rq->lock                      LOAD p->on_rq
	 *   smp_mb__after_spin_lock()
	 *   tick_nohz_task_switch()
	 *     LOAD p->tick_dep_mask
	 */
	if (!sched_task_on_rq(tsk))
		return;

	/*
	 * If the task concurrently migrates to another CPU,
	 * we guarantee it sees the new tick dependency upon
	 * schedule.
	 *
	 * set_task_cpu(p, cpu);
	 *   STORE p->cpu = @cpu
	 * __schedule() (switch to task 'p')
	 *   LOCK rq->lock
	 *   smp_mb__after_spin_lock()		STORE p->tick_dep_mask
	 *   tick_nohz_task_switch()		smp_mb() (atomic_fetch_or())
	 *     LOAD p->tick_dep_mask		LOAD p->cpu
	 */
	cpu = task_cpu(tsk);

	preempt_disable();
	if (cpu_online(cpu))
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
	int cpu;

	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

static void tick_nohz_dep_set_all(atomic_t *dep,
				  enum tick_dep_bits bit)
{
	int prev;

	prev = atomic_fetch_or(BIT(bit), dep);
	if (!prev)
		tick_nohz_full_kick_all();
}

/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * by unstable clock.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&tick_dep_mask, bit);
}

void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tick_dep_mask);
}

/*
 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 * manage event throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	int prev;
	struct tick_sched *ts;

	ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
	if (!prev) {
		preempt_disable();
		/* Perf needs local kick that is NMI safe */
		if (cpu == smp_processor_id()) {
			tick_nohz_full_kick();
		} else {
			/* Remote irq work not NMI-safe */
			if (!WARN_ON_ONCE(in_nmi()))
				tick_nohz_full_kick_cpu(cpu);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);

void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);

/*
 * Set a per-task tick dependency. RCU needs this. Posix CPU timers also
 * need it in order to elapse per-task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask))
		tick_nohz_kick_task(tsk);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);

void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);

/*
 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to
 * elapse per-process timers.
 */
void tick_nohz_dep_set_signal(struct task_struct *tsk,
			      enum tick_dep_bits bit)
{
	int prev;
	struct signal_struct *sig = tsk->signal;

	prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask);
	if (!prev) {
		struct task_struct *t;

		lockdep_assert_held(&tsk->sighand->siglock);
		__for_each_thread(sig, t)
			tick_nohz_kick_task(t);
	}
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}

/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
	struct tick_sched *ts;

	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		if (atomic_read(&current->tick_dep_mask) ||
		    atomic_read(&current->signal->tick_dep_mask))
			tick_nohz_full_kick();
	}
}

/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	cpumask_copy(tick_nohz_full_mask, cpumask);
	tick_nohz_full_running = true;
}

static int tick_nohz_cpu_down(unsigned int cpu)
{
	/*
	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
	 * CPUs. It must remain online when nohz full is enabled.
	 */
	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return -EBUSY;
	return 0;
}

void __init tick_nohz_init(void)
{
	int cpu, ret;

	if (!tick_nohz_full_running)
		return;

	/*
	 * Full dynticks uses irq work to drive the tick rescheduling on safe
	 * locking contexts. But then we need irq work to raise its own
	 * interrupts to avoid circular dependency on the tick
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
	    !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
		cpu = smp_processor_id();

		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
			pr_warn("NO_HZ: Clearing %d from nohz_full range "
				"for timekeeping\n", cpu);
			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
		}
	}

	for_each_cpu(cpu, tick_nohz_full_mask)
		ct_cpu_track_user(cpu);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"kernel/nohz:predown", NULL,
					tick_nohz_cpu_down);
	WARN_ON(ret < 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));
}
#endif

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled ?
 */
bool tick_nohz_enabled __read_mostly = true;
unsigned long tick_nohz_active __read_mostly;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);

bool tick_nohz_tick_stopped(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->tick_stopped;
}

bool tick_nohz_tick_stopped_cpu(int cpu)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	return ts->tick_stopped;
}

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU, which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog_sched();
}

static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	ktime_t delta;

	if (WARN_ON_ONCE(!ts->idle_active))
		return;

	delta = ktime_sub(now, ts->idle_entrytime);

	write_seqcount_begin(&ts->idle_sleeptime_seq);
	if (nr_iowait_cpu(smp_processor_id()) > 0)
		ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
	else
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);

	ts->idle_entrytime = now;
	ts->idle_active = 0;
	write_seqcount_end(&ts->idle_sleeptime_seq);

	sched_clock_idle_wakeup_event();
}

static void tick_nohz_start_idle(struct tick_sched *ts)
{
	write_seqcount_begin(&ts->idle_sleeptime_seq);
	ts->idle_entrytime = ktime_get();
	ts->idle_active = 1;
	write_seqcount_end(&ts->idle_sleeptime_seq);

	sched_clock_idle_sleep_event();
}

static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime,
				 bool compute_delta, u64 *last_update_time)
{
	ktime_t now, idle;
	unsigned int seq;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time)
		*last_update_time = ktime_to_us(now);

	do {
		seq = read_seqcount_begin(&ts->idle_sleeptime_seq);

		if (ts->idle_active && compute_delta) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(*sleeptime, delta);
		} else {
			idle = *sleeptime;
		}
	} while (read_seqcount_retry(&ts->idle_sleeptime_seq, seq));

	return ktime_to_us(idle);

}

/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds. Note this is partially broken due to
 * the counter of iowait tasks that can be remotely updated without
 * any synchronization. Therefore it is possible to observe backward
 * values within two consecutive reads.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	return get_cpu_sleep_time_us(ts, &ts->idle_sleeptime,
				     !nr_iowait_cpu(cpu), last_update_time);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds. Note this is partially broken due to
 * the counter of iowait tasks that can be remotely updated without
 * any synchronization. Therefore it is possible to observe backward
 * values within two consecutive reads.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime,
				     nr_iowait_cpu(cpu), last_update_time);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	}

	/*
	 * Reset to make sure next tick stop doesn't get fooled by past
	 * cached clock deadline.
	 */
	ts->next_tick = 0;
}

static inline bool local_timer_softirq_pending(void)
{
	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}

static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
	u64 basemono, next_tick, delta, expires;
	unsigned long basejiff;
	unsigned int seq;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqcount_begin(&jiffies_seq);
		basemono = last_jiffies_update;
		basejiff = jiffies;
	} while (read_seqcount_retry(&jiffies_seq, seq));
	ts->last_jiffies = basejiff;
	ts->timer_expires_base = basemono;

	/*
	 * Keep the periodic tick, when RCU, architecture or irq_work
	 * requests it.
	 * Aside of that, check whether the local timer softirq is
	 * pending. If so, it's a bad idea to call get_next_timer_interrupt(),
	 * because there is an already expired timer, so it will request
	 * immediate expiry, which rearms the hardware timer with a
	 * minimal delta and brings us back to this place
	 * immediately. Lather, rinse and repeat...
	 */
	if (rcu_needs_cpu() || arch_needs_cpu() ||
	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
		next_tick = basemono + TICK_NSEC;
	} else {
		/*
		 * Get the next pending timer. If high resolution
		 * timers are enabled this only takes the timer wheel
		 * timers into account. If high resolution timers are
		 * disabled this also looks at the next expiring
		 * hrtimer.
		 */
		next_tick = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tick;
	}

	/*
	 * If the tick is due in the next period, keep it ticking or
	 * force prod the timer.
	 */
	delta = next_tick - basemono;
	if (delta <= (u64)TICK_NSEC) {
		/*
		 * Tell the timer code that the base is not idle, i.e. undo
		 * the effect of get_next_timer_interrupt():
		 */
		timer_clear_idle();
		/*
		 * We've not stopped the tick yet, and there's a timer in the
		 * next period, so no point in stopping it either, bail.
		 */
		if (!ts->tick_stopped) {
			ts->timer_expires = 0;
			goto out;
		}
	}

	/*
	 * If this CPU is the one which had the do_timer() duty last, we limit
	 * the sleep time to the timekeeping max_deferment value.
	 * Otherwise we can sleep as long as we want.
	 */
	delta = timekeeping_max_deferment();
	if (cpu != tick_do_timer_cpu &&
	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
		delta = KTIME_MAX;

	/* Calculate the next expiry time */
	if (delta < (KTIME_MAX - basemono))
		expires = basemono + delta;
	else
		expires = KTIME_MAX;

	ts->timer_expires = min_t(u64, expires, next_tick);

out:
	return ts->timer_expires;
}

static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	u64 basemono = ts->timer_expires_base;
	u64 expires = ts->timer_expires;
	ktime_t tick = expires;

	/* Make sure we won't be trying to stop it twice in a row. */
	ts->timer_expires_base = 0;

	/*
	 * If this CPU is the one which updates jiffies, then give up
	 * the assignment and let it be taken by the CPU which runs
	 * the tick timer next, which might be this CPU as well. If we
	 * don't drop this here the jiffies might be stale and
	 * do_timer() never invoked. Keep track of the fact that it
	 * was the one which had the do_timer() duty last.
	 */
	if (cpu == tick_do_timer_cpu) {
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		ts->do_timer_last = 1;
	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
		ts->do_timer_last = 0;
	}

	/* Skip reprogram of event if it's not changed */
	if (ts->tick_stopped && (expires == ts->next_tick)) {
		/* Sanity check: make sure clockevent is actually programmed */
		if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
			return;

		WARN_ON_ONCE(1);
		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
			    basemono, ts->next_tick, dev->next_event,
			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
	}

	/*
	 * tick_nohz_stop_tick() can be called several times before
	 * tick_nohz_restart_sched_tick() is called. This happens when
	 * interrupts arrive which do not cause a reschedule. In the
	 * first call we save the current tick time, so we can restart
	 * the scheduler tick in tick_nohz_restart_sched_tick().
	 */
	if (!ts->tick_stopped) {
		calc_load_nohz_start();
		quiet_vmstat();

		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
		ts->tick_stopped = 1;
		trace_tick_stop(1, TICK_DEP_MASK_NONE);
	}

	ts->next_tick = tick;

	/*
	 * If the expiration time == KTIME_MAX, then we simply stop
	 * the tick timer.
	 */
	if (unlikely(expires == KTIME_MAX)) {
		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
			hrtimer_cancel(&ts->sched_timer);
		else
			tick_program_event(KTIME_MAX, 1);
		return;
	}

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start(&ts->sched_timer, tick,
			      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		hrtimer_set_expires(&ts->sched_timer, tick);
		tick_program_event(tick, 1);
	}
}

static void tick_nohz_retain_tick(struct tick_sched *ts)
{
	ts->timer_expires_base = 0;
}

#ifdef CONFIG_NO_HZ_FULL
static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
{
	if (tick_nohz_next_event(ts, cpu))
		tick_nohz_stop_tick(ts, cpu);
	else
		tick_nohz_retain_tick(ts);
}
#endif /* CONFIG_NO_HZ_FULL */

static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);

	/*
	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
	 * the clock forward checks in the enqueue path:
	 */
	timer_clear_idle();

	calc_load_nohz_stop();
	touch_softlockup_watchdog_sched();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	tick_nohz_restart(ts, now);
}

static void __tick_nohz_full_update_tick(struct tick_sched *ts,
					 ktime_t now)
{
#ifdef CONFIG_NO_HZ_FULL
	int cpu = smp_processor_id();

	if (can_stop_full_tick(cpu, ts))
		tick_nohz_stop_sched_tick(ts, cpu);
	else if (ts->tick_stopped)
		tick_nohz_restart_sched_tick(ts, now);
#endif
}

static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
		return;

	__tick_nohz_full_update_tick(ts, ktime_get());
}

/*
 * A pending softirq outside an IRQ (or softirq disabled section) context
 * should be waiting for ksoftirqd to handle it. Therefore we shouldn't
 * reach here due to the need_resched() early check in can_stop_idle_tick().
 *
 * However if we are between CPUHP_AP_SMPBOOT_THREADS and CPUHP_TEARDOWN_CPU
 * on the cpu_down() process, softirqs can still be raised while ksoftirqd is
 * parked, triggering the below since wakeup_softirqd() is ignored.
 *
 */
static bool report_idle_softirq(void)
{
	static int ratelimit;
	unsigned int pending = local_softirq_pending();

	if (likely(!pending))
		return false;

	/* Some softirqs claim to be safe against hotplug and ksoftirqd parking */
	if (!cpu_active(smp_processor_id())) {
		pending &= ~SOFTIRQ_HOTPLUG_SAFE_MASK;
		if (!pending)
			return false;
	}

	/* Warn at most ten times to avoid flooding the log */
	if (ratelimit >= 10)
		return false;

	/* On RT, softirq handling may be waiting on some lock */
	if (local_bh_blocked())
		return false;

	pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
		pending);
	ratelimit++;

	return true;
}

static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this CPU is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the CPU which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		/*
		 * Make sure the CPU doesn't get fooled by obsolete tick
		 * deadline if it comes back online later.
		 */
		ts->next_tick = 0;
		return false;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return false;

	if (need_resched())
		return false;

	if (unlikely(report_idle_softirq()))
		return false;

	if (tick_nohz_full_enabled()) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around
		 */
		if (tick_do_timer_cpu == cpu)
			return false;

		/* Should not happen for nohz-full */
		if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
			return false;
	}

	return true;
}

/**
 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 */
void tick_nohz_idle_stop_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	int cpu = smp_processor_id();
	ktime_t expires;

	/*
	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
	 * tick timer expiration time is known already.
	 */
	if (ts->timer_expires_base)
		expires = ts->timer_expires;
	else if (can_stop_idle_tick(cpu, ts))
		expires = tick_nohz_next_event(ts, cpu);
	else
		return;

	ts->idle_calls++;

	if (expires > 0LL) {
		int was_stopped = ts->tick_stopped;

		tick_nohz_stop_tick(ts, cpu);

		ts->idle_sleeps++;
		ts->idle_expires = expires;

		if (!was_stopped && ts->tick_stopped) {
			ts->idle_jiffies = ts->last_jiffies;
			nohz_balance_enter_idle(cpu);
		}
	} else {
		tick_nohz_retain_tick(ts);
	}
}

void tick_nohz_idle_retain_tick(void)
{
	tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
	/*
	 * Undo the effect of get_next_timer_interrupt() called from
	 * tick_nohz_next_event().
	 */
	timer_clear_idle();
}

/**
 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
 *
 * Called when we start the idle loop.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	lockdep_assert_irqs_enabled();

	local_irq_disable();

	ts = this_cpu_ptr(&tick_cpu_sched);

	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 1;
	tick_nohz_start_idle(ts);

	local_irq_enable();
}

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->inidle)
		tick_nohz_start_idle(ts);
	else
		tick_nohz_full_update_tick(ts);
}

/**
 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
 */
bool tick_nohz_idle_got_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->got_idle_tick) {
		ts->got_idle_tick = 0;
		return true;
	}
	return false;
}

/**
 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
 * or the tick, whichever expires first. Note that, if the tick has been
 * stopped, it returns the next hrtimer.
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_next_hrtimer(void)
{
	return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
}

/**
 * tick_nohz_get_sleep_length - return the expected length of the current sleep
 * @delta_next: duration until the next event if the tick cannot be stopped
 *
 * Called from power state control code with interrupts disabled.
 *
 * The return value of this function and/or the value returned by it through the
 * @delta_next pointer can be negative which must be taken into account by its
 * callers.
 */
ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	int cpu = smp_processor_id();
	/*
	 * The idle entry time is expected to be a sufficient approximation of
	 * the current time at this point.
	 */
	ktime_t now = ts->idle_entrytime;
	ktime_t next_event;

	WARN_ON_ONCE(!ts->inidle);

	*delta_next = ktime_sub(dev->next_event, now);

	if (!can_stop_idle_tick(cpu, ts))
		return *delta_next;

	next_event = tick_nohz_next_event(ts, cpu);
	if (!next_event)
		return *delta_next;

	/*
	 * If the next highres timer to expire is earlier than next_event, the
	 * idle governor needs to know that.
	 */
	next_event = min_t(u64, next_event,
			   hrtimer_next_event_without(&ts->sched_timer));

	return ktime_sub(next_event, now);
}

/**
 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
 * for a particular CPU.
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
{
	struct tick_sched *ts = tick_get_tick_sched(cpu);

	return ts->idle_calls;
}

/**
 * tick_nohz_get_idle_calls - return the current idle calls counter value
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->idle_calls;
}

static void tick_nohz_account_idle_time(struct tick_sched *ts,
					ktime_t now)
{
	unsigned long ticks;

	ts->idle_exittime = now;

	if (vtime_accounting_enabled_this_cpu())
		return;
	/*
	 * We stopped the tick in idle. update_process_times() would miss the
	 * time we slept, as it only does a single tick of accounting.
	 * Enforce that this time is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
}

void tick_nohz_idle_restart_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		ktime_t now = ktime_get();
		tick_nohz_restart_sched_tick(ts, now);
		tick_nohz_account_idle_time(ts, now);
	}
}

static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		__tick_nohz_full_update_tick(ts, now);
	else
		tick_nohz_restart_sched_tick(ts, now);

	tick_nohz_account_idle_time(ts, now);
}

/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle.
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	bool idle_active, tick_stopped;
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);
	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 0;
	idle_active = ts->idle_active;
	tick_stopped = ts->tick_stopped;

	if (idle_active || tick_stopped)
		now = ktime_get();

	if (idle_active)
		tick_nohz_stop_idle(ts, now);

	if (tick_stopped)
		tick_nohz_idle_update_tick(ts, now);

	local_irq_enable();
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	dev->next_event = KTIME_MAX;

	tick_sched_do_timer(ts, now);
	tick_sched_handle(ts, regs);

	if (unlikely(ts->tick_stopped)) {
		/*
		 * The clockevent device is not reprogrammed, so change the
		 * clock event device to ONESHOT_STOPPED to avoid spurious
		 * interrupts on devices which might not be truly one shot.
		 */
		tick_program_event(KTIME_MAX, 1);
		return;
	}

	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
{
	if (!tick_nohz_enabled)
		return;
	ts->nohz_mode = mode;
	/* One update is enough */
	if (!test_and_set_bit(0, &tick_nohz_active))
		timers_update_nohz();
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	if (tick_switch_to_oneshot(tick_nohz_handler))
		return;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	/* Get the next period */
	next = tick_init_jiffy_update();

	hrtimer_set_expires(&ts->sched_timer, next);
	hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}

static inline void tick_nohz_irq_enter(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);
	/*
	 * If all CPUs are idle, we may need to update a stale jiffies value.
	 * Note nohz_full is a special case: a timekeeper is guaranteed to stay
	 * alive but it might be busy looping with interrupts disabled in some
	 * rare case (typically stop machine). So we must make sure we have a
	 * last resort.
	 */
	if (ts->tick_stopped)
		tick_nohz_update_jiffies(now);
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }
static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_irq_enter(void)
{
	tick_check_oneshot_broadcast_this_cpu();
	tick_nohz_irq_enter();
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(ts, now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs)
		tick_sched_handle(ts, regs);
	else
		ts->next_tick = 0;

	/* No need to reprogram if we are in idle or full dynticks mode */
	if (unlikely(ts->tick_stopped))
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, TICK_NSEC);

	return HRTIMER_RESTART;
}

static int sched_skew_tick;

static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per-CPU) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
	if (sched_skew_tick) {
		u64 offset = TICK_NSEC >> 1;
		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	memset(ts, 0, sizeof(*ts));
}
#endif

/*
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/*
 * Check if a change happened, which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime). Called with interrupts disabled.
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}