// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  No idle tick implementation for low and high resolution timers
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
 * The time when the last jiffy update happened. Write access must hold
 * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
 * consistent view of jiffies and last_jiffies_update.
 */
static ktime_t last_jiffies_update;

/*
 * Must be called with interrupts disabled!
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 1;
	ktime_t delta, nextp;

	/*
	 * 64-bit can do a quick check without holding the jiffies lock and
	 * without looking at the sequence count. The smp_load_acquire()
	 * pairs with the update done later in this function.
	 *
	 * 32-bit cannot do that because the store of tick_next_period
	 * consists of two 32-bit stores and the first store could move it
	 * to a random point in the future.
	 */
	if (IS_ENABLED(CONFIG_64BIT)) {
		if (ktime_before(now, smp_load_acquire(&tick_next_period)))
			return;
	} else {
		unsigned int seq;

		/*
		 * Avoid contention on jiffies_lock and protect the quick
		 * check with the sequence count.
		 */
		do {
			seq = read_seqcount_begin(&jiffies_seq);
			nextp = tick_next_period;
		} while (read_seqcount_retry(&jiffies_seq, seq));

		if (ktime_before(now, nextp))
			return;
	}

	/* Quick check failed, i.e. update is required. */
	raw_spin_lock(&jiffies_lock);
	/*
	 * Reevaluate with the lock held. Another CPU might have done the
	 * update already.
	 */
	if (ktime_before(now, tick_next_period)) {
		raw_spin_unlock(&jiffies_lock);
		return;
	}

	write_seqcount_begin(&jiffies_seq);

	delta = ktime_sub(now, tick_next_period);
	if (unlikely(delta >= TICK_NSEC)) {
		/* Slow path for long idle sleep times */
		s64 incr = TICK_NSEC;

		ticks += ktime_divns(delta, incr);

		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   incr * ticks);
	} else {
		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   TICK_NSEC);
	}

	/* Advance jiffies to complete the jiffies_seq protected job */
	jiffies_64 += ticks;

	/*
	 * Keep the tick_next_period variable up to date.
	 */
	nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);

	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * Pairs with smp_load_acquire() in the lockless quick
		 * check above and ensures that the update to jiffies_64 is
		 * not reordered vs. the store to tick_next_period, neither
		 * by the compiler nor by the CPU.
		 */
		smp_store_release(&tick_next_period, nextp);
	} else {
		/*
		 * A plain store is good enough on 32-bit, as the quick check
		 * above is protected by the sequence count.
		 */
		tick_next_period = nextp;
	}

	/*
	 * Release the sequence count. calc_global_load() below is not
	 * protected by it, but jiffies_lock needs to be held to prevent
	 * concurrent invocations.
	 */
	write_seqcount_end(&jiffies_seq);

	calc_global_load();

	raw_spin_unlock(&jiffies_lock);
	update_wall_time();
}
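
/*
 * Worked example for the slow path above (illustrative, derived from the
 * code): with HZ=1000 (TICK_NSEC = 1,000,000 ns), a CPU waking up 5.5 ms
 * after tick_next_period sees delta = 5,500,000 ns, so
 * ticks = 1 + 5,500,000 / 1,000,000 = 6 and jiffies_64 advances by 6 in
 * one go instead of requiring six separate tick interrupts.
 */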

/*
 * Initialize and return the jiffies update time.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	raw_spin_lock(&jiffies_lock);
	write_seqcount_begin(&jiffies_seq);
	/* Did we start the jiffies update yet? */
	if (last_jiffies_update == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_seqcount_end(&jiffies_seq);
	raw_spin_unlock(&jiffies_lock);
	return period;
}

#define MAX_STALLED_JIFFIES 5

static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the CPU in charge went
	 * into a long sleep. If two CPUs happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 *
	 * If nohz_full is enabled, this should not happen because the
	 * tick_do_timer_cpu never relinquishes the duty.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
#ifdef CONFIG_NO_HZ_FULL
		WARN_ON_ONCE(tick_nohz_full_running);
#endif
		tick_do_timer_cpu = cpu;
	}
#endif

	/* Check if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * If the jiffies update stalled for too long (timekeeper in
	 * stop_machine() or VMEXIT'ed for several msecs), force an update.
	 */
	if (ts->last_tick_jiffies != jiffies) {
		ts->stalled_jiffies = 0;
		ts->last_tick_jiffies = READ_ONCE(jiffies);
	} else {
		if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) {
			tick_do_update_jiffies64(now);
			ts->stalled_jiffies = 0;
			ts->last_tick_jiffies = READ_ONCE(jiffies);
		}
	}

	if (ts->inidle)
		ts->got_idle_tick = 1;
}
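
/*
 * Example: MAX_STALLED_JIFFIES == 5 means that if jiffies has not moved
 * for five consecutive ticks observed on this CPU (with HZ=250 that is
 * roughly 20 ms, e.g. because the timekeeper CPU sits in stop_machine()
 * or its vCPU was descheduled), tick_sched_do_timer() above forces the
 * jiffies update itself.
 */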

static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on completely idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog_sched();
		if (is_idle_task(current))
			ts->idle_jiffies++;
		/*
		 * In case the current tick fired too early past its expected
		 * expiration, make sure we don't bypass the next clock reprogramming
		 * to the same deadline.
		 */
		ts->next_tick = 0;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}
#endif

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
bool tick_nohz_full_running;
EXPORT_SYMBOL_GPL(tick_nohz_full_running);
static atomic_t tick_dep_mask;

static bool check_tick_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	if (val & TICK_DEP_MASK_POSIX_TIMER) {
		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
		return true;
	}

	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
		return true;
	}

	if (val & TICK_DEP_MASK_SCHED) {
		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
		return true;
	}

	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
		return true;
	}

	if (val & TICK_DEP_MASK_RCU) {
		trace_tick_stop(0, TICK_DEP_MASK_RCU);
		return true;
	}

	return false;
}

static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
	lockdep_assert_irqs_disabled();

	if (unlikely(!cpu_online(cpu)))
		return false;

	if (check_tick_dependency(&tick_dep_mask))
		return false;

	if (check_tick_dependency(&ts->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->signal->tick_dep_mask))
		return false;

	return true;
}
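
/*
 * Summary: the tick can only be stopped on a full dynticks CPU when no
 * dependency bit is set at any of the four levels checked by
 * can_stop_full_tick() above: global (tick_dep_mask), per CPU
 * (ts->tick_dep_mask), per task (current->tick_dep_mask) and per process
 * (current->signal->tick_dep_mask).
 */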

static void nohz_full_kick_func(struct irq_work *work)
{
	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
	IRQ_WORK_INIT_HARD(nohz_full_kick_func);

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}

static void tick_nohz_kick_task(struct task_struct *tsk)
{
	int cpu;

	/*
	 * If the task is not running, run_posix_cpu_timers()
	 * has nothing to elapse, so the IPI can be spared.
	 *
	 * activate_task()                      STORE p->tick_dep_mask
	 *   STORE p->on_rq
	 * __schedule() (switch to task 'p')    smp_mb() (atomic_fetch_or())
	 *   LOCK rq->lock                      LOAD p->on_rq
	 *   smp_mb__after_spin_lock()
	 *   tick_nohz_task_switch()
	 *     LOAD p->tick_dep_mask
	 */
	if (!sched_task_on_rq(tsk))
		return;

	/*
	 * If the task concurrently migrates to another CPU,
	 * we guarantee it sees the new tick dependency upon
	 * schedule.
	 *
	 * set_task_cpu(p, cpu);
	 *   STORE p->cpu = @cpu
	 * __schedule() (switch to task 'p')
	 *   LOCK rq->lock
	 *   smp_mb__after_spin_lock()          STORE p->tick_dep_mask
	 *   tick_nohz_task_switch()            smp_mb() (atomic_fetch_or())
	 *     LOAD p->tick_dep_mask            LOAD p->cpu
	 */
	cpu = task_cpu(tsk);

	preempt_disable();
	if (cpu_online(cpu))
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
	int cpu;

	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

static void tick_nohz_dep_set_all(atomic_t *dep,
				  enum tick_dep_bits bit)
{
	int prev;

	prev = atomic_fetch_or(BIT(bit), dep);
	if (!prev)
		tick_nohz_full_kick_all();
}

/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * by unstable clocks.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&tick_dep_mask, bit);
}

void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tick_dep_mask);
}

/*
 * Set a per-CPU tick dependency. Used by the scheduler and perf events in
 * order to manage event throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	int prev;
	struct tick_sched *ts;

	ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
	if (!prev) {
		preempt_disable();
		/* Perf needs a local kick that is NMI safe */
		if (cpu == smp_processor_id()) {
			tick_nohz_full_kick();
		} else {
			/* Remote IRQ work is not NMI safe */
			if (!WARN_ON_ONCE(in_nmi()))
				tick_nohz_full_kick_cpu(cpu);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);

void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);

/*
 * Set a per-task tick dependency. RCU needs this. Posix CPU timers also
 * need it in order to elapse per-task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask))
		tick_nohz_kick_task(tsk);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);

void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);
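
/*
 * Example usage of the per-task dependency API (illustrative): RCU keeps
 * the tick alive for a task with pending work roughly as follows:
 *
 *	tick_nohz_dep_set_task(tsk, TICK_DEP_BIT_RCU);
 *	...
 *	tick_nohz_dep_clear_task(tsk, TICK_DEP_BIT_RCU);
 *
 * Only the first setter of a bit triggers a kick; clearing is a plain
 * atomic_andnot() and needs no IPI, as the next tick re-evaluates the
 * dependency mask anyway.
 */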

/*
 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order
 * to elapse per-process timers.
 */
void tick_nohz_dep_set_signal(struct task_struct *tsk,
			      enum tick_dep_bits bit)
{
	int prev;
	struct signal_struct *sig = tsk->signal;

	prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask);
	if (!prev) {
		struct task_struct *t;

		lockdep_assert_held(&tsk->sighand->siglock);
		__for_each_thread(sig, t)
			tick_nohz_kick_task(t);
	}
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}

/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per-task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
	struct tick_sched *ts;

	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		if (atomic_read(&current->tick_dep_mask) ||
		    atomic_read(&current->signal->tick_dep_mask))
			tick_nohz_full_kick();
	}
}

/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	cpumask_copy(tick_nohz_full_mask, cpumask);
	tick_nohz_full_running = true;
}

bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
{
	/*
	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
	 * CPUs. It must remain online when nohz full is enabled.
	 */
	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return false;
	return true;
}

static int tick_nohz_cpu_down(unsigned int cpu)
{
	return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
}

void __init tick_nohz_init(void)
{
	int cpu, ret;

	if (!tick_nohz_full_running)
		return;

	/*
	 * Full dynticks uses IRQ work to drive the tick rescheduling from
	 * safe locking contexts. But then we need IRQ work to raise its own
	 * interrupts to avoid a circular dependency on the tick.
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
	    !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
		cpu = smp_processor_id();

		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
			pr_warn("NO_HZ: Clearing %d from nohz_full range for timekeeping\n",
				cpu);
			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
		}
	}

	for_each_cpu(cpu, tick_nohz_full_mask)
		ct_cpu_track_user(cpu);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"kernel/nohz:predown", NULL,
					tick_nohz_cpu_down);
	WARN_ON(ret < 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));
}
#endif
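
/*
 * Example (boot command line, illustrative): "nohz_full=1-7" makes CPUs
 * 1-7 full dynticks CPUs via tick_nohz_full_setup(), while CPU 0 stays
 * available for the timekeeping/housekeeping duty described above.
 */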

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled?
 */
bool tick_nohz_enabled __read_mostly  = true;
unsigned long tick_nohz_active  __read_mostly;
/*
 * Enable / disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);

bool tick_nohz_tick_stopped(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->tick_stopped;
}

bool tick_nohz_tick_stopped_cpu(int cpu)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	return ts->tick_stopped;
}

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog_sched();
}

/*
 * Update the per-CPU idle time statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);
}

static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event();
}

static void tick_nohz_start_idle(struct tick_sched *ts)
{
	ts->idle_entrytime = ktime_get();
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
}
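
/*
 * Note: ts->idle_sleeptime and ts->iowait_sleeptime only accumulate while
 * ts->idle_active is set; the readers below fold an in-progress idle
 * period in on the fly. These counters back the idle/iowait numbers
 * exposed via get_cpu_idle_time_us() and get_cpu_iowait_time_us(), used
 * e.g. by /proc/stat and cpufreq governors.
 */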

/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	}

	/*
	 * Reset to make sure next tick stop doesn't get fooled by past
	 * cached clock deadline.
	 */
	ts->next_tick = 0;
}
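
/*
 * Note on the two modes visible above: in NOHZ_MODE_HIGHRES the
 * sched_timer hrtimer is (re)armed itself, while in NOHZ_MODE_LOWRES the
 * hrtimer merely stores the expiry for bookkeeping and the clockevent
 * device is programmed directly via tick_program_event().
 */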

static inline bool local_timer_softirq_pending(void)
{
	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}

static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
	u64 basemono, next_tick, delta, expires;
	unsigned long basejiff;
	unsigned int seq;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqcount_begin(&jiffies_seq);
		basemono = last_jiffies_update;
		basejiff = jiffies;
	} while (read_seqcount_retry(&jiffies_seq, seq));
	ts->last_jiffies = basejiff;
	ts->timer_expires_base = basemono;

	/*
	 * Keep the periodic tick when RCU, the architecture or irq_work
	 * requests it.
	 * Aside of that, check whether the local timer softirq is
	 * pending. If so, it's a bad idea to call get_next_timer_interrupt(),
	 * because there is an already expired timer, so it will request
	 * immediate expiry, which rearms the hardware timer with a
	 * minimal delta, which brings us back to this place
	 * immediately. Lather, rinse and repeat...
	 */
	if (rcu_needs_cpu() || arch_needs_cpu() ||
	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
		next_tick = basemono + TICK_NSEC;
	} else {
		/*
		 * Get the next pending timer. If high resolution
		 * timers are enabled this only takes the timer wheel
		 * timers into account. If high resolution timers are
		 * disabled this also looks at the next expiring
		 * hrtimer.
		 */
		next_tick = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tick;
	}

	/*
	 * If the tick is due in the next period, keep it ticking or
	 * force prod the timer.
	 */
	delta = next_tick - basemono;
	if (delta <= (u64)TICK_NSEC) {
		/*
		 * Tell the timer code that the base is not idle, i.e. undo
		 * the effect of get_next_timer_interrupt():
		 */
		timer_clear_idle();
		/*
		 * We've not stopped the tick yet, and there's a timer in the
		 * next period, so no point in stopping it either, bail.
		 */
		if (!ts->tick_stopped) {
			ts->timer_expires = 0;
			goto out;
		}
	}

	/*
	 * If this CPU is the one which had the do_timer() duty last, we limit
	 * the sleep time to the timekeeping max_deferment value.
	 * Otherwise we can sleep as long as we want.
	 */
	delta = timekeeping_max_deferment();
	if (cpu != tick_do_timer_cpu &&
	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
		delta = KTIME_MAX;

	/* Calculate the next expiry time */
	if (delta < (KTIME_MAX - basemono))
		expires = basemono + delta;
	else
		expires = KTIME_MAX;

	ts->timer_expires = min_t(u64, expires, next_tick);

out:
	return ts->timer_expires;
}
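
/*
 * Summary of the result: tick_nohz_next_event() returns 0 when the next
 * timer is due within one tick and the tick is still running (nothing to
 * gain from stopping it). Otherwise it returns the next expiry, clamped
 * by timekeeping_max_deferment() when this CPU holds the do_timer() duty,
 * or last held it while the duty is currently unassigned.
 */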

static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	u64 basemono = ts->timer_expires_base;
	u64 expires = ts->timer_expires;
	ktime_t tick = expires;

	/* Make sure we won't be trying to stop it twice in a row. */
	ts->timer_expires_base = 0;

	/*
	 * If this CPU is the one which updates jiffies, then give up
	 * the assignment and let it be taken by the CPU which runs
	 * the tick timer next, which might be this CPU as well. If we
	 * don't drop this here, the jiffies might be stale and
	 * do_timer() never gets invoked. Keep track of the fact that it
	 * was the one which had the do_timer() duty last.
	 */
	if (cpu == tick_do_timer_cpu) {
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		ts->do_timer_last = 1;
	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
		ts->do_timer_last = 0;
	}

	/* Skip reprogramming of the event if it's not changed */
	if (ts->tick_stopped && (expires == ts->next_tick)) {
		/* Sanity check: make sure the clockevent is actually programmed */
		if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
			return;

		WARN_ON_ONCE(1);
		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
			    basemono, ts->next_tick, dev->next_event,
			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
	}

	/*
	 * nohz_stop_sched_tick() can be called several times before
	 * nohz_restart_sched_tick() is called. This happens when
	 * interrupts arrive which do not cause a reschedule. In the
	 * first call we save the current tick time, so we can restart
	 * the scheduler tick in nohz_restart_sched_tick().
	 */
	if (!ts->tick_stopped) {
		calc_load_nohz_start();
		quiet_vmstat();

		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
		ts->tick_stopped = 1;
		trace_tick_stop(1, TICK_DEP_MASK_NONE);
	}

	ts->next_tick = tick;

	/*
	 * If the expiration time == KTIME_MAX, then we simply stop
	 * the tick timer.
	 */
	if (unlikely(expires == KTIME_MAX)) {
		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
			hrtimer_cancel(&ts->sched_timer);
		else
			tick_program_event(KTIME_MAX, 1);
		return;
	}

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start(&ts->sched_timer, tick,
			      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		hrtimer_set_expires(&ts->sched_timer, tick);
		tick_program_event(tick, 1);
	}
}

static void tick_nohz_retain_tick(struct tick_sched *ts)
{
	ts->timer_expires_base = 0;
}

#ifdef CONFIG_NO_HZ_FULL
static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
{
	if (tick_nohz_next_event(ts, cpu))
		tick_nohz_stop_tick(ts, cpu);
	else
		tick_nohz_retain_tick(ts);
}
#endif /* CONFIG_NO_HZ_FULL */

static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);

	/*
	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
	 * the clock forward checks in the enqueue path:
	 */
	timer_clear_idle();

	calc_load_nohz_stop();
	touch_softlockup_watchdog_sched();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	tick_nohz_restart(ts, now);
}

static void __tick_nohz_full_update_tick(struct tick_sched *ts,
					 ktime_t now)
{
#ifdef CONFIG_NO_HZ_FULL
	int cpu = smp_processor_id();

	if (can_stop_full_tick(cpu, ts))
		tick_nohz_stop_sched_tick(ts, cpu);
	else if (ts->tick_stopped)
		tick_nohz_restart_sched_tick(ts, now);
#endif
}

static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
		return;

	__tick_nohz_full_update_tick(ts, ktime_get());
}
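
/*
 * Note: on nohz_full CPUs, tick_nohz_full_update_tick() runs from IRQ
 * exit (see tick_nohz_irq_exit()); the various kick IPIs in this file
 * merely force an interrupt so that this re-evaluation happens on the
 * target CPU.
 */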

/*
 * A pending softirq outside an IRQ (or softirq disabled section) context
 * should be waiting for ksoftirqd to handle it. Therefore we shouldn't
 * reach here, thanks to the need_resched() early check in
 * can_stop_idle_tick().
 *
 * However, if we are between CPUHP_AP_SMPBOOT_THREADS and CPUHP_TEARDOWN_CPU
 * in the cpu_down() path, softirqs can still be raised while ksoftirqd is
 * parked, triggering the warning below because wakeup_softirqd() is ignored.
 */
static bool report_idle_softirq(void)
{
	static int ratelimit;
	unsigned int pending = local_softirq_pending();

	if (likely(!pending))
		return false;

	/* Some softirqs claim to be safe against hotplug and ksoftirqd parking */
	if (!cpu_active(smp_processor_id())) {
		pending &= ~SOFTIRQ_HOTPLUG_SAFE_MASK;
		if (!pending)
			return false;
	}

	if (ratelimit >= 10)
		return false;

	/* On RT, softirq handling may be waiting on some lock */
	if (local_bh_blocked())
		return false;

	pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
		pending);
	ratelimit++;

	return true;
}

static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this CPU is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the CPU which runs the tick timer next. If we don't drop
	 * this here, the jiffies might be stale and do_timer() never
	 * gets invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		/*
		 * Make sure the CPU doesn't get fooled by an obsolete tick
		 * deadline if it comes back online later.
		 */
		ts->next_tick = 0;
		return false;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return false;

	if (need_resched())
		return false;

	if (unlikely(report_idle_softirq()))
		return false;

	if (tick_nohz_full_enabled()) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around.
		 */
		if (tick_do_timer_cpu == cpu)
			return false;

		/* Should not happen for nohz-full */
		if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
			return false;
	}

	return true;
}

static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
{
	ktime_t expires;
	int cpu = smp_processor_id();

	/*
	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
	 * tick timer expiration time is known already.
	 */
	if (ts->timer_expires_base)
		expires = ts->timer_expires;
	else if (can_stop_idle_tick(cpu, ts))
		expires = tick_nohz_next_event(ts, cpu);
	else
		return;

	ts->idle_calls++;

	if (expires > 0LL) {
		int was_stopped = ts->tick_stopped;

		tick_nohz_stop_tick(ts, cpu);

		ts->idle_sleeps++;
		ts->idle_expires = expires;

		if (!was_stopped && ts->tick_stopped) {
			ts->idle_jiffies = ts->last_jiffies;
			nohz_balance_enter_idle(cpu);
		}
	} else {
		tick_nohz_retain_tick(ts);
	}
}

/**
 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 */
void tick_nohz_idle_stop_tick(void)
{
	__tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched));
}

void tick_nohz_idle_retain_tick(void)
{
	tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
	/*
	 * Undo the effect of get_next_timer_interrupt() called from
	 * tick_nohz_next_event().
	 */
	timer_clear_idle();
}
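
/*
 * Simplified view of how the idle loop drives the functions below
 * (illustrative; see kernel/sched/idle.c and the cpuidle core for the
 * real call sites):
 *
 *	tick_nohz_idle_enter();
 *	while (!need_resched()) {
 *		tick_nohz_idle_stop_tick();	(or ..._retain_tick())
 *		<enter low-power idle state>
 *	}
 *	tick_nohz_idle_exit();	(restarts the tick if it was stopped)
 */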

/**
 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
 *
 * Called when we start the idle loop.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	lockdep_assert_irqs_enabled();

	local_irq_disable();

	ts = this_cpu_ptr(&tick_cpu_sched);

	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 1;
	tick_nohz_start_idle(ts);

	local_irq_enable();
}

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->inidle)
		tick_nohz_start_idle(ts);
	else
		tick_nohz_full_update_tick(ts);
}

/**
 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
 */
bool tick_nohz_idle_got_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->got_idle_tick) {
		ts->got_idle_tick = 0;
		return true;
	}
	return false;
}

/**
 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
 * or the tick, whichever expires first. Note that, if the tick has been
 * stopped, it returns the next hrtimer.
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_next_hrtimer(void)
{
	return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
}

/**
 * tick_nohz_get_sleep_length - return the expected length of the current sleep
 * @delta_next: duration until the next event if the tick cannot be stopped
 *
 * Called from power state control code with interrupts disabled.
 *
 * The return value of this function and/or the value returned by it through the
 * @delta_next pointer can be negative, which must be taken into account by its
 * callers.
 */
ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	int cpu = smp_processor_id();
	/*
	 * The idle entry time is expected to be a sufficient approximation of
	 * the current time at this point.
	 */
	ktime_t now = ts->idle_entrytime;
	ktime_t next_event;

	WARN_ON_ONCE(!ts->inidle);

	*delta_next = ktime_sub(dev->next_event, now);

	if (!can_stop_idle_tick(cpu, ts))
		return *delta_next;

	next_event = tick_nohz_next_event(ts, cpu);
	if (!next_event)
		return *delta_next;

	/*
	 * If the next highres timer to expire is earlier than next_event, the
	 * idle governor needs to know that.
	 */
	next_event = min_t(u64, next_event,
			   hrtimer_next_event_without(&ts->sched_timer));

	return ktime_sub(next_event, now);
}
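
/*
 * Note: cpuidle governors (e.g. the menu governor) use
 * tick_nohz_get_sleep_length() to pick an idle state: a long expected
 * sleep justifies a deep state and stopping the tick, a short one does
 * not.
 */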

/**
 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
 * for a particular CPU.
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
{
	struct tick_sched *ts = tick_get_tick_sched(cpu);

	return ts->idle_calls;
}

/**
 * tick_nohz_get_idle_calls - return the current idle calls counter value
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->idle_calls;
}

static void tick_nohz_account_idle_time(struct tick_sched *ts,
					ktime_t now)
{
	unsigned long ticks;

	ts->idle_exittime = now;

	if (vtime_accounting_enabled_this_cpu())
		return;
	/*
	 * We stopped the tick in idle. update_process_times() would miss the
	 * time we slept, as it only does one-tick accounting. Enforce that
	 * this is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
}

void tick_nohz_idle_restart_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		ktime_t now = ktime_get();

		tick_nohz_restart_sched_tick(ts, now);
		tick_nohz_account_idle_time(ts, now);
	}
}

static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		__tick_nohz_full_update_tick(ts, now);
	else
		tick_nohz_restart_sched_tick(ts, now);

	tick_nohz_account_idle_time(ts, now);
}

/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle.
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	bool idle_active, tick_stopped;
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);
	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 0;
	idle_active = ts->idle_active;
	tick_stopped = ts->tick_stopped;

	if (idle_active || tick_stopped)
		now = ktime_get();

	if (idle_active)
		tick_nohz_stop_idle(ts, now);

	if (tick_stopped)
		tick_nohz_idle_update_tick(ts, now);

	local_irq_enable();
}

/*
 * The nohz low-res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	dev->next_event = KTIME_MAX;

	tick_sched_do_timer(ts, now);
	tick_sched_handle(ts, regs);

	if (unlikely(ts->tick_stopped)) {
		/*
		 * The clockevent device is not reprogrammed, so change the
		 * clock event device to ONESHOT_STOPPED to avoid spurious
		 * interrupts on devices which might not be truly one shot.
		 */
		tick_program_event(KTIME_MAX, 1);
		return;
	}

	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
{
	if (!tick_nohz_enabled)
		return;
	ts->nohz_mode = mode;
	/* One update is enough */
	if (!test_and_set_bit(0, &tick_nohz_active))
		timers_update_nohz();
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	if (tick_switch_to_oneshot(tick_nohz_handler))
		return;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward() with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	/* Get the next period */
	next = tick_init_jiffy_update();

	hrtimer_set_expires(&ts->sched_timer, next);
	hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}

static inline void tick_nohz_irq_enter(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);
	/*
	 * If all CPUs are idle, we may need to update a stale jiffies value.
	 * Note nohz_full is a special case: a timekeeper is guaranteed to stay
	 * alive but it might be busy looping with interrupts disabled in some
	 * rare case (typically stop machine). So we must make sure we have a
	 * last resort.
	 */
	if (ts->tick_stopped)
		tick_nohz_update_jiffies(now);
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }
static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from irq_enter() to notify about the possible interruption of idle()
 */
void tick_irq_enter(void)
{
	tick_check_oneshot_broadcast_this_cpu();
	tick_nohz_irq_enter();
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(ts, now);

	/*
	 * Do not call when we are not in IRQ context and have
	 * no valid regs pointer.
	 */
	if (regs)
		tick_sched_handle(ts, regs);
	else
		ts->next_tick = 0;

	/* No need to reprogram if we are in idle or full dynticks mode */
	if (unlikely(ts->tick_stopped))
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, TICK_NSEC);

	return HRTIMER_RESTART;
}

static int sched_skew_tick;

static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per-CPU) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
	if (sched_skew_tick) {
		u64 offset = TICK_NSEC >> 1;

		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
#endif /* HIGH_RES_TIMERS */
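
/*
 * Worked example for the skew above (illustrative, derived from the
 * code): with HZ=250 (TICK_NSEC = 4,000,000 ns) and 4 possible CPUs,
 * booting with "skew_tick=1" yields
 * offset = (4,000,000 / 2) / 4 = 500,000 ns per CPU number, so the
 * per-CPU ticks fire 0.5 ms apart instead of all contending on
 * jiffies_lock at once.
 */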

#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	memset(ts, 0, sizeof(*ts));
}
#endif

/*
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/*
 * Check if a change happened which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime). Called with interrupts disabled.
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}