// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  No idle tick implementation for low and high resolution timers
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
 * The time when the last jiffy update happened. Write access must hold
 * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
 * consistent view of jiffies and last_jiffies_update.
 */
static ktime_t last_jiffies_update;

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 1;
	ktime_t delta, nextp;

	/*
	 * 64-bit can do a quick check without holding the jiffies lock and
	 * without looking at the sequence count. The smp_load_acquire()
	 * pairs with the update done later in this function.
	 *
	 * 32-bit cannot do that because the store of tick_next_period
	 * consists of two 32-bit stores and the first store could move it
	 * to a random point in the future.
	 */
	if (IS_ENABLED(CONFIG_64BIT)) {
		if (ktime_before(now, smp_load_acquire(&tick_next_period)))
			return;
	} else {
		unsigned int seq;

		/*
		 * Avoid contention on jiffies_lock and protect the quick
		 * check with the sequence count.
		 */
		do {
			seq = read_seqcount_begin(&jiffies_seq);
			nextp = tick_next_period;
		} while (read_seqcount_retry(&jiffies_seq, seq));

		if (ktime_before(now, nextp))
			return;
	}

	/* Quick check failed, i.e. update is required. */
	raw_spin_lock(&jiffies_lock);
	/*
	 * Re-evaluate with the lock held. Another CPU might have done the
	 * update already.
	 */
	if (ktime_before(now, tick_next_period)) {
		raw_spin_unlock(&jiffies_lock);
		return;
	}

	write_seqcount_begin(&jiffies_seq);

	delta = ktime_sub(now, tick_next_period);
	if (unlikely(delta >= TICK_NSEC)) {
		/* Slow path for long idle sleep times */
		s64 incr = TICK_NSEC;

		ticks += ktime_divns(delta, incr);

		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   incr * ticks);
	} else {
		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   TICK_NSEC);
	}

	/* Advance jiffies to complete the jiffies_seq protected job */
	jiffies_64 += ticks;

	/*
	 * Keep the tick_next_period variable up to date.
	 */
	nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);

	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * Pairs with smp_load_acquire() in the lockless quick
		 * check above and ensures that the update to jiffies_64 is
		 * not reordered vs. the store to tick_next_period, neither
		 * by the compiler nor by the CPU.
		 */
		smp_store_release(&tick_next_period, nextp);
	} else {
		/*
		 * A plain store is good enough on 32-bit, as the quick check
		 * above is protected by the sequence count.
		 */
		tick_next_period = nextp;
	}

	/*
	 * Release the sequence count. calc_global_load() below is not
	 * protected by it, but jiffies_lock needs to be held to prevent
	 * concurrent invocations.
	 */
	write_seqcount_end(&jiffies_seq);

	calc_global_load();

	raw_spin_unlock(&jiffies_lock);
	update_wall_time();
}

/*
 * Initialize and return the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	raw_spin_lock(&jiffies_lock);
	write_seqcount_begin(&jiffies_seq);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_seqcount_end(&jiffies_seq);
	raw_spin_unlock(&jiffies_lock);
	return period;
}

static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the CPU in charge went
	 * into a long sleep. If two CPUs happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 *
	 * If nohz_full is enabled, this should not happen because the
	 * tick_do_timer_cpu never relinquishes the duty.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
#ifdef CONFIG_NO_HZ_FULL
		WARN_ON(tick_nohz_full_running);
#endif
		tick_do_timer_cpu = cpu;
	}
#endif

	/* Check if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	if (ts->inidle)
		ts->got_idle_tick = 1;
}

static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog, as we might not schedule for a really long
	 * time. This happens on completely idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog_sched();
		if (is_idle_task(current))
			ts->idle_jiffies++;
		/*
		 * In case the current tick fired too early past its expected
		 * expiration, make sure we don't bypass the next clock reprogramming
		 * to the same deadline.
		 */
		ts->next_tick = 0;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}
#endif

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
bool tick_nohz_full_running;
EXPORT_SYMBOL_GPL(tick_nohz_full_running);
static atomic_t tick_dep_mask;

static bool check_tick_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	if (val & TICK_DEP_MASK_POSIX_TIMER) {
		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
		return true;
	}

	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
		return true;
	}

	if (val & TICK_DEP_MASK_SCHED) {
		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
		return true;
	}

	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
		return true;
	}

	if (val & TICK_DEP_MASK_RCU) {
		trace_tick_stop(0, TICK_DEP_MASK_RCU);
		return true;
	}

	return false;
}

static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
	lockdep_assert_irqs_disabled();

	if (unlikely(!cpu_online(cpu)))
		return false;

	if (check_tick_dependency(&tick_dep_mask))
		return false;

	if (check_tick_dependency(&ts->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->signal->tick_dep_mask))
		return false;

	return true;
}

static void nohz_full_kick_func(struct irq_work *work)
{
	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
	IRQ_WORK_INIT_HARD(nohz_full_kick_func);

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}

static void tick_nohz_kick_task(struct task_struct *tsk)
{
	int cpu;

	/*
	 * If the task is not running, run_posix_cpu_timers()
	 * has nothing to elapse, so the IPI can be spared.
	 *
	 * activate_task()                      STORE p->tick_dep_mask
	 *   STORE p->on_rq
	 * __schedule() (switch to task 'p')    smp_mb() (atomic_fetch_or())
	 *   LOCK rq->lock                      LOAD p->on_rq
	 *   smp_mb__after_spin_lock()
	 *   tick_nohz_task_switch()
	 *     LOAD p->tick_dep_mask
	 */
	if (!sched_task_on_rq(tsk))
		return;

	/*
	 * If the task concurrently migrates to another CPU,
	 * we guarantee it sees the new tick dependency upon
	 * schedule.
	 *
	 * set_task_cpu(p, cpu);
	 *   STORE p->cpu = @cpu
	 * __schedule() (switch to task 'p')
	 *   LOCK rq->lock
	 *   smp_mb__after_spin_lock()          STORE p->tick_dep_mask
	 *   tick_nohz_task_switch()            smp_mb() (atomic_fetch_or())
	 *     LOAD p->tick_dep_mask            LOAD p->cpu
	 */
	cpu = task_cpu(tsk);

	preempt_disable();
	if (cpu_online(cpu))
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
	int cpu;

	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

static void tick_nohz_dep_set_all(atomic_t *dep,
				  enum tick_dep_bits bit)
{
	int prev;

	prev = atomic_fetch_or(BIT(bit), dep);
	if (!prev)
		tick_nohz_full_kick_all();
}

/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * by unstable clocks.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&tick_dep_mask, bit);
}

void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tick_dep_mask);
}

/*
 * Set per-CPU tick dependency. Used by the scheduler and perf events in order
 * to manage event throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	int prev;
	struct tick_sched *ts;

	ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
	if (!prev) {
		preempt_disable();
		/* Perf needs local kick that is NMI safe */
		if (cpu == smp_processor_id()) {
			tick_nohz_full_kick();
		} else {
			/* Remote irq work is not NMI-safe */
			if (!WARN_ON_ONCE(in_nmi()))
				tick_nohz_full_kick_cpu(cpu);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);

void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);

/*
 * Set a per-task tick dependency. RCU needs this, as do posix CPU timers
 * in order to elapse per-task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask))
		tick_nohz_kick_task(tsk);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);

void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);

/*
 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order
 * to elapse per-process timers.
 */
void tick_nohz_dep_set_signal(struct task_struct *tsk,
			      enum tick_dep_bits bit)
{
	int prev;
	struct signal_struct *sig = tsk->signal;

	prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask);
	if (!prev) {
		struct task_struct *t;

		lockdep_assert_held(&tsk->sighand->siglock);
		__for_each_thread(sig, t)
			tick_nohz_kick_task(t);
	}
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}

/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per-task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
	unsigned long flags;
	struct tick_sched *ts;

	local_irq_save(flags);

	if (!tick_nohz_full_cpu(smp_processor_id()))
		goto out;

	ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		if (atomic_read(&current->tick_dep_mask) ||
		    atomic_read(&current->signal->tick_dep_mask))
			tick_nohz_full_kick();
	}
out:
	local_irq_restore(flags);
}

/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	cpumask_copy(tick_nohz_full_mask, cpumask);
	tick_nohz_full_running = true;
}
EXPORT_SYMBOL_GPL(tick_nohz_full_setup);

static int tick_nohz_cpu_down(unsigned int cpu)
{
	/*
	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
	 * CPUs. It must remain online when nohz full is enabled.
	 */
	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return -EBUSY;
	return 0;
}

void __init tick_nohz_init(void)
{
	int cpu, ret;

	if (!tick_nohz_full_running)
		return;

	/*
	 * Full dynticks uses irq work to drive the tick rescheduling on safe
	 * locking contexts. But then we need irq work to raise its own
	 * interrupts to avoid a circular dependency on the tick.
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
	    !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
		cpu = smp_processor_id();

		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
			pr_warn("NO_HZ: Clearing %d from nohz_full range "
				"for timekeeping\n", cpu);
			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
		}
	}

	for_each_cpu(cpu, tick_nohz_full_mask)
		context_tracking_cpu_set(cpu);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"kernel/nohz:predown", NULL,
					tick_nohz_cpu_down);
	WARN_ON(ret < 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));
}
#endif

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled ?
 */
bool tick_nohz_enabled __read_mostly  = true;
unsigned long tick_nohz_active  __read_mostly;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);

bool tick_nohz_tick_stopped(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->tick_stopped;
}

bool tick_nohz_tick_stopped_cpu(int cpu)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	return ts->tick_stopped;
}

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog_sched();
}

/*
 * Updates the per-CPU idle time statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);
}

static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event();
}

static void tick_nohz_start_idle(struct tick_sched *ts)
{
	ts->idle_entrytime = ktime_get();
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
}

/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	}

	/*
	 * Reset to make sure the next tick stop doesn't get fooled by a past
	 * cached clock deadline.
	 */
	ts->next_tick = 0;
}

static inline bool local_timer_softirq_pending(void)
{
	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}

static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
	u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
	unsigned long basejiff;
	unsigned int seq;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqcount_begin(&jiffies_seq);
		basemono = last_jiffies_update;
		basejiff = jiffies;
	} while (read_seqcount_retry(&jiffies_seq, seq));
	ts->last_jiffies = basejiff;
	ts->timer_expires_base = basemono;

	/*
	 * Keep the periodic tick when RCU, the architecture or irq_work
	 * requests it.
	 * Aside of that, check whether the local timer softirq is
	 * pending. If so, it's a bad idea to call get_next_timer_interrupt(),
	 * because there is an already expired timer, so it will request
	 * immediate expiry, which rearms the hardware timer with a
	 * minimal delta, which brings us back to this place
	 * immediately.
	 * Lather, rinse and repeat...
	 */
	if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
		next_tick = basemono + TICK_NSEC;
	} else {
		/*
		 * Get the next pending timer. If high resolution
		 * timers are enabled this only takes the timer wheel
		 * timers into account. If high resolution timers are
		 * disabled this also looks at the next expiring
		 * hrtimer.
		 */
		next_tmr = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tmr;
		/* Take the next rcu event into account */
		next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
	}

	/*
	 * If the tick is due in the next period, keep it ticking or
	 * force prod the timer.
	 */
	delta = next_tick - basemono;
	if (delta <= (u64)TICK_NSEC) {
		/*
		 * Tell the timer code that the base is not idle, i.e. undo
		 * the effect of get_next_timer_interrupt():
		 */
		timer_clear_idle();
		/*
		 * We've not stopped the tick yet, and there's a timer in the
		 * next period, so no point in stopping it either, bail.
		 */
		if (!ts->tick_stopped) {
			ts->timer_expires = 0;
			goto out;
		}
	}

	/*
	 * If this CPU is the one which had the do_timer() duty last, we limit
	 * the sleep time to the timekeeping max_deferment value.
	 * Otherwise we can sleep as long as we want.
	 */
	delta = timekeeping_max_deferment();
	if (cpu != tick_do_timer_cpu &&
	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
		delta = KTIME_MAX;

	/* Calculate the next expiry time */
	if (delta < (KTIME_MAX - basemono))
		expires = basemono + delta;
	else
		expires = KTIME_MAX;

	ts->timer_expires = min_t(u64, expires, next_tick);

out:
	return ts->timer_expires;
}

static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	u64 basemono = ts->timer_expires_base;
	u64 expires = ts->timer_expires;
	ktime_t tick = expires;

	/* Make sure we won't be trying to stop it twice in a row. */
	ts->timer_expires_base = 0;

	/*
	 * If this CPU is the one which updates jiffies, then give up
	 * the assignment and let it be taken by the CPU which runs
	 * the tick timer next, which might be this CPU as well. If we
	 * don't drop this here, the jiffies might be stale and
	 * do_timer() never invoked. Keep track of the fact that it
	 * was the one which had the do_timer() duty last.
	 */
	if (cpu == tick_do_timer_cpu) {
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		ts->do_timer_last = 1;
	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
		ts->do_timer_last = 0;
	}

	/* Skip reprogram of event if it's not changed */
	if (ts->tick_stopped && (expires == ts->next_tick)) {
		/* Sanity check: make sure clockevent is actually programmed */
		if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
			return;

		WARN_ON_ONCE(1);
		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
			    basemono, ts->next_tick, dev->next_event,
			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
	}

	/*
	 * nohz_stop_sched_tick() can be called several times before
	 * nohz_restart_sched_tick() is called. This happens when
	 * interrupts arrive which do not cause a reschedule.
	 * In the first call we save the current tick time, so we can restart
	 * the scheduler tick in nohz_restart_sched_tick().
	 */
	if (!ts->tick_stopped) {
		calc_load_nohz_start();
		quiet_vmstat();

		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
		ts->tick_stopped = 1;
		trace_tick_stop(1, TICK_DEP_MASK_NONE);
	}

	ts->next_tick = tick;

	/*
	 * If the expiration time == KTIME_MAX, then we simply stop
	 * the tick timer.
	 */
	if (unlikely(expires == KTIME_MAX)) {
		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
			hrtimer_cancel(&ts->sched_timer);
		return;
	}

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start(&ts->sched_timer, tick,
			      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		hrtimer_set_expires(&ts->sched_timer, tick);
		tick_program_event(tick, 1);
	}
}

static void tick_nohz_retain_tick(struct tick_sched *ts)
{
	ts->timer_expires_base = 0;
}

#ifdef CONFIG_NO_HZ_FULL
static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
{
	if (tick_nohz_next_event(ts, cpu))
		tick_nohz_stop_tick(ts, cpu);
	else
		tick_nohz_retain_tick(ts);
}
#endif /* CONFIG_NO_HZ_FULL */

static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);

	/*
	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
	 * the clock forward checks in the enqueue path:
	 */
	timer_clear_idle();

	calc_load_nohz_stop();
	touch_softlockup_watchdog_sched();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	tick_nohz_restart(ts, now);
}

static void __tick_nohz_full_update_tick(struct tick_sched *ts,
					 ktime_t now)
{
#ifdef CONFIG_NO_HZ_FULL
	int cpu = smp_processor_id();

	if (can_stop_full_tick(cpu, ts))
		tick_nohz_stop_sched_tick(ts, cpu);
	else if (ts->tick_stopped)
		tick_nohz_restart_sched_tick(ts, now);
#endif
}

static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
		return;

	__tick_nohz_full_update_tick(ts, ktime_get());
}

static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this CPU is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the CPU which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		/*
		 * Make sure the CPU doesn't get fooled by obsolete tick
		 * deadline if it comes back online later.
		 */
		ts->next_tick = 0;
		return false;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return false;

	if (need_resched())
		return false;

	if (unlikely(local_softirq_pending())) {
		static int ratelimit;

		if (ratelimit < 10 && !local_bh_blocked() &&
		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
			pr_warn("NOHZ tick-stop error: Non-RCU local softirq work is pending, handler #%02x!!!\n",
				(unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return false;
	}

	if (tick_nohz_full_enabled()) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around
		 */
		if (tick_do_timer_cpu == cpu)
			return false;

		/* Should not happen for nohz-full */
		if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
			return false;
	}

	return true;
}

static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
{
	ktime_t expires;
	int cpu = smp_processor_id();

	/*
	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
	 * tick timer expiration time is known already.
	 */
	if (ts->timer_expires_base)
		expires = ts->timer_expires;
	else if (can_stop_idle_tick(cpu, ts))
		expires = tick_nohz_next_event(ts, cpu);
	else
		return;

	ts->idle_calls++;

	if (expires > 0LL) {
		int was_stopped = ts->tick_stopped;

		tick_nohz_stop_tick(ts, cpu);

		ts->idle_sleeps++;
		ts->idle_expires = expires;

		if (!was_stopped && ts->tick_stopped) {
			ts->idle_jiffies = ts->last_jiffies;
			nohz_balance_enter_idle(cpu);
		}
	} else {
		tick_nohz_retain_tick(ts);
	}
}

/**
 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 */
void tick_nohz_idle_stop_tick(void)
{
	__tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched));
}

void tick_nohz_idle_retain_tick(void)
{
	tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
	/*
	 * Undo the effect of get_next_timer_interrupt() called from
	 * tick_nohz_next_event().
	 */
	timer_clear_idle();
}

/**
 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
 *
 * Called when we start the idle loop.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	lockdep_assert_irqs_enabled();

	local_irq_disable();

	ts = this_cpu_ptr(&tick_cpu_sched);

	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 1;
	tick_nohz_start_idle(ts);

	local_irq_enable();
}

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->inidle)
		tick_nohz_start_idle(ts);
	else
		tick_nohz_full_update_tick(ts);
}

/**
 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
 */
bool tick_nohz_idle_got_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->got_idle_tick) {
		ts->got_idle_tick = 0;
		return true;
	}
	return false;
}

/**
 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
 * or the tick, whichever expires first. Note that, if the tick has been
 * stopped, it returns the next hrtimer.
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_next_hrtimer(void)
{
	return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
}

/**
 * tick_nohz_get_sleep_length - return the expected length of the current sleep
 * @delta_next: duration until the next event if the tick cannot be stopped
 *
 * Called from power state control code with interrupts disabled.
 *
 * The return value of this function and/or the value returned by it through the
 * @delta_next pointer can be negative, which must be taken into account by its
 * callers.
 */
ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	int cpu = smp_processor_id();
	/*
	 * The idle entry time is expected to be a sufficient approximation of
	 * the current time at this point.
	 */
	ktime_t now = ts->idle_entrytime;
	ktime_t next_event;

	WARN_ON_ONCE(!ts->inidle);

	*delta_next = ktime_sub(dev->next_event, now);

	if (!can_stop_idle_tick(cpu, ts))
		return *delta_next;

	next_event = tick_nohz_next_event(ts, cpu);
	if (!next_event)
		return *delta_next;

	/*
	 * If the next highres timer to expire is earlier than next_event, the
	 * idle governor needs to know that.
	 */
	next_event = min_t(u64, next_event,
			   hrtimer_next_event_without(&ts->sched_timer));

	return ktime_sub(next_event, now);
}

/**
 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
 * for a particular CPU.
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
{
	struct tick_sched *ts = tick_get_tick_sched(cpu);

	return ts->idle_calls;
}

/**
 * tick_nohz_get_idle_calls - return the current idle calls counter value
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->idle_calls;
}

static void tick_nohz_account_idle_time(struct tick_sched *ts,
					ktime_t now)
{
	unsigned long ticks;

	ts->idle_exittime = now;

	if (vtime_accounting_enabled_this_cpu())
		return;
	/*
	 * We stopped the tick in idle. update_process_times() would miss the
	 * time we slept, as it only does a 1 tick accounting.
	 * Enforce that this is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
}

void tick_nohz_idle_restart_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		ktime_t now = ktime_get();

		tick_nohz_restart_sched_tick(ts, now);
		tick_nohz_account_idle_time(ts, now);
	}
}

static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		__tick_nohz_full_update_tick(ts, now);
	else
		tick_nohz_restart_sched_tick(ts, now);

	tick_nohz_account_idle_time(ts, now);
}

/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle.
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	bool idle_active, tick_stopped;
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);
	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 0;
	idle_active = ts->idle_active;
	tick_stopped = ts->tick_stopped;

	if (idle_active || tick_stopped)
		now = ktime_get();

	if (idle_active)
		tick_nohz_stop_idle(ts, now);

	if (tick_stopped)
		tick_nohz_idle_update_tick(ts, now);

	local_irq_enable();
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	dev->next_event = KTIME_MAX;

	tick_sched_do_timer(ts, now);
	tick_sched_handle(ts, regs);

	/* No need to reprogram if we are running tickless */
	if (unlikely(ts->tick_stopped))
		return;

	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
{
	if (!tick_nohz_enabled)
		return;
	ts->nohz_mode = mode;
	/* One update is enough */
	if (!test_and_set_bit(0, &tick_nohz_active))
		timers_update_nohz();
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	if (tick_switch_to_oneshot(tick_nohz_handler))
		return;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	/* Get the next period */
	next = tick_init_jiffy_update();

	hrtimer_set_expires(&ts->sched_timer, next);
	hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}

static inline void tick_nohz_irq_enter(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);
	if (ts->tick_stopped)
		tick_nohz_update_jiffies(now);
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }
static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_irq_enter(void)
{
	tick_check_oneshot_broadcast_this_cpu();
	tick_nohz_irq_enter();
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(ts, now);

	/*
	 * Do not call when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs)
		tick_sched_handle(ts, regs);
	else
		ts->next_tick = 0;

	/* No need to reprogram if we are in idle or full dynticks mode */
	if (unlikely(ts->tick_stopped))
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, TICK_NSEC);

	return HRTIMER_RESTART;
}

static int sched_skew_tick;

static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per-CPU) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
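	/*
	 * For example, assuming HZ=1000 (TICK_NSEC = 1,000,000 ns) and four
	 * possible CPUs: the base offset below is 500,000 ns and each CPU
	 * skews its tick by 125,000 ns times its CPU id, so CPU 3 fires
	 * 375,000 ns after CPU 0.
	 */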
	if (sched_skew_tick) {
		u64 offset = TICK_NSEC >> 1;
		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	memset(ts, 0, sizeof(*ts));
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/**
 * Check if a change happened which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). @allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime). Called with interrupts disabled.
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}