// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
 * The time when the last jiffies update happened. Write access must hold
 * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
 * consistent view of jiffies and last_jiffies_update.
 */
static ktime_t last_jiffies_update;

/*
 * Must be called with interrupts disabled!
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 1;
	ktime_t delta, nextp;

	/*
	 * 64bit can do a quick check without holding the jiffies lock and
	 * without looking at the sequence count. The smp_load_acquire()
	 * pairs with the update done later in this function.
	 *
	 * 32bit cannot do that because the store of tick_next_period
	 * consists of two 32bit stores and the first store could move it
	 * to a random point in the future.
	 */
	if (IS_ENABLED(CONFIG_64BIT)) {
		if (ktime_before(now, smp_load_acquire(&tick_next_period)))
			return;
	} else {
		unsigned int seq;

		/*
		 * Avoid contention on jiffies_lock and protect the quick
		 * check with the sequence count.
		 */
		do {
			seq = read_seqcount_begin(&jiffies_seq);
			nextp = tick_next_period;
		} while (read_seqcount_retry(&jiffies_seq, seq));

		if (ktime_before(now, nextp))
			return;
	}

	/* Quick check failed, i.e. update is required. */
	raw_spin_lock(&jiffies_lock);
	/*
	 * Reevaluate with the lock held. Another CPU might have done the
	 * update already.
	 */
	if (ktime_before(now, tick_next_period)) {
		raw_spin_unlock(&jiffies_lock);
		return;
	}

	write_seqcount_begin(&jiffies_seq);

	delta = ktime_sub(now, tick_next_period);
	if (unlikely(delta >= TICK_NSEC)) {
		/* Slow path for long idle sleep times */
		s64 incr = TICK_NSEC;

		ticks += ktime_divns(delta, incr);

		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   incr * ticks);
	} else {
		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   TICK_NSEC);
	}

	/* Advance jiffies to complete the jiffies_seq protected job */
	jiffies_64 += ticks;

	/*
	 * Keep the tick_next_period variable up to date.
	 */
	nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);

	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * Pairs with smp_load_acquire() in the lockless quick
		 * check above and ensures that the update to jiffies_64 is
		 * not reordered vs. the store to tick_next_period, neither
		 * by the compiler nor by the CPU.
		 */
		smp_store_release(&tick_next_period, nextp);
	} else {
		/*
		 * A plain store is good enough on 32bit, as the quick check
		 * above is protected by the sequence count.
		 */
		tick_next_period = nextp;
	}

	/*
	 * Release the sequence count. calc_global_load() below is not
	 * protected by it, but jiffies_lock needs to be held to prevent
	 * concurrent invocations.
	 */
	write_seqcount_end(&jiffies_seq);

	calc_global_load();

	raw_spin_unlock(&jiffies_lock);
	update_wall_time();
}
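/*
 * Illustrative sketch (not kernel API) of the 64bit lockless scheme
 * above: the writer publishes the new deadline with a release store
 * only after advancing jiffies_64, so a reader whose acquire load
 * observes the published tick_next_period is also guaranteed to
 * observe the jiffies_64 update that preceded it:
 *
 *	writer (holds jiffies_lock)		reader (lockless)
 *	jiffies_64 += ticks;
 *	smp_store_release(&tick_next_period,
 *			  nextp);
 *						if (ktime_before(now,
 *						    smp_load_acquire(&tick_next_period)))
 *							return; // jiffies_64 is recent enough
 */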
/*
 * Initialize and return the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	raw_spin_lock(&jiffies_lock);
	write_seqcount_begin(&jiffies_seq);
	/* Did we start the jiffies update yet? */
	if (last_jiffies_update == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_seqcount_end(&jiffies_seq);
	raw_spin_unlock(&jiffies_lock);
	return period;
}

static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the CPU in charge went
	 * into a long sleep. If two CPUs happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 *
	 * If nohz_full is enabled, this should not happen because the
	 * tick_do_timer_cpu never relinquishes the duty.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
#ifdef CONFIG_NO_HZ_FULL
		WARN_ON_ONCE(tick_nohz_full_running);
#endif
		tick_do_timer_cpu = cpu;
	}
#endif

	/* Check if jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	if (ts->inidle)
		ts->got_idle_tick = 1;
}

static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on completely idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog_sched();
		if (is_idle_task(current))
			ts->idle_jiffies++;
		/*
		 * In case the current tick fired too early past its expected
		 * expiration, make sure we don't bypass the next clock reprogramming
		 * to the same deadline.
		 */
		ts->next_tick = 0;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}
#endif

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
bool tick_nohz_full_running;
EXPORT_SYMBOL_GPL(tick_nohz_full_running);
static atomic_t tick_dep_mask;

static bool check_tick_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	if (val & TICK_DEP_MASK_POSIX_TIMER) {
		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
		return true;
	}

	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
		return true;
	}

	if (val & TICK_DEP_MASK_SCHED) {
		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
		return true;
	}

	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
		return true;
	}

	if (val & TICK_DEP_MASK_RCU) {
		trace_tick_stop(0, TICK_DEP_MASK_RCU);
		return true;
	}

	return false;
}
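/*
 * Illustrative sketch (assumption: each TICK_DEP_MASK_* above equals
 * BIT(TICK_DEP_BIT_*) as defined in <linux/tick.h>): setting and
 * clearing a dependency bit toggles the verdict of the check:
 *
 *	atomic_t dep = ATOMIC_INIT(0);
 *
 *	atomic_fetch_or(BIT(TICK_DEP_BIT_PERF_EVENTS), &dep);
 *	check_tick_dependency(&dep);	// true, traces TICK_DEP_MASK_PERF_EVENTS
 *	atomic_andnot(BIT(TICK_DEP_BIT_PERF_EVENTS), &dep);
 *	check_tick_dependency(&dep);	// false, the tick may be stopped
 */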
static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
	lockdep_assert_irqs_disabled();

	if (unlikely(!cpu_online(cpu)))
		return false;

	if (check_tick_dependency(&tick_dep_mask))
		return false;

	if (check_tick_dependency(&ts->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->signal->tick_dep_mask))
		return false;

	return true;
}

static void nohz_full_kick_func(struct irq_work *work)
{
	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
	IRQ_WORK_INIT_HARD(nohz_full_kick_func);

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}

static void tick_nohz_kick_task(struct task_struct *tsk)
{
	int cpu;

	/*
	 * If the task is not running, run_posix_cpu_timers()
	 * has nothing to elapse; the IPI can then be spared.
	 *
	 * activate_task()			STORE p->tick_dep_mask
	 *   STORE p->on_rq
	 * __schedule() (switch to task 'p')	smp_mb() (atomic_fetch_or())
	 *   LOCK rq->lock			LOAD p->on_rq
	 *   smp_mb__after_spin_lock()
	 *   tick_nohz_task_switch()
	 *     LOAD p->tick_dep_mask
	 */
	if (!sched_task_on_rq(tsk))
		return;

	/*
	 * If the task concurrently migrates to another CPU,
	 * we guarantee it sees the new tick dependency upon
	 * schedule.
	 *
	 * set_task_cpu(p, cpu);
	 *   STORE p->cpu = @cpu
	 * __schedule() (switch to task 'p')
	 *   LOCK rq->lock
	 *   smp_mb__after_spin_lock()		STORE p->tick_dep_mask
	 *   tick_nohz_task_switch()		smp_mb() (atomic_fetch_or())
	 *     LOAD p->tick_dep_mask		LOAD p->cpu
	 */
	cpu = task_cpu(tsk);

	preempt_disable();
	if (cpu_online(cpu))
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}
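/*
 * Usage sketch (illustrative): kicking a remote CPU safely from
 * preemptible context. Disabling preemption, as tick_nohz_kick_task()
 * does above, keeps the cpu_online() check and the kick coherent:
 *
 *	preempt_disable();
 *	if (cpu_online(cpu))
 *		tick_nohz_full_kick_cpu(cpu);
 *	preempt_enable();
 */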
/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
	int cpu;

	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

static void tick_nohz_dep_set_all(atomic_t *dep,
				  enum tick_dep_bits bit)
{
	int prev;

	prev = atomic_fetch_or(BIT(bit), dep);
	if (!prev)
		tick_nohz_full_kick_all();
}

/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * by unstable clock.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&tick_dep_mask, bit);
}

void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tick_dep_mask);
}

/*
 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 * manage events throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	int prev;
	struct tick_sched *ts;

	ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
	if (!prev) {
		preempt_disable();
		/* Perf needs a local kick that is NMI safe */
		if (cpu == smp_processor_id()) {
			tick_nohz_full_kick();
		} else {
			/* Remote irq work is not NMI-safe */
			if (!WARN_ON_ONCE(in_nmi()))
				tick_nohz_full_kick_cpu(cpu);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);

void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);

/*
 * Set a per-task tick dependency. RCU needs this, and so do posix CPU
 * timers in order to elapse per-task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask))
		tick_nohz_kick_task(tsk);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);

void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);
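/*
 * Usage sketch (illustrative): a subsystem such as RCU can pin the tick
 * to a task for as long as it needs per-task processing, then release
 * it. Only the first setter triggers a kick; clearing is a plain
 * atomic op:
 *
 *	tick_nohz_dep_set_task(tsk, TICK_DEP_BIT_RCU);
 *	...				// tick stays on for tsk
 *	tick_nohz_dep_clear_task(tsk, TICK_DEP_BIT_RCU);
 */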
/*
 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to
 * elapse per-process timers.
 */
void tick_nohz_dep_set_signal(struct task_struct *tsk,
			      enum tick_dep_bits bit)
{
	int prev;
	struct signal_struct *sig = tsk->signal;

	prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask);
	if (!prev) {
		struct task_struct *t;

		lockdep_assert_held(&tsk->sighand->siglock);
		__for_each_thread(sig, t)
			tick_nohz_kick_task(t);
	}
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}

/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per-task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
	struct tick_sched *ts;

	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		if (atomic_read(&current->tick_dep_mask) ||
		    atomic_read(&current->signal->tick_dep_mask))
			tick_nohz_full_kick();
	}
}

/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	cpumask_copy(tick_nohz_full_mask, cpumask);
	tick_nohz_full_running = true;
}

static int tick_nohz_cpu_down(unsigned int cpu)
{
	/*
	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
	 * CPUs. It must remain online when nohz full is enabled.
	 */
	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return -EBUSY;
	return 0;
}

void __init tick_nohz_init(void)
{
	int cpu, ret;

	if (!tick_nohz_full_running)
		return;

	/*
	 * Full dynticks uses irq work to drive the tick rescheduling on safe
	 * locking contexts. But then we need irq work to raise its own
	 * interrupts to avoid a circular dependency on the tick.
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
	    !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
		cpu = smp_processor_id();

		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
			pr_warn("NO_HZ: Clearing %d from nohz_full range "
				"for timekeeping\n", cpu);
			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
		}
	}

	for_each_cpu(cpu, tick_nohz_full_mask)
		context_tracking_cpu_set(cpu);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"kernel/nohz:predown", NULL,
					tick_nohz_cpu_down);
	WARN_ON(ret < 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));
}
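/*
 * Example (boot command line, illustrative): carve CPUs 1-7 out as full
 * dynticks CPUs and leave CPU 0 to handle timekeeping and housekeeping:
 *
 *	nohz_full=1-7
 *
 * The mask ends up in tick_nohz_full_mask via tick_nohz_full_setup().
 */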
#endif

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled?
 */
bool tick_nohz_enabled __read_mostly = true;
unsigned long tick_nohz_active __read_mostly;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);

bool tick_nohz_tick_stopped(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->tick_stopped;
}

bool tick_nohz_tick_stopped_cpu(int cpu)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	return ts->tick_stopped;
}

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog_sched();
}

/*
 * Updates the per-CPU idle time statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);

}

static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event();
}

static void tick_nohz_start_idle(struct tick_sched *ts)
{
	ts->idle_entrytime = ktime_get();
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
}

/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);

}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
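/*
 * Usage sketch (illustrative): a cpufreq-style consumer sampling the
 * per-CPU idle counter. Passing a non-NULL @last_update_time also folds
 * the currently running idle period into the counters first:
 *
 *	u64 last, idle_us;
 *
 *	idle_us = get_cpu_idle_time_us(cpu, &last);
 *	if (idle_us == (u64)-1)
 *		;	// NOHZ inactive, fall back to tick based sampling
 */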
/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	}

	/*
	 * Reset to make sure the next tick stop doesn't get fooled by a past
	 * cached clock deadline.
	 */
	ts->next_tick = 0;
}

static inline bool local_timer_softirq_pending(void)
{
	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}

static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
	u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
	unsigned long basejiff;
	unsigned int seq;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqcount_begin(&jiffies_seq);
		basemono = last_jiffies_update;
		basejiff = jiffies;
	} while (read_seqcount_retry(&jiffies_seq, seq));
	ts->last_jiffies = basejiff;
	ts->timer_expires_base = basemono;

	/*
	 * Keep the periodic tick when RCU, the architecture or irq_work
	 * requests it.
	 * Aside of that, check whether the local timer softirq is
	 * pending. If so, it's a bad idea to call get_next_timer_interrupt(),
	 * because there is an already expired timer, so it will request
	 * immediate expiry, which rearms the hardware timer with a
	 * minimal delta, which brings us back to this place
	 * immediately. Lather, rinse and repeat...
	 */
	if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
		next_tick = basemono + TICK_NSEC;
	} else {
		/*
		 * Get the next pending timer. If high resolution
		 * timers are enabled, this only takes the timer wheel
		 * timers into account. If high resolution timers are
		 * disabled, this also looks at the next expiring
		 * hrtimer.
		 */
		next_tmr = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tmr;
		/* Take the next rcu event into account */
		next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
	}

	/*
	 * If the tick is due in the next period, keep it ticking or
	 * force prod the timer.
	 */
	delta = next_tick - basemono;
	if (delta <= (u64)TICK_NSEC) {
		/*
		 * Tell the timer code that the base is not idle, i.e. undo
		 * the effect of get_next_timer_interrupt():
		 */
		timer_clear_idle();
		/*
		 * We've not stopped the tick yet, and there's a timer in the
		 * next period, so no point in stopping it either, bail.
		 */
		if (!ts->tick_stopped) {
			ts->timer_expires = 0;
			goto out;
		}
	}

	/*
	 * If this CPU is the one which had the do_timer() duty last, we limit
	 * the sleep time to the timekeeping max_deferment value.
	 * Otherwise we can sleep as long as we want.
	 */
	delta = timekeeping_max_deferment();
	if (cpu != tick_do_timer_cpu &&
	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
		delta = KTIME_MAX;

	/* Calculate the next expiry time */
	if (delta < (KTIME_MAX - basemono))
		expires = basemono + delta;
	else
		expires = KTIME_MAX;

	ts->timer_expires = min_t(u64, expires, next_tick);

out:
	return ts->timer_expires;
}
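/*
 * Worked example (illustrative, HZ=1000 so TICK_NSEC is 1,000,000ns):
 * with basemono = 10,000,000 and the earliest timer at 17,500,000,
 * delta = 7,500,000 > TICK_NSEC, so the tick is worth stopping and
 * ts->timer_expires becomes
 * min(17,500,000, basemono + timekeeping_max_deferment()).
 */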
static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	u64 basemono = ts->timer_expires_base;
	u64 expires = ts->timer_expires;
	ktime_t tick = expires;

	/* Make sure we won't be trying to stop it twice in a row. */
	ts->timer_expires_base = 0;

	/*
	 * If this CPU is the one which updates jiffies, then give up
	 * the assignment and let it be taken by the CPU which runs
	 * the tick timer next, which might be this CPU as well. If we
	 * don't drop this here, the jiffies might be stale and
	 * do_timer() never gets invoked. Keep track of the fact that it
	 * was the one which had the do_timer() duty last.
	 */
	if (cpu == tick_do_timer_cpu) {
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		ts->do_timer_last = 1;
	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
		ts->do_timer_last = 0;
	}

	/* Skip reprogram of event if it's not changed */
	if (ts->tick_stopped && (expires == ts->next_tick)) {
		/* Sanity check: make sure clockevent is actually programmed */
		if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
			return;

		WARN_ON_ONCE(1);
		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
			    basemono, ts->next_tick, dev->next_event,
			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
	}

	/*
	 * nohz_stop_sched_tick() can be called several times before
	 * nohz_restart_sched_tick() is called. This happens when
	 * interrupts arrive which do not cause a reschedule. In the
	 * first call we save the current tick time, so we can restart
	 * the scheduler tick in nohz_restart_sched_tick().
	 */
	if (!ts->tick_stopped) {
		calc_load_nohz_start();
		quiet_vmstat();

		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
		ts->tick_stopped = 1;
		trace_tick_stop(1, TICK_DEP_MASK_NONE);
	}

	ts->next_tick = tick;

	/*
	 * If the expiration time == KTIME_MAX, then we simply stop
	 * the tick timer.
	 */
	if (unlikely(expires == KTIME_MAX)) {
		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
			hrtimer_cancel(&ts->sched_timer);
		return;
	}

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start(&ts->sched_timer, tick,
			      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		hrtimer_set_expires(&ts->sched_timer, tick);
		tick_program_event(tick, 1);
	}
}
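/*
 * Note (illustrative): the two programming paths above differ by mode.
 * In NOHZ_MODE_HIGHRES the sched_timer hrtimer itself is (re)armed and
 * drives the tick; in low-res mode the hrtimer only carries the expiry
 * bookkeeping while the clockevent device is programmed directly via
 * tick_program_event().
 */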
static void tick_nohz_retain_tick(struct tick_sched *ts)
{
	ts->timer_expires_base = 0;
}

#ifdef CONFIG_NO_HZ_FULL
static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
{
	if (tick_nohz_next_event(ts, cpu))
		tick_nohz_stop_tick(ts, cpu);
	else
		tick_nohz_retain_tick(ts);
}
#endif /* CONFIG_NO_HZ_FULL */

static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);

	/*
	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
	 * the clock forward checks in the enqueue path:
	 */
	timer_clear_idle();

	calc_load_nohz_stop();
	touch_softlockup_watchdog_sched();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	tick_nohz_restart(ts, now);
}

static void __tick_nohz_full_update_tick(struct tick_sched *ts,
					 ktime_t now)
{
#ifdef CONFIG_NO_HZ_FULL
	int cpu = smp_processor_id();

	if (can_stop_full_tick(cpu, ts))
		tick_nohz_stop_sched_tick(ts, cpu);
	else if (ts->tick_stopped)
		tick_nohz_restart_sched_tick(ts, now);
#endif
}

static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
		return;

	__tick_nohz_full_update_tick(ts, ktime_get());
}

static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this CPU is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the CPU which runs the tick timer next. If we don't drop
	 * this here, the jiffies might be stale and do_timer() never
	 * gets invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		/*
		 * Make sure the CPU doesn't get fooled by an obsolete tick
		 * deadline if it comes back online later.
		 */
		ts->next_tick = 0;
		return false;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return false;

	if (need_resched())
		return false;

	if (unlikely(local_softirq_pending())) {
		static int ratelimit;

		if (ratelimit < 10 && !local_bh_blocked() &&
		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
			pr_warn("NOHZ tick-stop error: Non-RCU local softirq work is pending, handler #%02x!!!\n",
				(unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return false;
	}

	if (tick_nohz_full_enabled()) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around
		 */
		if (tick_do_timer_cpu == cpu)
			return false;

		/* Should not happen for nohz-full */
		if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
			return false;
	}

	return true;
}
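/*
 * Usage sketch (illustrative): how the idle loop drives the helpers
 * below, roughly mirroring do_idle() in kernel/sched/idle.c:
 *
 *	tick_nohz_idle_enter();
 *	while (!need_resched()) {
 *		tick_nohz_idle_stop_tick();	// or _retain_tick()
 *		...enter the low power state...
 *	}
 *	tick_nohz_idle_exit();
 */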
static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
{
	ktime_t expires;
	int cpu = smp_processor_id();

	/*
	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
	 * tick timer expiration time is known already.
	 */
	if (ts->timer_expires_base)
		expires = ts->timer_expires;
	else if (can_stop_idle_tick(cpu, ts))
		expires = tick_nohz_next_event(ts, cpu);
	else
		return;

	ts->idle_calls++;

	if (expires > 0LL) {
		int was_stopped = ts->tick_stopped;

		tick_nohz_stop_tick(ts, cpu);

		ts->idle_sleeps++;
		ts->idle_expires = expires;

		if (!was_stopped && ts->tick_stopped) {
			ts->idle_jiffies = ts->last_jiffies;
			nohz_balance_enter_idle(cpu);
		}
	} else {
		tick_nohz_retain_tick(ts);
	}
}

/**
 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 */
void tick_nohz_idle_stop_tick(void)
{
	__tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched));
}

void tick_nohz_idle_retain_tick(void)
{
	tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
	/*
	 * Undo the effect of get_next_timer_interrupt() called from
	 * tick_nohz_next_event().
	 */
	timer_clear_idle();
}

/**
 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
 *
 * Called when we start the idle loop.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	lockdep_assert_irqs_enabled();

	local_irq_disable();

	ts = this_cpu_ptr(&tick_cpu_sched);

	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 1;
	tick_nohz_start_idle(ts);

	local_irq_enable();
}

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->inidle)
		tick_nohz_start_idle(ts);
	else
		tick_nohz_full_update_tick(ts);
}

/**
 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
 */
bool tick_nohz_idle_got_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->got_idle_tick) {
		ts->got_idle_tick = 0;
		return true;
	}
	return false;
}
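/*
 * Usage sketch (illustrative): a cpuidle governor can use this on wakeup
 * to tell whether the CPU was woken by the tick rather than some other
 * event, and feed that into its next-interval prediction:
 *
 *	if (tick_nohz_idle_got_tick())
 *		;	// the wakeup source was the tick itself
 */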
/**
 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
 * or the tick, whichever expires first. Note that, if the tick has been
 * stopped, it returns the next hrtimer.
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_next_hrtimer(void)
{
	return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
}

/**
 * tick_nohz_get_sleep_length - return the expected length of the current sleep
 * @delta_next: duration until the next event if the tick cannot be stopped
 *
 * Called from power state control code with interrupts disabled.
 *
 * The return value of this function and/or the value returned by it through the
 * @delta_next pointer can be negative, which must be taken into account by its
 * callers.
 */
ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	int cpu = smp_processor_id();
	/*
	 * The idle entry time is expected to be a sufficient approximation of
	 * the current time at this point.
	 */
	ktime_t now = ts->idle_entrytime;
	ktime_t next_event;

	WARN_ON_ONCE(!ts->inidle);

	*delta_next = ktime_sub(dev->next_event, now);

	if (!can_stop_idle_tick(cpu, ts))
		return *delta_next;

	next_event = tick_nohz_next_event(ts, cpu);
	if (!next_event)
		return *delta_next;

	/*
	 * If the next highres timer to expire is earlier than next_event, the
	 * idle governor needs to know that.
	 */
	next_event = min_t(u64, next_event,
			   hrtimer_next_event_without(&ts->sched_timer));

	return ktime_sub(next_event, now);
}
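/*
 * Usage sketch (illustrative): an idle governor comparing the predicted
 * sleep length against a candidate C-state's target residency (the
 * target_residency_ns variable is assumed, not an API):
 *
 *	ktime_t delta_next;
 *	s64 sleep_ns = ktime_to_ns(tick_nohz_get_sleep_length(&delta_next));
 *
 *	if (sleep_ns > target_residency_ns)
 *		;	// the deep state pays off; otherwise pick a shallower one
 */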
/**
 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
 * for a particular CPU.
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
{
	struct tick_sched *ts = tick_get_tick_sched(cpu);

	return ts->idle_calls;
}

/**
 * tick_nohz_get_idle_calls - return the current idle calls counter value
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->idle_calls;
}

static void tick_nohz_account_idle_time(struct tick_sched *ts,
					ktime_t now)
{
	unsigned long ticks;

	ts->idle_exittime = now;

	if (vtime_accounting_enabled_this_cpu())
		return;
	/*
	 * We stopped the tick in idle. update_process_times() would miss the
	 * time we slept, as it does only a single tick worth of accounting.
	 * Enforce that this is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
}

void tick_nohz_idle_restart_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		ktime_t now = ktime_get();

		tick_nohz_restart_sched_tick(ts, now);
		tick_nohz_account_idle_time(ts, now);
	}
}

static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		__tick_nohz_full_update_tick(ts, now);
	else
		tick_nohz_restart_sched_tick(ts, now);

	tick_nohz_account_idle_time(ts, now);
}

/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle.
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	bool idle_active, tick_stopped;
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);
	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 0;
	idle_active = ts->idle_active;
	tick_stopped = ts->tick_stopped;

	if (idle_active || tick_stopped)
		now = ktime_get();

	if (idle_active)
		tick_nohz_stop_idle(ts, now);

	if (tick_stopped)
		tick_nohz_idle_update_tick(ts, now);

	local_irq_enable();
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	dev->next_event = KTIME_MAX;

	tick_sched_do_timer(ts, now);
	tick_sched_handle(ts, regs);

	/* No need to reprogram if we are running tickless */
	if (unlikely(ts->tick_stopped))
		return;

	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
{
	if (!tick_nohz_enabled)
		return;
	ts->nohz_mode = mode;
	/* One update is enough */
	if (!test_and_set_bit(0, &tick_nohz_active))
		timers_update_nohz();
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	if (tick_switch_to_oneshot(tick_nohz_handler))
		return;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward() with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	/* Get the next period */
	next = tick_init_jiffy_update();

	hrtimer_set_expires(&ts->sched_timer, next);
	hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
static inline void tick_nohz_irq_enter(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);
	if (ts->tick_stopped)
		tick_nohz_update_jiffies(now);
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }
static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from irq_enter() to notify about the possible interruption of idle()
 */
void tick_irq_enter(void)
{
	tick_check_oneshot_broadcast_this_cpu();
	tick_nohz_irq_enter();
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(ts, now);

	/*
	 * Do not call when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs)
		tick_sched_handle(ts, regs);
	else
		ts->next_tick = 0;

	/* No need to reprogram if we are in idle or full dynticks mode */
	if (unlikely(ts->tick_stopped))
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, TICK_NSEC);

	return HRTIMER_RESTART;
}

static int sched_skew_tick;

static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per-CPU) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
	if (sched_skew_tick) {
		u64 offset = TICK_NSEC >> 1;

		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
#endif /* HIGH_RES_TIMERS */
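/*
 * Example (boot command line, illustrative): skew_tick=1 staggers the
 * per-CPU ticks by (TICK_NSEC / 2) / num_possible_cpus() per CPU, so
 * with HZ=1000 and 4 possible CPUs the ticks of CPUs 0-3 land 0, 125,
 * 250 and 375 microseconds into the period instead of all at once.
 */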
#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	memset(ts, 0, sizeof(*ts));
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/**
 * Check if a change happened which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime). Called with interrupts disabled.
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}