/*
 *  linux/kernel/time/tick-sched.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  No idle tick implementation for low and high resolution timers
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Per cpu nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

/*
 * The time when the last jiffy update happened. Protected by xtime_lock.
 */
static ktime_t last_jiffies_update;

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding xtime_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 < tick_period.tv64)
		return;

	/* Reevaluate with xtime_lock held */
	write_seqlock(&xtime_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	}
	write_sequnlock(&xtime_lock);
}

/*
 * Initialize and return the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&xtime_lock);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&xtime_lock);
	return period;
}

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ
/*
 * NO HZ enabled ?
 */
static int tick_nohz_enabled __read_mostly = 1;

/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	if (!strcmp(str, "off"))
		tick_nohz_enabled = 0;
	else if (!strcmp(str, "on"))
		tick_nohz_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("nohz=", setup_tick_nohz);

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu, which has the update task assigned, is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long flags;

	ts->idle_waketime = now;

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog();
}

/*
 * Updates the per cpu time idle statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);

}

/* Account the idle period which just ended and mark the cpu as non-idle */
static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	update_ts_time_stats(cpu, ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event(0);
}

/* Record the idle entry time and mark the cpu as idle */
static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
{
	ktime_t now;

	now = ktime_get();

	update_ts_time_stats(cpu, ts, now, NULL);

	ts->idle_entrytime = now;
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
	return now;
}

/**
 * get_cpu_idle_time_us - get the total idle time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_enabled)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);

}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

/**
 * get_cpu_iowait_time_us - get the total iowait time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_enabled)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick.
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(int inidle)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	/*
	 * Call to tick_nohz_start_idle stops the last_update_time from being
	 * updated. Thus, it must not be called in the event we are called from
	 * irq_exit() with the prior state different than idle.
	 */
	if (!inidle && !ts->inidle)
		goto end;

	/*
	 * Set ts->inidle unconditionally. Even if the system did not
	 * switch to NOHZ mode the cpu frequency governors rely on the
	 * update of the idle time accounting in tick_nohz_start_idle().
	 */
	ts->inidle = 1;

	now = tick_nohz_start_idle(cpu, ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	if (need_resched())
		goto end;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		goto end;
	}

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
	}
	/*
	 * Do not stop the tick if we are only one jiffy off
	 * or if the cpu is required for RCU.
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffy off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked. Keep track of the fact that it was the one
		 * which had the do_timer() duty last. If this cpu is
		 * the one which had the do_timer() duty last, we
		 * limit the sleep time to the timekeeping
		 * max_deferment value which we retrieved
		 * above. Otherwise we can sleep as long as we want.
		 */
		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		/*
		 * Calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		/* Skip reprogramming of the event if it's not changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule.
		 * In the first call we save the current tick time, so we
		 * can restart the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			select_nohz_load_balancer(1);

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
			rcu_enter_nohz();
		}

		ts->idle_sleeps++;

		/* Mark expires */
		ts->idle_expires = expires;

		/*
		 * If the expiration time == KTIME_MAX, then we simply
		 * stop the tick timer.
		 */
		if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
			goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffy boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);
end:
	local_irq_restore(flags);
}

/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	return ts->sleep_length;
}

/* Rearm the tick timer on the first tick boundary after 'now' */
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start_expires(&ts->sched_timer,
					      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(
			    hrtimer_get_expires(&ts->sched_timer), 0))
				break;
		}
		/* Update jiffies and reread time */
		tick_do_update_jiffies64(now);
		now = ktime_get();
	}
}

/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	unsigned long ticks;
#endif
	ktime_t now;

	local_irq_disable();
	if (ts->idle_active || (ts->inidle && ts->tick_stopped))
		now = ktime_get();

	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);

	if (!ts->inidle || !ts->tick_stopped) {
		ts->inidle = 0;
		local_irq_enable();
		return;
	}

	ts->inidle = 0;

	rcu_exit_nohz();

	/* Update jiffies first */
	select_nohz_load_balancer(0);
	tick_do_update_jiffies64(now);

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it does only a 1 tick accounting.
	 * Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif

	touch_softlockup_watchdog();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);

	local_irq_enable();
}

/* Forward the tick timer by one period and reprogram the event device */
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
	hrtimer_forward(&ts->sched_timer, now, tick_period);
	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	int cpu = smp_processor_id();
	ktime_t now = ktime_get();

	dev->next_event.tv64 = KTIME_MAX;

	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on completely idle SMP systems while
	 * waiting on the login prompt. We also increment the "start
	 * of idle" jiffy stamp so the idle accounting adjustment we
	 * do when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		ts->idle_jiffies++;
	}

	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);

	while (tick_nohz_reprogram(ts, now)) {
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	local_irq_disable();
	if (tick_switch_to_oneshot(tick_nohz_handler)) {
		local_irq_enable();
		return;
	}

	ts->nohz_mode = NOHZ_MODE_LOWRES;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	for (;;) {
		hrtimer_set_expires(&ts->sched_timer, next);
		if (!tick_program_event(next, 0))
			break;
		next = ktime_add(next, tick_period);
	}
	local_irq_enable();
}

/*
 * When NOHZ is enabled and the tick is stopped, we need to kick the
 * tick timer from irq_enter() so that the jiffies update is kept
 * alive during long running softirqs. That's ugly as hell, but
 * correctness is key even if we need to fix the offending softirq in
 * the first place.
 *
 * Note, this is different from tick_nohz_restart.
 * We just kick the timer and do not touch the other magic bits which
 * need to be done when idle is left.
 */
static void tick_nohz_kick_tick(int cpu, ktime_t now)
{
#if 0
	/* Switch back to 2.6.27 behaviour */

	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t delta;

	/*
	 * Do not touch the tick device when the next expiry is either
	 * already reached or less than or equal to the tick period.
	 */
	delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
	if (delta.tv64 <= tick_period.tv64)
		return;

	tick_nohz_restart(ts, now);
#endif
}

/* Account a pending idle period and keep jiffies up to date on irq entry */
static inline void tick_check_nohz(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);
	if (ts->tick_stopped) {
		tick_nohz_update_jiffies(now);
		tick_nohz_kick_tick(cpu, now);
	}
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_check_nohz(int cpu) { }

#endif /* NO_HZ */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_check_idle(int cpu)
{
	tick_check_oneshot_broadcast(cpu);
	tick_check_nohz(cpu);
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs) {
		/*
		 * When we are idle and the tick is stopped, we have to touch
		 * the watchdog as we might not schedule for a really long
		 * time. This happens on completely idle SMP systems while
		 * waiting on the login prompt. We also increment the "start of
		 * idle" jiffy stamp so the idle accounting adjustment we do
		 * when we go busy again does not account too many ticks.
		 */
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);
	}

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per cpu) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	for (;;) {
		hrtimer_forward(&ts->sched_timer, now, tick_period);
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED);
		/* Check, if the timer was already in the past */
		if (hrtimer_active(&ts->sched_timer))
			break;
		now = ktime_get();
	}

#ifdef CONFIG_NO_HZ
	if (tick_nohz_enabled)
		ts->nohz_mode = NOHZ_MODE_HIGHRES;
#endif
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	ts->nohz_mode = NOHZ_MODE_INACTIVE;
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/**
 * Check if a change happened which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at
 * compile time or at runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}