tick-sched.c (b8f61116c1ce342804a0897b0a80eb4df5f19453) | tick-sched.c (d6ad418763888f617ac5b4849823e4cd670df1dd) |
---|---|
1/* 2 * linux/kernel/time/tick-sched.c 3 * 4 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> 5 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar 6 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner 7 * 8 * No idle tick implementation for low and high resolution timers --- 17 unchanged lines hidden (view full) --- 26#include "tick-internal.h" 27 28/* 29 * Per cpu nohz control structure 30 */ 31static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); 32 33/* | 1/* 2 * linux/kernel/time/tick-sched.c 3 * 4 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> 5 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar 6 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner 7 * 8 * No idle tick implementation for low and high resolution timers --- 17 unchanged lines hidden (view full) --- 26#include "tick-internal.h" 27 28/* 29 * Per cpu nohz control structure 30 */ 31static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); 32 33/* |
34 * The time, when the last jiffy update happened. Protected by xtime_lock. | 34 * The time, when the last jiffy update happened. Protected by jiffies_lock. |
35 */ 36static ktime_t last_jiffies_update; 37 38struct tick_sched *tick_get_tick_sched(int cpu) 39{ 40 return &per_cpu(tick_cpu_sched, cpu); 41} 42 43/* 44 * Must be called with interrupts disabled ! 45 */ 46static void tick_do_update_jiffies64(ktime_t now) 47{ 48 unsigned long ticks = 0; 49 ktime_t delta; 50 51 /* | 35 */ 36static ktime_t last_jiffies_update; 37 38struct tick_sched *tick_get_tick_sched(int cpu) 39{ 40 return &per_cpu(tick_cpu_sched, cpu); 41} 42 43/* 44 * Must be called with interrupts disabled ! 45 */ 46static void tick_do_update_jiffies64(ktime_t now) 47{ 48 unsigned long ticks = 0; 49 ktime_t delta; 50 51 /* |
52 * Do a quick check without holding xtime_lock: | 52 * Do a quick check without holding jiffies_lock: |
53 */ 54 delta = ktime_sub(now, last_jiffies_update); 55 if (delta.tv64 < tick_period.tv64) 56 return; 57 | 53 */ 54 delta = ktime_sub(now, last_jiffies_update); 55 if (delta.tv64 < tick_period.tv64) 56 return; 57 |
58 /* Reevaluate with xtime_lock held */ 59 write_seqlock(&xtime_lock); | 58 /* Reevaluate with jiffies_lock held */ 59 write_seqlock(&jiffies_lock); |
60 61 delta = ktime_sub(now, last_jiffies_update); 62 if (delta.tv64 >= tick_period.tv64) { 63 64 delta = ktime_sub(delta, tick_period); 65 last_jiffies_update = ktime_add(last_jiffies_update, 66 tick_period); 67 --- 6 unchanged lines hidden (view full) --- 74 last_jiffies_update = ktime_add_ns(last_jiffies_update, 75 incr * ticks); 76 } 77 do_timer(++ticks); 78 79 /* Keep the tick_next_period variable up to date */ 80 tick_next_period = ktime_add(last_jiffies_update, tick_period); 81 } | 60 61 delta = ktime_sub(now, last_jiffies_update); 62 if (delta.tv64 >= tick_period.tv64) { 63 64 delta = ktime_sub(delta, tick_period); 65 last_jiffies_update = ktime_add(last_jiffies_update, 66 tick_period); 67 --- 6 unchanged lines hidden (view full) --- 74 last_jiffies_update = ktime_add_ns(last_jiffies_update, 75 incr * ticks); 76 } 77 do_timer(++ticks); 78 79 /* Keep the tick_next_period variable up to date */ 80 tick_next_period = ktime_add(last_jiffies_update, tick_period); 81 } |
82 write_sequnlock(&xtime_lock); | 82 write_sequnlock(&jiffies_lock); |
83} 84 85/* 86 * Initialize and retrieve the jiffies update. 87 */ 88static ktime_t tick_init_jiffy_update(void) 89{ 90 ktime_t period; 91 | 83} 84 85/* 86 * Initialize and retrieve the jiffies update. 87 */ 88static ktime_t tick_init_jiffy_update(void) 89{ 90 ktime_t period; 91 |
92 write_seqlock(&xtime_lock); | 92 write_seqlock(&jiffies_lock); |
93 /* Did we start the jiffies update yet ? */ 94 if (last_jiffies_update.tv64 == 0) 95 last_jiffies_update = tick_next_period; 96 period = last_jiffies_update; | 93 /* Did we start the jiffies update yet ? */ 94 if (last_jiffies_update.tv64 == 0) 95 last_jiffies_update = tick_next_period; 96 period = last_jiffies_update; |
97 write_sequnlock(&xtime_lock); | 97 write_sequnlock(&jiffies_lock); |
98 return period; 99} 100 | 98 return period; 99} 100 |
101 102static void tick_sched_do_timer(ktime_t now) 103{ 104 int cpu = smp_processor_id(); 105 106#ifdef CONFIG_NO_HZ 107 /* 108 * Check if the do_timer duty was dropped. We don't care about 109 * concurrency: This happens only when the cpu in charge went 110 * into a long sleep. If two cpus happen to assign themself to 111 * this duty, then the jiffies update is still serialized by 112 * xtime_lock. 113 */ 114 if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) 115 tick_do_timer_cpu = cpu; 116#endif 117 118 /* Check, if the jiffies need an update */ 119 if (tick_do_timer_cpu == cpu) 120 tick_do_update_jiffies64(now); 121} 122 123static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) 124{ 125#ifdef CONFIG_NO_HZ 126 /* 127 * When we are idle and the tick is stopped, we have to touch 128 * the watchdog as we might not schedule for a really long 129 * time. This happens on complete idle SMP systems while 130 * waiting on the login prompt. We also increment the "start of 131 * idle" jiffy stamp so the idle accounting adjustment we do 132 * when we go busy again does not account too much ticks. 133 */ 134 if (ts->tick_stopped) { 135 touch_softlockup_watchdog(); 136 if (is_idle_task(current)) 137 ts->idle_jiffies++; 138 } 139#endif 140 update_process_times(user_mode(regs)); 141 profile_tick(CPU_PROFILING); 142} 143 | |
144/* 145 * NOHZ - aka dynamic tick functionality 146 */ 147#ifdef CONFIG_NO_HZ 148/* 149 * NO HZ enabled ? 150 */ 151int tick_nohz_enabled __read_mostly = 1; --- 168 unchanged lines hidden (view full) --- 320 unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; 321 ktime_t last_update, expires, ret = { .tv64 = 0 }; 322 unsigned long rcu_delta_jiffies; 323 struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; 324 u64 time_delta; 325 326 /* Read jiffies and the time when jiffies were updated last */ 327 do { | 101/* 102 * NOHZ - aka dynamic tick functionality 103 */ 104#ifdef CONFIG_NO_HZ 105/* 106 * NO HZ enabled ? 107 */ 108int tick_nohz_enabled __read_mostly = 1; --- 168 unchanged lines hidden (view full) --- 277 unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; 278 ktime_t last_update, expires, ret = { .tv64 = 0 }; 279 unsigned long rcu_delta_jiffies; 280 struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; 281 u64 time_delta; 282 283 /* Read jiffies and the time when jiffies were updated last */ 284 do { |
328 seq = read_seqbegin(&xtime_lock); | 285 seq = read_seqbegin(&jiffies_lock); |
329 last_update = last_jiffies_update; 330 last_jiffies = jiffies; 331 time_delta = timekeeping_max_deferment(); | 286 last_update = last_jiffies_update; 287 last_jiffies = jiffies; 288 time_delta = timekeeping_max_deferment(); |
332 } while (read_seqretry(&xtime_lock, seq)); | 289 } while (read_seqretry(&jiffies_lock, seq)); |
333 334 if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) || 335 arch_needs_cpu(cpu)) { 336 next_jiffies = last_jiffies + 1; 337 delta_jiffies = 1; 338 } else { 339 /* Get the next timer wheel timer */ 340 next_jiffies = get_next_timer_interrupt(last_jiffies); --- 345 unchanged lines hidden (view full) --- 686 687/* 688 * The nohz low res interrupt handler 689 */ 690static void tick_nohz_handler(struct clock_event_device *dev) 691{ 692 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 693 struct pt_regs *regs = get_irq_regs(); | 290 291 if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) || 292 arch_needs_cpu(cpu)) { 293 next_jiffies = last_jiffies + 1; 294 delta_jiffies = 1; 295 } else { 296 /* Get the next timer wheel timer */ 297 next_jiffies = get_next_timer_interrupt(last_jiffies); --- 345 unchanged lines hidden (view full) --- 643 644/* 645 * The nohz low res interrupt handler 646 */ 647static void tick_nohz_handler(struct clock_event_device *dev) 648{ 649 struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); 650 struct pt_regs *regs = get_irq_regs(); |
651 int cpu = smp_processor_id(); |
|
694 ktime_t now = ktime_get(); 695 696 dev->next_event.tv64 = KTIME_MAX; 697 | 652 ktime_t now = ktime_get(); 653 654 dev->next_event.tv64 = KTIME_MAX; 655 |
698 tick_sched_do_timer(now); 699 tick_sched_handle(ts, regs); | 656 /* 657 * Check if the do_timer duty was dropped. We don't care about 658 * concurrency: This happens only when the cpu in charge went 659 * into a long sleep. If two cpus happen to assign themself to 660 * this duty, then the jiffies update is still serialized by 661 * jiffies_lock. 662 */ 663 if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) 664 tick_do_timer_cpu = cpu; |
700 | 665 |
666 /* Check, if the jiffies need an update */ 667 if (tick_do_timer_cpu == cpu) 668 tick_do_update_jiffies64(now); 669 670 /* 671 * When we are idle and the tick is stopped, we have to touch 672 * the watchdog as we might not schedule for a really long 673 * time. This happens on complete idle SMP systems while 674 * waiting on the login prompt. We also increment the "start 675 * of idle" jiffy stamp so the idle accounting adjustment we 676 * do when we go busy again does not account too much ticks. 677 */ 678 if (ts->tick_stopped) { 679 touch_softlockup_watchdog(); 680 ts->idle_jiffies++; 681 } 682 683 update_process_times(user_mode(regs)); 684 profile_tick(CPU_PROFILING); 685 |
|
701 while (tick_nohz_reprogram(ts, now)) { 702 now = ktime_get(); 703 tick_do_update_jiffies64(now); 704 } 705} 706 707/** 708 * tick_nohz_switch_to_nohz - switch to nohz mode --- 95 unchanged lines hidden (view full) --- 804} 805 806/* 807 * High resolution timer specific code 808 */ 809#ifdef CONFIG_HIGH_RES_TIMERS 810/* 811 * We rearm the timer until we get disabled by the idle code. | 686 while (tick_nohz_reprogram(ts, now)) { 687 now = ktime_get(); 688 tick_do_update_jiffies64(now); 689 } 690} 691 692/** 693 * tick_nohz_switch_to_nohz - switch to nohz mode --- 95 unchanged lines hidden (view full) --- 789} 790 791/* 792 * High resolution timer specific code 793 */ 794#ifdef CONFIG_HIGH_RES_TIMERS 795/* 796 * We rearm the timer until we get disabled by the idle code. |
812 * Called with interrupts disabled. | 797 * Called with interrupts disabled and timer->base->cpu_base->lock held. |
813 */ 814static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) 815{ 816 struct tick_sched *ts = 817 container_of(timer, struct tick_sched, sched_timer); 818 struct pt_regs *regs = get_irq_regs(); 819 ktime_t now = ktime_get(); | 798 */ 799static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) 800{ 801 struct tick_sched *ts = 802 container_of(timer, struct tick_sched, sched_timer); 803 struct pt_regs *regs = get_irq_regs(); 804 ktime_t now = ktime_get(); |
805 int cpu = smp_processor_id(); |
|
820 | 806 |
821 tick_sched_do_timer(now); | 807#ifdef CONFIG_NO_HZ 808 /* 809 * Check if the do_timer duty was dropped. We don't care about 810 * concurrency: This happens only when the cpu in charge went 811 * into a long sleep. If two cpus happen to assign themself to 812 * this duty, then the jiffies update is still serialized by 813 * jiffies_lock. 814 */ 815 if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) 816 tick_do_timer_cpu = cpu; 817#endif |
822 | 818 |
819 /* Check, if the jiffies need an update */ 820 if (tick_do_timer_cpu == cpu) 821 tick_do_update_jiffies64(now); 822 |
|
823 /* 824 * Do not call, when we are not in irq context and have 825 * no valid regs pointer 826 */ | 823 /* 824 * Do not call, when we are not in irq context and have 825 * no valid regs pointer 826 */ |
827 if (regs) 828 tick_sched_handle(ts, regs); | 827 if (regs) { 828 /* 829 * When we are idle and the tick is stopped, we have to touch 830 * the watchdog as we might not schedule for a really long 831 * time. This happens on complete idle SMP systems while 832 * waiting on the login prompt. We also increment the "start of 833 * idle" jiffy stamp so the idle accounting adjustment we do 834 * when we go busy again does not account too much ticks. 835 */ 836 if (ts->tick_stopped) { 837 touch_softlockup_watchdog(); 838 if (is_idle_task(current)) 839 ts->idle_jiffies++; 840 } 841 update_process_times(user_mode(regs)); 842 profile_tick(CPU_PROFILING); 843 } |
829 830 hrtimer_forward(timer, now, tick_period); 831 832 return HRTIMER_RESTART; 833} 834 835static int sched_skew_tick; 836 --- 112 unchanged lines hidden --- | 844 845 hrtimer_forward(timer, now, tick_period); 846 847 return HRTIMER_RESTART; 848} 849 850static int sched_skew_tick; 851 --- 112 unchanged lines hidden --- |