/* linux/include/linux/tick.h
 *
 * This file contains the structure definitions for tick related functions
 *
 */
#ifndef _LINUX_TICK_H
#define _LINUX_TICK_H

#include <linux/clockchips.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>

#ifdef CONFIG_GENERIC_CLOCKEVENTS

enum tick_device_mode {
	TICKDEV_MODE_PERIODIC,
	TICKDEV_MODE_ONESHOT,
};

struct tick_device {
	struct clock_event_device *evtdev;
	enum tick_device_mode mode;
};

enum tick_nohz_mode {
	NOHZ_MODE_INACTIVE,
	NOHZ_MODE_LOWRES,
	NOHZ_MODE_HIGHRES,
};

/**
 * struct tick_sched - sched tick emulation and no idle tick control/stats
 * @sched_timer:	hrtimer to schedule the periodic tick in high
 *			resolution mode
 * @check_clocks:	Notification mechanism about clocksource changes
 * @nohz_mode:		Mode - one state of enum tick_nohz_mode
 * @last_tick:		Store the last tick expiry time when the tick
 *			timer is modified for nohz sleeps. This is necessary
 *			to resume the tick timer operation in the timeline
 *			when the CPU returns from nohz sleep.
 * @inidle:		Indicator that the CPU is in the tick idle mode
 * @tick_stopped:	Indicator that the idle tick has been stopped
 * @idle_jiffies:	jiffies at the entry to idle for idle time accounting
 * @idle_calls:		Total number of idle calls
 * @idle_sleeps:	Number of idle calls, where the sched tick was stopped
 * @idle_active:	Indicator that the CPU is actively in the tick idle mode;
 *			it is reset during irq handling phases
 * @idle_entrytime:	Time when the idle call was entered
 * @idle_waketime:	Time when the idle was interrupted
 * @idle_exittime:	Time when the idle state was left
 * @idle_sleeptime:	Sum of the time slept in idle with sched tick stopped
 * @iowait_sleeptime:	Sum of the time slept in idle with sched tick stopped, with IO outstanding
 * @sleep_length:	Duration of the current idle sleep
 * @do_timer_last:	CPU was the last one doing do_timer before going idle
 */
struct tick_sched {
	struct hrtimer			sched_timer;
	unsigned long			check_clocks;
	enum tick_nohz_mode		nohz_mode;
	ktime_t				last_tick;
	int				inidle;
	int				tick_stopped;
	unsigned long			idle_jiffies;
	unsigned long			idle_calls;
	unsigned long			idle_sleeps;
	int				idle_active;
	ktime_t				idle_entrytime;
	ktime_t				idle_waketime;
	ktime_t				idle_exittime;
	ktime_t				idle_sleeptime;
	ktime_t				iowait_sleeptime;
	ktime_t				sleep_length;
	unsigned long			last_jiffies;
	unsigned long			next_jiffies;
	ktime_t				idle_expires;
	int				do_timer_last;
};
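
/*
 * Usage sketch (illustrative only, not a declaration from this header):
 * the idle statistics accumulated in struct tick_sched are not meant to
 * be read directly; they are exposed per CPU through the
 * get_cpu_idle_time_us() and get_cpu_iowait_time_us() accessors declared
 * further below.  A consumer such as a cpufreq governor would typically
 * read them roughly like this, where "update_us" is a caller-provided
 * scratch variable for the accounting timestamp:
 *
 *	u64 update_us;
 *	u64 idle_us = get_cpu_idle_time_us(cpu, &update_us);
 *
 *	if (idle_us == -1ULL)
 *		return;		(NO_HZ idle accounting is not active)
 */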

extern void __init tick_init(void);
extern int tick_is_oneshot_available(void);
extern struct tick_device *tick_get_device(int cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
extern int tick_init_highres(void);
extern int tick_program_event(ktime_t expires, int force);
extern void tick_setup_sched_timer(void);
# endif

# if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
extern void tick_cancel_sched_timer(int cpu);
# else
static inline void tick_cancel_sched_timer(int cpu) { }
# endif

# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern struct tick_device *tick_get_broadcast_device(void);
extern struct cpumask *tick_get_broadcast_mask(void);

#  ifdef CONFIG_TICK_ONESHOT
extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
#  endif

# endif /* BROADCAST */

# ifdef CONFIG_TICK_ONESHOT
extern void tick_clock_notify(void);
extern int tick_check_oneshot_change(int allow_nohz);
extern struct tick_sched *tick_get_tick_sched(int cpu);
extern void tick_check_idle(int cpu);
extern int tick_oneshot_mode_active(void);
#  ifndef arch_needs_cpu
#   define arch_needs_cpu(cpu) (0)
#  endif
# else
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_check_idle(int cpu) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
# endif

#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_cancel_sched_timer(int cpu) { }
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_check_idle(int cpu) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

# ifdef CONFIG_NO_HZ_COMMON
DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched);

static inline int tick_nohz_tick_stopped(void)
{
	return __this_cpu_read(tick_cpu_sched.tick_stopped);
}

extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);

# else /* !CONFIG_NO_HZ_COMMON */
static inline int tick_nohz_tick_stopped(void)
{
	return 0;
}

static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }

static inline ktime_t tick_nohz_get_sleep_length(void)
{
	ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };

	return len;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
# endif /* !CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;
extern cpumask_var_t tick_nohz_full_mask;

static inline bool tick_nohz_full_enabled(void)
{
	if (!static_key_false(&context_tracking_enabled))
		return false;

	return tick_nohz_full_running;
}

static inline bool tick_nohz_full_cpu(int cpu)
{
	if (!tick_nohz_full_enabled())
		return false;

	return cpumask_test_cpu(cpu, tick_nohz_full_mask);
}

extern void tick_nohz_init(void);
extern void __tick_nohz_full_check(void);
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_all(void);
extern void __tick_nohz_task_switch(struct task_struct *tsk);
#else
static inline void tick_nohz_init(void) { }
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void __tick_nohz_full_check(void) { }
static inline void tick_nohz_full_kick(void) { }
static inline void tick_nohz_full_kick_all(void) { }
static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
#endif

static inline void tick_nohz_full_check(void)
{
	if (tick_nohz_full_enabled())
		__tick_nohz_full_check();
}

static inline void tick_nohz_task_switch(struct task_struct *tsk)
{
	if (tick_nohz_full_enabled())
		__tick_nohz_task_switch(tsk);
}

#endif
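
/*
 * Example (illustrative sketch, not part of this header): the generic idle
 * loop is expected to bracket its low power wait with the NO_HZ hooks
 * declared above, roughly as follows.  arch_cpu_wait() is a placeholder
 * for whatever architecture specific halt mechanism is used:
 *
 *	tick_nohz_idle_enter();
 *	while (!need_resched())
 *		arch_cpu_wait();
 *	tick_nohz_idle_exit();
 *
 * Between enter and exit, tick_nohz_get_sleep_length() reports how long
 * the CPU may sleep before the next expected timer event, which cpuidle
 * governors can use when picking an idle state.
 */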