/*  linux/include/linux/tick.h
 *
 *  This file contains the structure definitions for tick-related functions
 *
 */
#ifndef _LINUX_TICK_H
#define _LINUX_TICK_H

#include <linux/clockchips.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>

#ifdef CONFIG_GENERIC_CLOCKEVENTS

enum tick_device_mode {
	TICKDEV_MODE_PERIODIC,
	TICKDEV_MODE_ONESHOT,
};

struct tick_device {
	struct clock_event_device *evtdev;
	enum tick_device_mode mode;
};

enum tick_nohz_mode {
	NOHZ_MODE_INACTIVE,
	NOHZ_MODE_LOWRES,
	NOHZ_MODE_HIGHRES,
};

/**
 * struct tick_sched - sched tick emulation and no idle tick control/stats
 * @sched_timer:	hrtimer to schedule the periodic tick in high
 *			resolution mode
 * @check_clocks:	Notification mechanism about clocksource changes
 * @nohz_mode:		Mode - one state of enum tick_nohz_mode
 * @last_tick:		Store the last tick expiry time when the tick
 *			timer is modified for nohz sleeps. This is necessary
 *			to resume the tick timer operation in the timeline
 *			when the CPU returns from nohz sleep.
 * @inidle:		Indicator that the CPU is in the idle loop
 * @tick_stopped:	Indicator that the idle tick has been stopped
 * @idle_jiffies:	jiffies at the entry to idle for idle time accounting
 * @idle_calls:		Total number of idle calls
 * @idle_sleeps:	Number of idle calls, where the sched tick was stopped
 * @idle_active:	Indicator that the CPU is actively in the tick idle mode;
 *			it is reset during irq handling phases.
 * @idle_entrytime:	Time when the idle call was entered
 * @idle_waketime:	Time when the idle was interrupted
 * @idle_exittime:	Time when the idle state was left
 * @idle_sleeptime:	Sum of the time slept in idle with sched tick stopped
 * @iowait_sleeptime:	Sum of the time slept in idle with sched tick stopped,
 *			with IO outstanding
 * @sleep_length:	Duration of the current idle sleep
 * @last_jiffies:	Base jiffies snapshot when the next tick event was
 *			last computed
 * @next_jiffies:	Next jiffy at which a pending timer is due
 * @idle_expires:	Expiry time of the first timer event after the tick
 *			was stopped for an idle sleep
 * @do_timer_last:	CPU was the last one doing do_timer before going idle
 */
struct tick_sched {
	struct hrtimer			sched_timer;
	unsigned long			check_clocks;
	enum tick_nohz_mode		nohz_mode;
	ktime_t				last_tick;
	int				inidle;
	int				tick_stopped;
	unsigned long			idle_jiffies;
	unsigned long			idle_calls;
	unsigned long			idle_sleeps;
	int				idle_active;
	ktime_t				idle_entrytime;
	ktime_t				idle_waketime;
	ktime_t				idle_exittime;
	ktime_t				idle_sleeptime;
	ktime_t				iowait_sleeptime;
	ktime_t				sleep_length;
	unsigned long			last_jiffies;
	unsigned long			next_jiffies;
	ktime_t				idle_expires;
	int				do_timer_last;
};
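
/*
 * Illustrative sketch (an assumption, not a definition from this header):
 * there is one tick_sched instance per CPU (tick_cpu_sched, declared below
 * under CONFIG_NO_HZ_COMMON), and the timer core inspects it roughly like:
 *
 *	struct tick_sched *ts = tick_get_tick_sched(smp_processor_id());
 *
 *	if (ts->tick_stopped)
 *		...		the periodic tick is currently off on this CPU
 *
 * tick_get_tick_sched() is declared below under CONFIG_TICK_ONESHOT.
 */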

extern void __init tick_init(void);
extern int tick_is_oneshot_available(void);
extern struct tick_device *tick_get_device(int cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
extern int tick_init_highres(void);
extern int tick_program_event(ktime_t expires, int force);
extern void tick_setup_sched_timer(void);
# endif

# if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
extern void tick_cancel_sched_timer(int cpu);
# else
static inline void tick_cancel_sched_timer(int cpu) { }
# endif

# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern struct tick_device *tick_get_broadcast_device(void);
extern struct cpumask *tick_get_broadcast_mask(void);

#  ifdef CONFIG_TICK_ONESHOT
extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
#  endif

# endif /* BROADCAST */

# ifdef CONFIG_TICK_ONESHOT
extern void tick_clock_notify(void);
extern int tick_check_oneshot_change(int allow_nohz);
extern struct tick_sched *tick_get_tick_sched(int cpu);
extern void tick_irq_enter(void);
extern int tick_oneshot_mode_active(void);
#  ifndef arch_needs_cpu
#   define arch_needs_cpu() (0)
#  endif
# else
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_irq_enter(void) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
# endif

#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_cancel_sched_timer(int cpu) { }
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_irq_enter(void) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

# ifdef CONFIG_NO_HZ_COMMON
DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched);

static inline int tick_nohz_tick_stopped(void)
{
	return __this_cpu_read(tick_cpu_sched.tick_stopped);
}

extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
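
/*
 * Example (a minimal sketch, not code taken from this header): a
 * nohz-aware caller such as a cpuidle or cpufreq governor might combine
 * these helpers; "cpu" is assumed to be a valid, online CPU number.
 *
 *	ktime_t slp = tick_nohz_get_sleep_length();
 *	u64 idle_us = get_cpu_idle_time_us(cpu, NULL);
 *	u64 iowait_us = get_cpu_iowait_time_us(cpu, NULL);
 *
 * The *_time_us() helpers return the cumulative idle/iowait time in
 * microseconds, or -1 when NOHZ idle time accounting is not active.
 */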

# else /* !CONFIG_NO_HZ_COMMON */
static inline int tick_nohz_tick_stopped(void)
{
	return 0;
}

static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }

static inline ktime_t tick_nohz_get_sleep_length(void)
{
	ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };

	return len;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
# endif /* !CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;
extern cpumask_var_t tick_nohz_full_mask;
extern cpumask_var_t housekeeping_mask;

static inline bool tick_nohz_full_enabled(void)
{
	if (!context_tracking_is_enabled())
		return false;

	return tick_nohz_full_running;
}

static inline bool tick_nohz_full_cpu(int cpu)
{
	if (!tick_nohz_full_enabled())
		return false;

	return cpumask_test_cpu(cpu, tick_nohz_full_mask);
}
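
/*
 * Illustrative use (an assumption, not taken from this file): periodic
 * per-CPU bookkeeping can be skipped on full-dynticks CPUs, e.g.:
 *
 *	if (tick_nohz_full_cpu(cpu))
 *		return;		leave the isolated CPU undisturbed
 *
 * The check is cheap when NO_HZ_FULL is not active because
 * tick_nohz_full_enabled() bails out first.
 */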

extern void __tick_nohz_full_check(void);
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
extern void tick_nohz_full_kick_all(void);
extern void __tick_nohz_task_switch(struct task_struct *tsk);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void __tick_nohz_full_check(void) { }
static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void tick_nohz_full_kick(void) { }
static inline void tick_nohz_full_kick_all(void) { }
static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
#endif

static inline bool is_housekeeping_cpu(int cpu)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled())
		return cpumask_test_cpu(cpu, housekeeping_mask);
#endif
	return true;
}

static inline void housekeeping_affine(struct task_struct *t)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled())
		set_cpus_allowed_ptr(t, housekeeping_mask);
#endif
}
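
/*
 * Example sketch (an assumption; "my_kthread_fn" is a hypothetical thread
 * function, not something defined here): an unbound kernel thread that
 * should stay off nohz_full CPUs can confine itself at startup:
 *
 *	static int my_kthread_fn(void *unused)
 *	{
 *		housekeeping_affine(current);
 *		...
 *	}
 *
 * When NO_HZ_FULL is disabled or inactive, is_housekeeping_cpu() returns
 * true and housekeeping_affine() is a no-op, so callers need no extra
 * guards.
 */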

static inline void tick_nohz_full_check(void)
{
	if (tick_nohz_full_enabled())
		__tick_nohz_full_check();
}

static inline void tick_nohz_task_switch(struct task_struct *tsk)
{
	if (tick_nohz_full_enabled())
		__tick_nohz_task_switch(tsk);
}

#endif /* _LINUX_TICK_H */
231