/* SPDX-License-Identifier: GPL-2.0 */
/*  linux/include/linux/clocksource.h
 *
 *  This file contains the structure definitions for clocksources.
 *
 *  If you are not a clocksource, or timekeeping code, you should
 *  not be including this file!
 */
#ifndef _LINUX_CLOCKSOURCE_H
#define _LINUX_CLOCKSOURCE_H

#include <linux/types.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/clocksource_ids.h>
#include <asm/div64.h>
#include <asm/io.h>

struct clocksource;
struct module;

#if defined(CONFIG_ARCH_CLOCKSOURCE_DATA) || \
    defined(CONFIG_GENERIC_GETTIMEOFDAY)
#include <asm/clocksource.h>
#endif

#include <vdso/clocksource.h>

/**
 * struct clocksource - hardware abstraction for a free running counter
 *	Provides mostly state-free accessors to the underlying hardware.
 *	This is the structure used for system time.
 *
 * @read:		Returns a cycle value, passes clocksource as argument
 * @mask:		Bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		Cycle to nanosecond multiplier
 * @shift:		Cycle to nanosecond divisor (power of two)
 * @max_idle_ns:	Maximum idle time permitted by the clocksource (nsecs)
 * @maxadj:		Maximum adjustment value to mult (~11%)
 * @uncertainty_margin:	Maximum uncertainty in nanoseconds per half second.
 *			Zero says to use default WATCHDOG_THRESHOLD.
 * @archdata:		Optional arch-specific data
 * @max_cycles:		Maximum safe cycle value which won't overflow on
 *			multiplication
 * @name:		Pointer to clocksource name
 * @list:		List head for registration (internal)
 * @rating:		Rating value for selection (higher is better)
 *			To avoid rating inflation the following
 *			list should give you a guide as to how
 *			to assign your clocksource a rating
 *			1-99: Unfit for real use
 *				Only available for bootup and testing purposes.
 *			100-199: Base level usability.
 *				Functional for real use, but not desired.
 *			200-299: Good.
 *				A correct and usable clocksource.
 *			300-399: Desired.
 *				A reasonably fast and accurate clocksource.
 *			400-499: Perfect
 *				The ideal clocksource. A must-use where
 *				available.
 * @id:			Defaults to CSID_GENERIC. The id value is captured
 *			in certain snapshot functions to allow callers to
 *			validate the clocksource from which the snapshot was
 *			taken.
 * @vdso_clock_mode:	vDSO clock mode
 * @flags:		Flags describing special properties
 * @enable:		Optional function to enable the clocksource
 * @disable:		Optional function to disable the clocksource
 * @suspend:		Optional suspend function for the clocksource
 * @resume:		Optional resume function for the clocksource
 * @mark_unstable:	Optional function to inform the clocksource driver that
 *			the watchdog marked the clocksource unstable
 * @tick_stable:	Optional function called periodically from the watchdog
 *			code to provide stable synchronization points
 * @wd_list:		List head to enqueue into the watchdog list (internal)
 * @cs_last:		Last clocksource value for clocksource watchdog
 * @wd_last:		Last watchdog value corresponding to @cs_last
 * @owner:		Module reference, must be set by clocksource in modules
 *
 * Note: This struct is not used in hot paths of the timekeeping code
 * because the timekeeper caches the hot path fields in its own data
 * structure, so no cache line alignment is required.
 *
 * The pointer to the clocksource itself is handed to the read
 * callback. If you need extra information there, you can wrap struct
 * clocksource in your own struct. Depending on the amount of
 * information you need, you should consider cache line aligning that
 * structure.
 */
struct clocksource {
	u64			(*read)(struct clocksource *cs);
	u64			mask;
	u32			mult;
	u32			shift;
	u64			max_idle_ns;
	u32			maxadj;
	u32			uncertainty_margin;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
	struct arch_clocksource_data archdata;
#endif
	u64			max_cycles;
	const char		*name;
	struct list_head	list;
	int			rating;
	enum clocksource_ids	id;
	enum vdso_clock_mode	vdso_clock_mode;
	unsigned long		flags;

	int			(*enable)(struct clocksource *cs);
	void			(*disable)(struct clocksource *cs);
	void			(*suspend)(struct clocksource *cs);
	void			(*resume)(struct clocksource *cs);
	void			(*mark_unstable)(struct clocksource *cs);
	void			(*tick_stable)(struct clocksource *cs);

	/* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head	wd_list;
	u64			cs_last;
	u64			wd_last;
#endif
	struct module		*owner;
};
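
/*
 * Illustrative sketch: as the kernel-doc above suggests, a driver that needs
 * per-instance data can embed struct clocksource in its own structure and
 * recover it in the read callback via container_of(). All names below
 * (my_clocksource, my_cs_read, MY_COUNTER, the __iomem base) are
 * hypothetical.
 *
 *	struct my_clocksource {
 *		struct clocksource	cs;
 *		void __iomem		*base;
 *	};
 *
 *	static u64 my_cs_read(struct clocksource *cs)
 *	{
 *		struct my_clocksource *mcs =
 *			container_of(cs, struct my_clocksource, cs);
 *
 *		return readl_relaxed(mcs->base + MY_COUNTER);
 *	}
 */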

/*
 * Clock source flags bits::
 */
#define CLOCK_SOURCE_IS_CONTINUOUS		0x01
#define CLOCK_SOURCE_MUST_VERIFY		0x02

#define CLOCK_SOURCE_WATCHDOG			0x10
#define CLOCK_SOURCE_VALID_FOR_HRES		0x20
#define CLOCK_SOURCE_UNSTABLE			0x40
#define CLOCK_SOURCE_SUSPEND_NONSTOP		0x80
#define CLOCK_SOURCE_RESELECT			0x100
#define CLOCK_SOURCE_VERIFY_PERCPU		0x200
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
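
/*
 * For example, CLOCKSOURCE_MASK(32) evaluates to 0xffffffffULL and
 * CLOCKSOURCE_MASK(64) to ~0ULL, matching 32 bit and 64 bit wide
 * counters respectively.
 */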

static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
{
	/*  freq = cyc/from
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = from/freq * 2^shift
	 *  mult = from * 2^shift / freq
	 *  mult = (from<<shift) / freq
	 */
	u64 tmp = ((u64)from) << shift_constant;

	tmp += freq/2; /* round for do_div */
	do_div(tmp, freq);

	return (u32)tmp;
}
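
/*
 * Worked example with illustrative values: a 1 MHz counter and shift = 10
 * give mult = (NSEC_PER_SEC << 10) / 1000000 = 1024000, so a single cycle
 * converts to (1 * 1024000) >> 10 = 1000 ns, as expected for a 1 MHz clock:
 *
 *	u32 mult = clocksource_freq2mult(1000000, 10, NSEC_PER_SEC);	// 1024000
 */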

/**
 * clocksource_khz2mult - calculates mult from khz and shift
 * @khz:		Clocksource frequency in KHz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a khz counter frequency to a clocksource
 * multiplier, given the clocksource shift value
 */
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	return clocksource_freq2mult(khz, shift_constant, NSEC_PER_MSEC);
}

/**
 * clocksource_hz2mult - calculates mult from hz and shift
 * @hz:			Clocksource frequency in Hz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a hz counter
 * frequency to a clocksource multiplier, given the
 * clocksource shift value
 */
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
	return clocksource_freq2mult(hz, shift_constant, NSEC_PER_SEC);
}

/**
 * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
 * @cycles:	cycles
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 *
 * Converts clocksource cycles to nanoseconds, using the given @mult and @shift.
 * The code is optimized for performance and is not intended to work
 * with absolute clocksource cycles (as those will easily overflow),
 * but is only intended to be used with relative (delta) clocksource cycles.
 *
 * XXX - This could use some mult_lxl_ll() asm optimization
 */
static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift)
{
	return ((u64) cycles * mult) >> shift;
}
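
/*
 * Illustrative use, continuing the 1 MHz example above (mult = 1024000,
 * shift = 10): a delta of 2500 cycles converts to
 * (2500 * 1024000) >> 10 = 2500000 ns, i.e. 2.5 ms:
 *
 *	s64 ns = clocksource_cyc2ns(2500, 1024000, 10);	// 2500000
 */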


extern int clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
extern void
clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles);
extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now);

extern u64
clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
extern void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
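
/*
 * Hedged usage sketch for clocks_calc_mult_shift(): derive a mult/shift
 * pair that converts a counter frequency to nanoseconds without 64bit
 * overflow over a given conversion range. The 19.2 MHz rate and the one
 * hour interval below are illustrative values only:
 *
 *	u32 mult, shift;
 *
 *	clocks_calc_mult_shift(&mult, &shift, 19200000, NSEC_PER_SEC, 3600);
 */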

/*
 * Don't call __clocksource_register_scale directly, use
 * clocksource_register_hz/khz
 */
extern int
__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
extern void
__clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq);

/*
 * Don't call this unless you are a default clocksource
 * (AKA: jiffies) and absolutely have to.
 */
static inline int __clocksource_register(struct clocksource *cs)
{
	return __clocksource_register_scale(cs, 1, 0);
}

static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
	return __clocksource_register_scale(cs, 1, hz);
}

static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
{
	return __clocksource_register_scale(cs, 1000, khz);
}
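
/*
 * Minimal registration sketch: a driver fills in the mandatory fields and
 * registers with its counter frequency; mult, shift, max_idle_ns and
 * max_cycles are computed by the core. The 24 MHz rate and all names
 * (my_cs, my_cs_read) are hypothetical.
 *
 *	static struct clocksource my_cs = {
 *		.name	= "my-timer",
 *		.rating	= 300,
 *		.read	= my_cs_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	ret = clocksource_register_hz(&my_cs, 24000000);
 */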

static inline void __clocksource_update_freq_hz(struct clocksource *cs, u32 hz)
{
	__clocksource_update_freq_scale(cs, 1, hz);
}

static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz)
{
	__clocksource_update_freq_scale(cs, 1000, khz);
}

#ifdef CONFIG_ARCH_CLOCKSOURCE_INIT
extern void clocksource_arch_init(struct clocksource *cs);
#else
static inline void clocksource_arch_init(struct clocksource *cs) { }
#endif

extern int timekeeping_notify(struct clocksource *clock);

extern u64 clocksource_mmio_readl_up(struct clocksource *);
extern u64 clocksource_mmio_readl_down(struct clocksource *);
extern u64 clocksource_mmio_readw_up(struct clocksource *);
extern u64 clocksource_mmio_readw_down(struct clocksource *);

extern int clocksource_mmio_init(void __iomem *, const char *,
	unsigned long, int, unsigned, u64 (*)(struct clocksource *));
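
/*
 * Hedged usage sketch for a simple memory mapped up-counter: register it
 * through clocksource_mmio_init() with one of the generic accessors above.
 * The base address, name, 24 MHz rate, rating and counter width below are
 * illustrative only:
 *
 *	ret = clocksource_mmio_init(base, "my-mmio-timer", 24000000, 250, 32,
 *				    clocksource_mmio_readl_up);
 */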

extern int clocksource_i8253_init(void);

#define TIMER_OF_DECLARE(name, compat, fn) \
	OF_DECLARE_1_RET(timer, name, compat, fn)
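
/*
 * Illustrative sketch: a devicetree timer driver hooks its init function to
 * a compatible string with TIMER_OF_DECLARE(). The driver name, compatible
 * string and init function below are hypothetical.
 *
 *	static int __init my_timer_init(struct device_node *np)
 *	{
 *		// map registers, read the clock rate, register the clocksource
 *		return 0;
 *	}
 *	TIMER_OF_DECLARE(my_timer, "vendor,my-timer", my_timer_init);
 */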

#ifdef CONFIG_TIMER_PROBE
extern void timer_probe(void);
#else
static inline void timer_probe(void) {}
#endif

#define TIMER_ACPI_DECLARE(name, table_id, fn)		\
	ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn)

static inline unsigned int clocksource_get_max_watchdog_retry(void)
{
	/*
	 * When the system is in the boot phase or under heavy workload,
	 * there can be random big latencies during the clocksource/watchdog
	 * read, so allow retries to filter the noise latency. As the
	 * latency's frequency and maximum value go up with the number of
	 * CPUs, scale the number of retries with the number of online
	 * CPUs.
	 */
	return (ilog2(num_online_cpus()) / 2) + 1;
}
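
/*
 * For example, clocksource_get_max_watchdog_retry() allows
 * ilog2(8) / 2 + 1 = 2 retries with 8 online CPUs and
 * ilog2(256) / 2 + 1 = 5 retries with 256 online CPUs.
 */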

void clocksource_verify_percpu(struct clocksource *cs);

#endif /* _LINUX_CLOCKSOURCE_H */