/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *    Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * sched_clock_cpu(i) -- must be used with local IRQs disabled (implied by NMI)
 * local_clock()      -- is cpu_clock() on the current cpu.
 *
 * How:
 *
 * When !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK the implementation simply uses
 * sched_clock(), which is then assumed to provide these properties
 * (mostly it means the architecture provides a globally synchronized
 * highres time source).
 *
 * Otherwise it tries to create a semi-stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep the clock within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 *
 * Notes:
 *
 * The !IRQ-safety of sched_clock() and sched_clock_cpu() comes from things
 * like cpufreq interrupts that can change the base clock (TSC) multiplier
 * and cause funny jumps in time. Although the filtering provided by
 * sched_clock_cpu() should mitigate serious artifacts, we cannot rely on it
 * in general, since for !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK we fully rely on
 * sched_clock().
 */
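
/*
 * Illustrative usage sketch (not part of this file's logic; do_work() is a
 * made-up placeholder): measuring a short duration with local_clock().
 * With preemption disabled both timestamps come from the same CPU, so the
 * difference cannot be negative:
 *
 *	u64 t0, t1;
 *
 *	preempt_disable();
 *	t0 = local_clock();
 *	do_work();
 *	t1 = local_clock();
 *	preempt_enable();
 *	// t1 - t0 is a monotonic, nanosecond-resolution duration
 *
 * Comparing cpu_clock(i) against cpu_clock(j) for i != j only gives
 * bounded-drift ordering; see the BIG FAT WARNING above.
 */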
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);

__read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;

struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
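
/*
 * Illustrative sketch (values made up, not used anywhere below): because
 * the comparison is done on the signed difference, a value that has just
 * wrapped past 2^64 still compares as being "after" one that has not:
 *
 *	u64 a = 10;			// just after the wrap
 *	u64 b = ULLONG_MAX - 10;	// just before the wrap
 *
 *	wrap_max(a, b) == a;		// (s64)(a - b) == 21 > 0
 *	wrap_min(a, b) == b;
 */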

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}
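
/*
 * Worked example of the clamp above (all numbers made up): with
 * scd->tick_gtod == 1000000, scd->tick_raw == 500000, scd->clock ==
 * 1002000 and sched_clock() returning 503500:
 *
 *	delta     = 503500 - 500000       =    3500
 *	clock     = 1000000 + 3500        = 1003500
 *	min_clock = max(1000000, 1002000) = 1002000
 *	max_clock = max(1002000, 1000000 + TICK_NSEC)
 *
 * i.e. the result never drops below the last value returned and cannot
 * run more than TICK_NSEC past the GTOD tick base (unless the previous
 * value was already past that point, in which case it simply stays put).
 */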

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
	/*
	 * Careful here: The local and the remote clock values need to
	 * be read out atomically as we need to compare the values and
	 * then update either the local or the remote side. So the
	 * cmpxchg64 below only protects one readout.
	 *
	 * We must reread via sched_clock_local() in the retry case on
	 * 32bit as an NMI could use sched_clock_local() via the
	 * tracer and hit between the readout of
	 * the low 32bit and the high 32bit portion.
	 */
	this_clock = sched_clock_local(my_scd);
	/*
	 * We must enforce atomic readout on 32bit, otherwise the
	 * update on the remote cpu can hit in between the readout of
	 * the low 32bit and the high 32bit portion.
	 */
	remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
	/*
	 * On 64bit the read of [my]scd->clock is atomic versus the
	 * update, so we can avoid the above 32bit dance.
	 */
	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;
#endif

	/*
	 * Use this opportunity to couple the two clocks: we take the
	 * larger time as the latest time for both runqueues.
	 * (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}
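
/*
 * Illustrative sketch (numbers made up): if this_clock == 1005000 and
 * remote_clock == 1003000, the remote scd->clock is pulled forward to
 * 1005000 by the cmpxchg64() above, so a subsequent reader of either
 * clock observes a value at least as large as the one returned here.
 */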

/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	WARN_ON_ONCE(!irqs_disabled());

	if (sched_clock_stable)
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);

	return clock;
}

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable)
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
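
/*
 * Illustrative sketch of the idle hooks above (enter_deep_idle() and the
 * surrounding driver are made-up placeholders, not a real in-tree user):
 * an idle driver entering a state where the TSC stops might bracket the
 * sleep roughly like this, with irqs disabled:
 *
 *	u64 t0, t1;
 *
 *	t0 = ktime_to_ns(ktime_get());
 *	sched_clock_idle_sleep_event();
 *	enter_deep_idle();
 *	t1 = ktime_to_ns(ktime_get());
 *	sched_clock_idle_wakeup_event(t1 - t0);
 *
 * which re-syncs the per-cpu clock so the time spent with the TSC stopped
 * does not show up as a jump.
 */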

/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
u64 cpu_clock(int cpu)
{
	u64 clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}

/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	u64 clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(smp_processor_id());
	local_irq_restore(flags);

	return clock;
}

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

u64 local_clock(void)
{
	return sched_clock_cpu(0);
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

EXPORT_SYMBOL_GPL(cpu_clock);
EXPORT_SYMBOL_GPL(local_clock);