xref: /openbmc/linux/include/linux/sched/clock.h (revision dd093fb0)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_SCHED_CLOCK_H
3 #define _LINUX_SCHED_CLOCK_H
4 
5 #include <linux/smp.h>
6 
7 /*
8  * Do not use outside of architecture code which knows its limitations.
9  *
 * sched_clock() makes no promise of monotonicity or bounded drift between
 * CPUs; cross-CPU use (which you should not attempt) requires disabling IRQs.
12  *
13  * Please use one of the three interfaces below.
14  */
15 extern unsigned long long notrace sched_clock(void);
16 
17 /*
18  * See the comment in kernel/sched/clock.c
19  */
20 extern u64 running_clock(void);
21 extern u64 sched_clock_cpu(int cpu);
22 
23 
24 extern void sched_clock_init(void);
25 
26 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
 * No-op stub: without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK there is no
 * per-tick clock state to update.
 */
static inline void sched_clock_tick(void)
{
}
30 
/*
 * No-op stub: without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK there is no
 * stable/unstable state to clear (cf. the extern variant in the #else
 * branch of this header).
 */
static inline void clear_sched_clock_stable(void)
{
}
34 
/*
 * No-op stub: idle sleep notifications are only needed when the
 * unstable-clock machinery (CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) is built.
 */
static inline void sched_clock_idle_sleep_event(void)
{
}
38 
/*
 * No-op stub: idle wakeup notifications are only needed when the
 * unstable-clock machinery (CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) is built.
 */
static inline void sched_clock_idle_wakeup_event(void)
{
}
42 
/*
 * With sched_clock() taken as stable there is no per-CPU state, so the
 * @cpu argument is ignored and the global clock is returned directly.
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock();
}
47 
/*
 * Stable-clock variant: local_clock() is simply sched_clock().
 * __always_inline keeps this usable from any context (the #else branch
 * provides an out-of-line extern implementation instead).
 */
static __always_inline u64 local_clock(void)
{
	return sched_clock();
}
52 #else
53 extern int sched_clock_stable(void);
54 extern void clear_sched_clock_stable(void);
55 
56 /*
57  * When sched_clock_stable(), __sched_clock_offset provides the offset
58  * between local_clock() and sched_clock().
59  */
60 extern u64 __sched_clock_offset;
61 
62 extern void sched_clock_tick(void);
63 extern void sched_clock_tick_stable(void);
64 extern void sched_clock_idle_sleep_event(void);
65 extern void sched_clock_idle_wakeup_event(void);
66 
67 /*
68  * As outlined in clock.c, provides a fast, high resolution, nanosecond
69  * time source that is monotonic per cpu argument and has bounded drift
70  * between cpus.
71  *
72  * ######################### BIG FAT WARNING ##########################
73  * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
74  * # go backwards !!                                                  #
75  * ####################################################################
76  */
/*
 * Unstable-clock variant: defer to sched_clock_cpu(), which provides the
 * per-CPU-monotonic, bounded-drift value described in the comment above.
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}
81 
82 extern u64 local_clock(void);
83 
84 #endif
85 
86 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
87 /*
88  * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
89  * The reason for this explicit opt-in is not to have perf penalty with
90  * slow sched_clocks.
91  */
92 extern void enable_sched_clock_irqtime(void);
93 extern void disable_sched_clock_irqtime(void);
94 #else
/* No-op stub: irq time accounting is compiled out (!CONFIG_IRQ_TIME_ACCOUNTING). */
static inline void enable_sched_clock_irqtime(void) {}
/* No-op stub: irq time accounting is compiled out (!CONFIG_IRQ_TIME_ACCOUNTING). */
static inline void disable_sched_clock_irqtime(void) {}
97 #endif
98 
99 #endif /* _LINUX_SCHED_CLOCK_H */
100