xref: /openbmc/linux/arch/x86/include/asm/vgtod.h (revision bcc4a62a)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <linux/compiler.h>
#include <linux/clocksource.h>

#include <uapi/linux/time.h>

#ifdef BUILD_VDSO32_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif

/*
 * There is one of these objects in the vvar page for each
 * vDSO-accelerated clockid.  For high-resolution clocks, this encodes
 * the time corresponding to vsyscall_gtod_data.cycle_last.  For coarse
 * clocks, this encodes the actual time.
 *
 * To confuse the reader, for high-resolution clocks, nsec is left-shifted
 * by vsyscall_gtod_data.shift.
 */
struct vgtod_ts {
	u64		sec;
	u64		nsec;
};

#define VGTOD_BASES	(CLOCK_TAI + 1)
#define VGTOD_HRES	(BIT(CLOCK_REALTIME) | BIT(CLOCK_MONOTONIC) | BIT(CLOCK_TAI))
#define VGTOD_COARSE	(BIT(CLOCK_REALTIME_COARSE) | BIT(CLOCK_MONOTONIC_COARSE))
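
/*
 * Example (illustrative sketch): the masks above let a clockid be classified
 * before the vDSO tries to service it.  A hypothetical helper could look
 * like this:
 */
static __always_inline bool vgtod_clockid_is_hres(int clock)
{
	return clock < VGTOD_BASES && (VGTOD_HRES & (1 << clock));
}

static __always_inline bool vgtod_clockid_is_coarse(int clock)
{
	return clock < VGTOD_BASES && (VGTOD_COARSE & (1 << clock));
}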

/*
 * vsyscall_gtod_data will be accessed by 32- and 64-bit code at the same
 * time, so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
	unsigned int	seq;

	int		vclock_mode;
	u64		cycle_last;
	u64		mask;
	u32		mult;
	u32		shift;

	struct vgtod_ts	basetime[VGTOD_BASES];

	int		tz_minuteswest;
	int		tz_dsttime;
};
extern struct vsyscall_gtod_data vsyscall_gtod_data;

extern int vclocks_used;
static inline bool vclock_was_used(int vclock)
{
	return READ_ONCE(vclocks_used) & (1 << vclock);
}
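
/*
 * Example (illustrative sketch): vclocks_used accumulates a bit for every
 * vclock mode that has been exposed to user space, so code that changes or
 * disables a clocksource can check whether user space may still be reading
 * it through the vDSO.  Assumes VCLOCK_TSC from <asm/clocksource.h>; the
 * helper name is hypothetical.
 */
static inline bool vgtod_example_tsc_was_exposed(void)
{
	return vclock_was_used(VCLOCK_TSC);
}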

/*
 * Open-coded seqcount: readers spin below until the sequence count is even,
 * then re-check it after reading to detect a concurrent update.
 */
static inline unsigned int gtod_read_begin(const struct vsyscall_gtod_data *s)
{
	unsigned int ret;

repeat:
	ret = READ_ONCE(s->seq);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	smp_rmb();
	return ret;
}

static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
				  unsigned int start)
{
	smp_rmb();
	return unlikely(s->seq != start);
}
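
/*
 * Example (illustrative sketch, modeled on the vDSO's high-resolution read
 * path): a lockless reader retries until it observes a consistent snapshot,
 * then applies the mult/shift conversion described above.  Assumes
 * rdtsc_ordered() from <asm/msr.h> and NSEC_PER_SEC are available; the
 * helper name is hypothetical and the real reader lives in the vDSO.
 */
static inline void vgtod_example_read_hres(int clk, struct timespec *ts)
{
	const struct vsyscall_gtod_data *gtod = &vsyscall_gtod_data;
	const struct vgtod_ts *base = &gtod->basetime[clk];
	u64 cycles, last, sec, ns;
	unsigned int seq;

	do {
		seq = gtod_read_begin(gtod);
		cycles = rdtsc_ordered();
		last = gtod->cycle_last;
		ns = base->nsec;
		if (cycles > last)
			ns += (cycles - last) * gtod->mult;
		ns >>= gtod->shift;	/* undo the left-shift noted above */
		sec = base->sec;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	ts->tv_sec = sec + ns / NSEC_PER_SEC;
	ts->tv_nsec = ns % NSEC_PER_SEC;
}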

/*
 * Writers make the sequence count odd for the duration of an update.
 */
static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
	++s->seq;
	smp_wmb();
}

static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
	smp_wmb();
	++s->seq;
}
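
/*
 * Example (illustrative sketch of the update side, which is normally done by
 * the timekeeping code via update_vsyscall()): all fields are written between
 * gtod_write_begin() and gtod_write_end(), so readers see either the old
 * snapshot or the new one, never a mix.  The helper name and parameters are
 * hypothetical.
 */
static inline void vgtod_example_update(struct vsyscall_gtod_data *s,
					u64 cycle_last, u64 sec, u64 shifted_ns)
{
	gtod_write_begin(s);
	s->cycle_last = cycle_last;
	s->basetime[CLOCK_MONOTONIC].sec = sec;
	s->basetime[CLOCK_MONOTONIC].nsec = shifted_ns;	/* already << shift */
	gtod_write_end(s);
}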

#ifdef CONFIG_X86_64

#define VGETCPU_CPU_MASK 0xfff

static inline unsigned int __getcpu(void)
{
	unsigned int p;

	/*
	 * Load per CPU data from GDT.  LSL is faster than RDTSCP and
	 * works on all CPUs.  This is volatile so that it orders
	 * correctly wrt barrier() and to keep gcc from cleverly
	 * hoisting it out of the calling function.
	 *
	 * If RDPID is available, use it.
	 */
	alternative_io ("lsl %[seg],%[p]",
			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
			X86_FEATURE_RDPID,
			[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));

	return p;
}
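
/*
 * Example (illustrative sketch): the value returned by __getcpu() packs the
 * CPU number into the low 12 bits (VGETCPU_CPU_MASK) and, by the encoding
 * the kernel programs into the GDT segment limit, the NUMA node into the
 * bits above them.  A getcpu()-style caller would unpack it roughly like
 * this (hypothetical helper):
 */
static inline void vgtod_example_getcpu(unsigned int *cpu, unsigned int *node)
{
	unsigned int p = __getcpu();

	if (cpu)
		*cpu = p & VGETCPU_CPU_MASK;
	if (node)
		*node = p >> 12;
}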

#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_VGTOD_H */