xref: /openbmc/linux/arch/x86/include/asm/vgtod.h (revision 49116f20)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_X86_VGTOD_H
3 #define _ASM_X86_VGTOD_H
4 
5 #include <linux/compiler.h>
6 #include <linux/clocksource.h>
7 
8 #include <uapi/linux/time.h>
9 
10 #ifdef BUILD_VDSO32_64
11 typedef u64 gtod_long_t;
12 #else
13 typedef unsigned long gtod_long_t;
14 #endif
15 
16 struct vgtod_ts {
17 	u64		sec;
18 	u64		nsec;
19 };
20 
21 #define VGTOD_BASES	(CLOCK_MONOTONIC_COARSE + 1)
22 #define VGTOD_HRES	(BIT(CLOCK_REALTIME) | BIT(CLOCK_MONOTONIC))
23 #define VGTOD_COARSE	(BIT(CLOCK_REALTIME_COARSE) | BIT(CLOCK_MONOTONIC_COARSE))
24 
25 /*
26  * vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time
27  * so be carefull by modifying this structure.
28  */
29 struct vsyscall_gtod_data {
30 	unsigned int	seq;
31 
32 	int		vclock_mode;
33 	u64		cycle_last;
34 	u64		mask;
35 	u32		mult;
36 	u32		shift;
37 
38 	struct vgtod_ts	basetime[VGTOD_BASES];
39 
40 	int		tz_minuteswest;
41 	int		tz_dsttime;
42 };
43 extern struct vsyscall_gtod_data vsyscall_gtod_data;
44 
45 extern int vclocks_used;
46 static inline bool vclock_was_used(int vclock)
47 {
48 	return READ_ONCE(vclocks_used) & (1 << vclock);
49 }
50 
51 static inline unsigned int gtod_read_begin(const struct vsyscall_gtod_data *s)
52 {
53 	unsigned int ret;
54 
55 repeat:
56 	ret = READ_ONCE(s->seq);
57 	if (unlikely(ret & 1)) {
58 		cpu_relax();
59 		goto repeat;
60 	}
61 	smp_rmb();
62 	return ret;
63 }
64 
65 static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
66 				  unsigned int start)
67 {
68 	smp_rmb();
69 	return unlikely(s->seq != start);
70 }
71 
72 static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
73 {
74 	++s->seq;
75 	smp_wmb();
76 }
77 
78 static inline void gtod_write_end(struct vsyscall_gtod_data *s)
79 {
80 	smp_wmb();
81 	++s->seq;
82 }
83 
84 #ifdef CONFIG_X86_64
85 
86 #define VGETCPU_CPU_MASK 0xfff
87 
88 static inline unsigned int __getcpu(void)
89 {
90 	unsigned int p;
91 
92 	/*
93 	 * Load per CPU data from GDT.  LSL is faster than RDTSCP and
94 	 * works on all CPUs.  This is volatile so that it orders
95 	 * correctly wrt barrier() and to keep gcc from cleverly
96 	 * hoisting it out of the calling function.
97 	 *
98 	 * If RDPID is available, use it.
99 	 */
100 	alternative_io ("lsl %[seg],%[p]",
101 			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
102 			X86_FEATURE_RDPID,
103 			[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
104 
105 	return p;
106 }
107 
108 #endif /* CONFIG_X86_64 */
109 
110 #endif /* _ASM_X86_VGTOD_H */
111