xref: /openbmc/linux/arch/x86/include/asm/vgtod.h (revision 49116f20)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <linux/compiler.h>
#include <linux/clocksource.h>

#include <uapi/linux/time.h>

#ifdef BUILD_VDSO32_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif

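/*
 * One base timestamp per supported clock id: filled in on the kernel side
 * by the timekeeping code and read by the vDSO fast paths.
 */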
struct vgtod_ts {
	u64		sec;
	u64		nsec;
};

#define VGTOD_BASES	(CLOCK_MONOTONIC_COARSE + 1)
#define VGTOD_HRES	(BIT(CLOCK_REALTIME) | BIT(CLOCK_MONOTONIC))
#define VGTOD_COARSE	(BIT(CLOCK_REALTIME_COARSE) | BIT(CLOCK_MONOTONIC_COARSE))

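/*
 * VGTOD_HRES and VGTOD_COARSE are clock-id bitmasks, so a caller can test
 * a clock id against them without a switch statement.  Roughly (sketch
 * only, not the actual vDSO code):
 *
 *	u32 msk = 1U << clock;
 *
 *	if (msk & VGTOD_HRES)
 *		... high resolution path ...
 *	else if (msk & VGTOD_COARSE)
 *		... coarse path ...
 */
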
/*
 * vsyscall_gtod_data is accessed by 32-bit and 64-bit code at the same
 * time, so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
	unsigned int	seq;

	int		vclock_mode;
	u64		cycle_last;
	u64		mask;
	u32		mult;
	u32		shift;

	struct vgtod_ts	basetime[VGTOD_BASES];

	int		tz_minuteswest;
	int		tz_dsttime;
};
extern struct vsyscall_gtod_data vsyscall_gtod_data;

extern int vclocks_used;
static inline bool vclock_was_used(int vclock)
{
	return READ_ONCE(vclocks_used) & (1 << vclock);
}

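/*
 * vclock_was_used() lets kernel code skip cleanup for vclock modes that
 * no task ever used.  A hypothetical caller (sketch only; VCLOCK_TSC is
 * assumed to come from asm/clocksource.h):
 *
 *	if (vclock_was_used(VCLOCK_TSC))
 *		... some user task may still rely on the TSC vclock ...
 */
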
static inline unsigned int gtod_read_begin(const struct vsyscall_gtod_data *s)
{
	unsigned int ret;

repeat:
	ret = READ_ONCE(s->seq);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	smp_rmb();
	return ret;
}

static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
				  unsigned int start)
{
	smp_rmb();
	return unlikely(s->seq != start);
}

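/*
 * gtod_read_begin()/gtod_read_retry() form the read side of a seqcount:
 * an odd ->seq means an update is in progress.  A reader loops roughly
 * like this (sketch only; the real vDSO readers also fold in the
 * clocksource delta and shift):
 *
 *	unsigned int seq;
 *	u64 sec, ns;
 *
 *	do {
 *		seq = gtod_read_begin(&vsyscall_gtod_data);
 *		sec = vsyscall_gtod_data.basetime[CLOCK_MONOTONIC].sec;
 *		ns  = vsyscall_gtod_data.basetime[CLOCK_MONOTONIC].nsec;
 *	} while (gtod_read_retry(&vsyscall_gtod_data, seq));
 */
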
static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
	++s->seq;
	smp_wmb();
}

static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
	smp_wmb();
	++s->seq;
}

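/*
 * The write side bumps ->seq to an odd value before touching the data and
 * back to an even value afterwards, so concurrent readers retry.  Sketch
 * of an update (the real writer is the timekeeping code):
 *
 *	gtod_write_begin(&vsyscall_gtod_data);
 *	... update cycle_last, mult, shift, basetime[] ...
 *	gtod_write_end(&vsyscall_gtod_data);
 */
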
#ifdef CONFIG_X86_64

#define VGETCPU_CPU_MASK 0xfff

static inline unsigned int __getcpu(void)
{
	unsigned int p;

	/*
	 * Load per CPU data from GDT.  LSL is faster than RDTSCP and
	 * works on all CPUs.  This is volatile so that it orders
	 * correctly wrt barrier() and to keep gcc from cleverly
	 * hoisting it out of the calling function.
	 *
	 * If RDPID is available, use it.
	 */
	alternative_io ("lsl %[seg],%[p]",
			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
			X86_FEATURE_RDPID,
			[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));

	return p;
}

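/*
 * The value returned by __getcpu() packs the CPU number in the low 12
 * bits (VGETCPU_CPU_MASK).  A caller such as the vDSO getcpu()
 * implementation would decode it roughly as (sketch):
 *
 *	unsigned int p = __getcpu();
 *	unsigned int cpu  = p & VGETCPU_CPU_MASK;
 *	unsigned int node = p >> 12;
 */
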
#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_VGTOD_H */