1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2018 ARM Limited
4  */
5 #ifndef __ASM_VDSO_GETTIMEOFDAY_H
6 #define __ASM_VDSO_GETTIMEOFDAY_H
7 
8 #ifndef __ASSEMBLY__
9 
10 #include <asm/unistd.h>
11 #include <uapi/linux/time.h>
12 
13 #define __VDSO_USE_SYSCALL		ULLONG_MAX
14 
15 #define VDSO_HAS_CLOCK_GETRES		1
16 
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	/*
	 * Fall back to the real gettimeofday() syscall when the vDSO
	 * fast path cannot be used.  Per the AArch64 syscall ABI the
	 * arguments go in x0/x1, the syscall number in x8, and the
	 * result comes back in x0.
	 */
	register struct timezone *tz asm("x1") = _tz;
	register struct __kernel_old_timeval *tv asm("x0") = _tv;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_gettimeofday;

	asm volatile(
	"       svc #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");	/* the kernel writes *_tv / *_tz behind the compiler's back */

	return ret;
}
34 
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	/*
	 * Fall back to the real clock_gettime() syscall: clkid in x0,
	 * timespec pointer in x1, syscall number in x8, result in x0.
	 */
	register struct __kernel_timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_clock_gettime;

	asm volatile(
	"       svc #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");	/* the kernel writes *_ts behind the compiler's back */

	return ret;
}
51 
static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	/*
	 * Fall back to the real clock_getres() syscall: clkid in x0,
	 * timespec pointer in x1, syscall number in x8, result in x0.
	 */
	register struct __kernel_timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_clock_getres;

	asm volatile(
	"       svc #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");	/* the kernel writes *_ts behind the compiler's back */

	return ret;
}
68 
/*
 * Read the raw hardware counter (CNTVCT_EL0) used as the vDSO clocksource.
 * Returns __VDSO_USE_SYSCALL when clock_mode indicates the vDSO path is
 * disabled, telling the generic code to fall back to a syscall.
 */
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
	u64 res;

	/*
	 * clock_mode == 0 implies that vDSO are enabled otherwise
	 * fallback on syscall.
	 */
	if (clock_mode)
		return __VDSO_USE_SYSCALL;

	/*
	 * This isb() is required to prevent that the counter value
	 * is speculated.
	 */
	isb();
	asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
	/*
	 * This isb() is required to prevent that the seq lock is
	 * speculated.
	 */
	isb();

	return res;
}
94 
95 static __always_inline
96 const struct vdso_data *__arch_get_vdso_data(void)
97 {
98 	return _vdso_data;
99 }
100 
101 #endif /* !__ASSEMBLY__ */
102 
103 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
104