1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2018 ARM Limited
4  */
5 #ifndef __ASM_VDSO_GETTIMEOFDAY_H
6 #define __ASM_VDSO_GETTIMEOFDAY_H
7 
8 #ifndef __ASSEMBLY__
9 
10 #include <asm/unistd.h>
11 
12 #define VDSO_HAS_CLOCK_GETRES		1
13 
/*
 * Syscall fallback for gettimeofday(), used when the vDSO fast path
 * cannot service the request.
 *
 * AArch64 syscall convention: arguments in x0/x1, syscall number in
 * x8, return value in x0.  The register variables pin each value to
 * its required register; note that 'ret' intentionally shares x0 with
 * 'tv'.  Do not reorder these declarations.
 */
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	register struct timezone *tz asm("x1") = _tz;
	register struct __kernel_old_timeval *tv asm("x0") = _tv;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_gettimeofday;

	asm volatile(
	"       svc #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");	/* kernel writes through tv/tz */

	return ret;
}
31 
/*
 * Syscall fallback for clock_gettime(), used when the vDSO fast path
 * cannot service the request (e.g. unsupported clock id or clock mode
 * change in progress).
 *
 * AArch64 syscall convention: clkid in x0, ts in x1, syscall number in
 * x8, return value in x0 ('ret' intentionally shares x0 with 'clkid').
 */
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_clock_gettime;

	asm volatile(
	"       svc #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");	/* kernel writes through ts */

	return ret;
}
48 
/*
 * Syscall fallback for clock_getres() (available because
 * VDSO_HAS_CLOCK_GETRES is set above).
 *
 * AArch64 syscall convention: clkid in x0, ts in x1, syscall number in
 * x8, return value in x0 ('ret' intentionally shares x0 with 'clkid').
 */
static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_clock_getres;

	asm volatile(
	"       svc #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");	/* kernel writes through ts */

	return ret;
}
65 
/*
 * Read the raw hardware counter (CNTVCT_EL0) for the generic vDSO
 * time code.
 *
 * @clock_mode: clock mode published by the core vDSO code.
 *
 * Returns the counter value, or 0 when @clock_mode indicates a race
 * with a concurrent update (see comment below).
 *
 * NOTE(review): isb() is normally provided by <asm/barrier.h>; only
 * <asm/unistd.h> is included in this chunk — confirm the barrier
 * header is pulled in.
 */
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
	u64 res;

	/*
	 * Core checks for mode already, so this raced against a concurrent
	 * update. Return something. Core will do another round and then
	 * see the mode change and fallback to the syscall.
	 */
	if (clock_mode == VDSO_CLOCKMODE_NONE)
		return 0;

	/*
	 * This isb() is required to prevent that the counter value
	 * is speculated.
	 */
	isb();
	asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
	/*
	 * This isb() is required to prevent that the seq lock is
	 * speculated.
	 */
	isb();

	return res;
}
92 
93 static __always_inline
94 const struct vdso_data *__arch_get_vdso_data(void)
95 {
96 	return _vdso_data;
97 }
98 
99 #endif /* !__ASSEMBLY__ */
100 
101 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
102