1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2018 ARM Limited
4  */
5 #ifndef __ASM_VDSO_GETTIMEOFDAY_H
6 #define __ASM_VDSO_GETTIMEOFDAY_H
7 
8 #ifndef __ASSEMBLY__
9 
10 #include <asm/barrier.h>
11 #include <asm/unistd.h>
12 
13 #define VDSO_HAS_CLOCK_GETRES		1
14 
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	/*
	 * Syscall fallback used when the vDSO fast path is unavailable.
	 * Arguments are pinned to the AArch64 syscall ABI registers:
	 * x0/x1 carry the arguments, x8 holds the syscall number, and
	 * the return value comes back in x0 ("ret" deliberately reuses
	 * the same register as "tv").
	 */
	register struct timezone *tz asm("x1") = _tz;
	register struct __kernel_old_timeval *tv asm("x0") = _tv;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_gettimeofday;

	/* "memory" clobber: the kernel may write through *_tv and *_tz */
	asm volatile(
	"       svc #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");

	/* 0 on success, negative errno-style value on failure */
	return ret;
}
32 
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	/*
	 * Syscall fallback for clock_gettime() when the vDSO fast path
	 * cannot service the request. Register bindings follow the
	 * AArch64 syscall ABI: x0/x1 are the arguments, x8 the syscall
	 * number; the result is returned in x0 ("ret" reuses "clkid"'s
	 * register).
	 */
	register struct __kernel_timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_clock_gettime;

	/* "memory" clobber: the kernel writes the result through *_ts */
	asm volatile(
	"       svc #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	/* 0 on success, negative errno-style value on failure */
	return ret;
}
49 
static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	/*
	 * Syscall fallback for clock_getres() (enabled above via
	 * VDSO_HAS_CLOCK_GETRES). Same AArch64 ABI register layout as
	 * the other fallbacks: x0/x1 arguments, x8 syscall number,
	 * return value in x0.
	 */
	register struct __kernel_timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_clock_getres;

	/* "memory" clobber: the kernel writes the resolution into *_ts */
	asm volatile(
	"       svc #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	/* 0 on success, negative errno-style value on failure */
	return ret;
}
66 
/*
 * Read the raw hardware counter (the virtual counter, CNTVCT_EL0)
 * backing the vDSO clocks. The surrounding isb() barriers keep the
 * read ordered with respect to the caller's seqlock accesses.
 */
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
	u64 res;

	/*
	 * Core checks for mode already, so this raced against a concurrent
	 * update. Return something. Core will do another round and then
	 * see the mode change and fallback to the syscall.
	 */
	if (clock_mode == VDSO_CLOCKMODE_NONE)
		return 0;

	/*
	 * This isb() is required to prevent the counter value from
	 * being speculated ahead of the preceding loads.
	 */
	isb();
	asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
	/*
	 * This isb() is required to prevent the subsequent seq lock
	 * read from being speculated past the counter read.
	 */
	isb();

	return res;
}
93 
/*
 * Return the architecture's vDSO data page pointer for the generic
 * vDSO code. _vdso_data is declared elsewhere (provided by the vDSO
 * mapping).
 */
static __always_inline
const struct vdso_data *__arch_get_vdso_data(void)
{
	return _vdso_data;
}
99 
100 #endif /* !__ASSEMBLY__ */
101 
102 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
103