1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2018 ARM Limited
4  */
5 #ifndef __ASM_VDSO_GETTIMEOFDAY_H
6 #define __ASM_VDSO_GETTIMEOFDAY_H
7 
8 #ifndef __ASSEMBLY__
9 
10 #include <asm/errno.h>
11 #include <asm/unistd.h>
12 #include <asm/vdso/cp15.h>
13 #include <uapi/linux/time.h>
14 
15 #define VDSO_HAS_CLOCK_GETRES		1
16 
17 extern struct vdso_data *__get_datapage(void);
18 
/*
 * Syscall fallback for gettimeofday() when the vDSO fast path cannot be
 * used.  Arguments go in r0/r1 and the syscall number in r7, per the ARM
 * EABI syscall convention; the kernel's return value comes back in r0.
 */
static __always_inline int gettimeofday_fallback(
				struct __kernel_old_timeval *_tv,
				struct timezone *_tz)
{
	register struct timezone *tz asm("r1") = _tz;
	register struct __kernel_old_timeval *tv asm("r0") = _tv;
	/* 'ret' deliberately shares r0 with 'tv': the result overwrites it. */
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_gettimeofday;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");	/* "memory": the kernel writes through *tv / *tz */

	return ret;
}
36 
/*
 * Syscall fallback for the 64-bit-time clock_gettime64() call.
 * Arguments in r0/r1, syscall number in r7, result returned in r0.
 */
static __always_inline long clock_gettime_fallback(
					clockid_t _clkid,
					struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	/* 'ret' shares r0 with 'clkid': the kernel reuses it for the result. */
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_clock_gettime64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");	/* the kernel writes through *ts */

	return ret;
}
54 
/*
 * Syscall fallback for the legacy 32-bit-time clock_gettime() call
 * (struct old_timespec32).  Same register convention as the other
 * fallbacks: args in r0/r1, syscall number in r7, result in r0.
 */
static __always_inline long clock_gettime32_fallback(
					clockid_t _clkid,
					struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	/* 'ret' shares r0 with 'clkid': the kernel reuses it for the result. */
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_clock_gettime;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");	/* the kernel writes through *ts */

	return ret;
}
72 
73 static __always_inline int clock_getres_fallback(
74 					clockid_t _clkid,
75 					struct __kernel_timespec *_ts)
76 {
77 	register struct __kernel_timespec *ts asm("r1") = _ts;
78 	register clockid_t clkid asm("r0") = _clkid;
79 	register long ret asm ("r0");
80 	register long nr asm("r7") = __NR_clock_getres_time64;
81 
82 	asm volatile(
83 	"       swi #0\n"
84 	: "=r" (ret)
85 	: "r" (clkid), "r" (ts), "r" (nr)
86 	: "memory");
87 
88 	return ret;
89 }
90 
91 static __always_inline int clock_getres32_fallback(
92 					clockid_t _clkid,
93 					struct old_timespec32 *_ts)
94 {
95 	register struct old_timespec32 *ts asm("r1") = _ts;
96 	register clockid_t clkid asm("r0") = _clkid;
97 	register long ret asm ("r0");
98 	register long nr asm("r7") = __NR_clock_getres;
99 
100 	asm volatile(
101 	"       swi #0\n"
102 	: "=r" (ret)
103 	: "r" (clkid), "r" (ts), "r" (nr)
104 	: "memory");
105 
106 	return ret;
107 }
108 
109 static inline bool arm_vdso_hres_capable(void)
110 {
111 	return IS_ENABLED(CONFIG_ARM_ARCH_TIMER);
112 }
113 #define __arch_vdso_hres_capable arm_vdso_hres_capable
114 
/*
 * Read the raw hardware counter used for vDSO timekeeping.  With the
 * ARM architected timer configured in, this is the CNTVCT virtual
 * counter read through CP15; without it, the function is compiled out
 * and simply returns 0.
 */
static __always_inline u64 __arch_get_hw_counter(int clock_mode)
{
#ifdef CONFIG_ARM_ARCH_TIMER
	u64 cycle_now;

	/*
	 * Core checks for mode already, so this raced against a concurrent
	 * update. Return something. Core will do another round and then
	 * see the mode change and fallback to the syscall.
	 */
	if (clock_mode == VDSO_CLOCKMODE_NONE)
		return 0;

	/* Barrier first so the counter read is not reordered earlier. */
	isb();
	cycle_now = read_sysreg(CNTVCT);

	return cycle_now;
#else
	/* Make GCC happy. This is compiled out anyway */
	return 0;
#endif
}
137 
138 static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
139 {
140 	return __get_datapage();
141 }
142 
143 #endif /* !__ASSEMBLY__ */
144 
145 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
146