1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2018 ARM Limited
4  */
5 #ifndef __ASM_VDSO_GETTIMEOFDAY_H
6 #define __ASM_VDSO_GETTIMEOFDAY_H
7 
8 #ifndef __ASSEMBLY__
9 
10 #include <asm/barrier.h>
11 #include <asm/unistd.h>
12 #include <asm/errno.h>
13 
14 #include <asm/vdso/compat_barrier.h>
15 
16 #define VDSO_HAS_CLOCK_GETRES		1
17 
18 #define BUILD_VDSO32			1
19 
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	/*
	 * Fall back to the compat gettimeofday() syscall. AArch32 syscall
	 * convention: arguments in r0/r1, syscall number in r7, trap via
	 * "swi #0", result returned in r0.
	 *
	 * Note that r0 carries both the first argument (tv) and the return
	 * value (ret); the tz/tv register variables are declared before
	 * ret so the overlapping r0 binding is set up correctly.
	 */
	register struct timezone *tz asm("r1") = _tz;
	register struct __kernel_old_timeval *tv asm("r0") = _tv;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_gettimeofday;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");

	return ret;
}
37 
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	/*
	 * Fall back to the compat clock_gettime64() syscall (64-bit time
	 * variant). r0 is both the clkid argument and the return value;
	 * see gettimeofday_fallback() for the calling convention.
	 */
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_gettime64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
54 
static __always_inline
long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	/*
	 * Fall back to the compat clock_gettime() syscall (legacy 32-bit
	 * time variant, struct old_timespec32). Same calling convention
	 * as the other fallbacks: args in r0/r1, number in r7, ret in r0.
	 */
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_gettime;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
71 
static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	/*
	 * Fall back to the compat clock_getres_time64() syscall (64-bit
	 * time variant). Same register convention as the fallbacks above.
	 */
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_getres_time64;

	asm volatile(
	"       swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
88 
static __always_inline
int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	/*
	 * Fall back to the compat clock_getres() syscall (legacy 32-bit
	 * time variant). Same register convention as the fallbacks above.
	 */
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_getres;

	asm volatile(
	"       swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
105 
/*
 * Read the raw hardware counter for the vDSO time code.
 * Returns 0 when the requested clock mode is not the arch timer, in
 * which case the generic vDSO core falls back to a syscall (see the
 * comment below).
 */
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
	u64 res;

	/*
	 * Core checks for mode already, so this raced against a concurrent
	 * update. Return something. Core will do another round and then
	 * see the mode change and fallback to the syscall.
	 */
	if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER)
		return 0;

	/*
	 * This isb() is required to prevent that the counter value
	 * is speculated.
	 */
	isb();
	/* 64-bit arch timer counter read via the CP15 MRRC encoding. */
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (res));
	/*
	 * This isb() is required to prevent that the seq lock is
	 * speculated.
	 */
	isb();

	return res;
}
132 
/* Return a pointer to the vDSO data page shared with the kernel. */
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	const struct vdso_data *ret;

	/*
	 * This simply puts &_vdso_data into ret. The reason why we don't use
	 * `ret = _vdso_data` is that the compiler tends to optimise this in a
	 * very suboptimal way: instead of keeping &_vdso_data in a register,
	 * it goes through a relocation almost every time _vdso_data must be
	 * accessed (even in subfunctions). This is both time and space
	 * consuming: each relocation uses a word in the code section, and it
	 * has to be loaded at runtime.
	 *
	 * This trick hides the assignment from the compiler. Since it cannot
	 * track where the pointer comes from, it will only use one relocation
	 * where __arch_get_vdso_data() is called, and then keep the result in
	 * a register.
	 */
	asm volatile("mov %0, %1" : "=r"(ret) : "r"(_vdso_data));

	return ret;
}
155 
#ifdef CONFIG_TIME_NS
/*
 * Return a pointer to the time-namespace vDSO data page. Uses the same
 * relocation-hiding trick as __arch_get_vdso_data(), but on _timens_data.
 */
static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
{
	const struct vdso_data *ret;

	/* See __arch_get_vdso_data(). */
	asm volatile("mov %0, %1" : "=r"(ret) : "r"(_timens_data));

	return ret;
}
#endif
167 
168 static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
169 {
170 	return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
171 }
172 #define vdso_clocksource_ok	vdso_clocksource_ok
173 
174 #endif /* !__ASSEMBLY__ */
175 
176 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
177