/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 ARM Limited
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/unistd.h>
#include <asm/errno.h>

#include <asm/vdso/compat_barrier.h>

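/* Advertise to the generic vDSO library that clock_getres() is implemented. */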
#define VDSO_HAS_CLOCK_GETRES		1

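/* This header is used to build the 32-bit (AArch32 compat) vDSO. */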
#define BUILD_VDSO32			1

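/*
 * The fallback helpers below invoke the corresponding AArch32 system call
 * directly: arguments go in r0/r1, the syscall number in r7, and "swi #0"
 * traps into the kernel, which places the result in r0.
 */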
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	register struct timezone *tz asm("r1") = _tz;
	register struct __kernel_old_timeval *tv asm("r0") = _tv;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_gettimeofday;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_gettime64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_gettime;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_getres_time64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_getres;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
	u64 res;

	/*
	 * The core code checks the clock mode already, so reaching this point
	 * means we raced with a concurrent update. Return something; the core
	 * will do another round, see the mode change and fall back to the
	 * syscall.
	 */
	if (clock_mode == VDSO_CLOCKMODE_NONE)
		return 0;

	/*
	 * This isb() is required to prevent the counter value from being
	 * read speculatively.
	 */
	isb();
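	/* Read the virtual counter CNTVCT via its AArch32 MRRC encoding. */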
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (res));
	/*
	 * This isb() is required to prevent the seq lock from being
	 * read speculatively.
	 */
	isb();

	return res;
}

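/* _vdso_data is the vDSO data page symbol from the vDSO linker script. */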
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	const struct vdso_data *ret;

	/*
	 * This simply puts &_vdso_data into ret. The reason why we don't use
	 * `ret = _vdso_data` is that the compiler tends to optimise this in a
	 * very suboptimal way: instead of keeping &_vdso_data in a register,
	 * it goes through a relocation almost every time _vdso_data must be
	 * accessed (even in subfunctions). This is both time and space
	 * consuming: each relocation uses a word in the code section, and it
	 * has to be loaded at runtime.
	 *
	 * This trick hides the assignment from the compiler. Since it cannot
	 * track where the pointer comes from, it will only use one relocation
	 * where __arch_get_vdso_data() is called, and then keep the result in
	 * a register.
	 */
	asm volatile("mov %0, %1" : "=r"(ret) : "r"(_vdso_data));

	return ret;
}

154 
155 #endif /* !__ASSEMBLY__ */
156 
157 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
158