1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2018 ARM Limited
4  */
5 #ifndef __ASM_VDSO_GETTIMEOFDAY_H
6 #define __ASM_VDSO_GETTIMEOFDAY_H
7 
8 #ifndef __ASSEMBLY__
9 
10 #include <asm/unistd.h>
11 #include <uapi/linux/time.h>
12 
13 #include <asm/vdso/compat_barrier.h>
14 
15 #define __VDSO_USE_SYSCALL		ULLONG_MAX
16 
17 #define VDSO_HAS_CLOCK_GETRES		1
18 
/*
 * Fall back to the compat (AArch32) gettimeofday syscall when the vDSO
 * fast path cannot be used.
 *
 * AArch32 EABI syscall convention: arguments in r0/r1, syscall number in
 * r7, trap via "swi #0", result returned in r0. Note that @ret is pinned
 * to r0 and therefore deliberately aliases @tv — r0 carries the first
 * argument in and the return value out.
 */
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	register struct timezone *tz asm("r1") = _tz;
	register struct __kernel_old_timeval *tv asm("r0") = _tv;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_gettimeofday;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	/* "memory": the kernel writes through *tv and *tz */
	: "memory");

	return ret;
}
36 
/*
 * Fall back to the compat clock_gettime64 syscall (64-bit time_t variant,
 * hence struct __kernel_timespec) when the vDSO fast path cannot be used.
 *
 * Same register convention as gettimeofday_fallback(): args in r0/r1,
 * syscall number in r7, return value in r0 (@ret aliases @clkid in r0).
 */
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_gettime64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	/* "memory": the kernel writes through *ts */
	: "memory");

	return ret;
}
53 
/*
 * Fall back to the compat clock_getres_time64 syscall when the vDSO fast
 * path cannot be used.
 *
 * Same register convention as the other fallbacks: args in r0/r1, syscall
 * number in r7, return value in r0 (@ret aliases @clkid in r0).
 */
static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_getres_time64;

	/*
	 * The checks below are required for ABI consistency with arm:
	 * reject an out-of-range dynamic clockid combined with a NULL
	 * result pointer here, before trapping into the kernel.
	 */
	if ((_clkid >= MAX_CLOCKS) && (_ts == NULL))
		return -EINVAL;

	asm volatile(
	"       swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	/* "memory": the kernel writes through *ts */
	: "memory");

	return ret;
}
74 
/*
 * Read the raw 64-bit hardware counter backing the vDSO clocks.
 *
 * @clock_mode: 0 means the vDSO counter path is usable; any other value
 *              requests the syscall fallback, signalled by returning the
 *              __VDSO_USE_SYSCALL sentinel (ULLONG_MAX).
 *
 * The mrrc encoding (CP15, opc1=1, CRm=c14) reads CNTVCT, the virtual
 * counter, as an AArch32 compat task would; %Q0/%R0 are the low/high
 * 32-bit halves of the 64-bit result.
 *
 * The two isb() barriers are ordering-critical — do not move them.
 */
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
	u64 res;

	/*
	 * clock_mode == 0 implies that vDSO are enabled otherwise
	 * fallback on syscall.
	 */
	if (clock_mode)
		return __VDSO_USE_SYSCALL;

	/*
	 * This isb() is required to prevent that the counter value
	 * is speculated.
	 */
	isb();
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (res));
	/*
	 * This isb() is required to prevent that the seq lock is
	 * speculated.
	 */
	isb();

	return res;
}
100 
/*
 * Return a pointer to the vDSO data page (&_vdso_data) for use by the
 * generic vDSO time functions. The indirection through inline asm below
 * is intentional — see the comment in the body.
 */
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	const struct vdso_data *ret;

	/*
	 * This simply puts &_vdso_data into ret. The reason why we don't use
	 * `ret = _vdso_data` is that the compiler tends to optimise this in a
	 * very suboptimal way: instead of keeping &_vdso_data in a register,
	 * it goes through a relocation almost every time _vdso_data must be
	 * accessed (even in subfunctions). This is both time and space
	 * consuming: each relocation uses a word in the code section, and it
	 * has to be loaded at runtime.
	 *
	 * This trick hides the assignment from the compiler. Since it cannot
	 * track where the pointer comes from, it will only use one relocation
	 * where __arch_get_vdso_data() is called, and then keep the result in
	 * a register.
	 */
	asm volatile("mov %0, %1" : "=r"(ret) : "r"(_vdso_data));

	return ret;
}
123 
124 #endif /* !__ASSEMBLY__ */
125 
126 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
127