/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 ARM Limited
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <asm/errno.h>
#include <asm/unistd.h>
#include <asm/vdso/cp15.h>
#include <uapi/linux/time.h>

#define VDSO_HAS_CLOCK_GETRES		1

extern struct vdso_data *__get_datapage(void);

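/*
 * The fallbacks below invoke the syscall directly via the ARM EABI
 * convention: the syscall number goes in r7, the arguments in r0/r1,
 * "swi #0" traps into the kernel, and the result is returned in r0
 * (which therefore shares a register with the first argument).
 */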
static __always_inline int gettimeofday_fallback(
				struct __kernel_old_timeval *_tv,
				struct timezone *_tz)
{
	register struct timezone *tz asm("r1") = _tz;
	register struct __kernel_old_timeval *tv asm("r0") = _tv;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_gettimeofday;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");

	return ret;
}

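/* clock_gettime() fallback using the 64-bit time_t syscall. */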
static __always_inline long clock_gettime_fallback(
					clockid_t _clkid,
					struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_clock_gettime64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

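/* clock_gettime() fallback using the legacy 32-bit time_t syscall. */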
static __always_inline long clock_gettime32_fallback(
					clockid_t _clkid,
					struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_clock_gettime;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

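/* clock_getres() fallback using the 64-bit time_t syscall. */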
static __always_inline int clock_getres_fallback(
					clockid_t _clkid,
					struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_clock_getres_time64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

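/* clock_getres() fallback using the legacy 32-bit time_t syscall. */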
static __always_inline int clock_getres32_fallback(
					clockid_t _clkid,
					struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_clock_getres;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

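/*
 * High-resolution clocks can only be handled in the vDSO when the ARM
 * architected timer provides the cycle counter.
 */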
static inline bool arm_vdso_hres_capable(void)
{
	return IS_ENABLED(CONFIG_ARM_ARCH_TIMER);
}
#define __arch_vdso_hres_capable arm_vdso_hres_capable

static __always_inline u64 __arch_get_hw_counter(int clock_mode,
						 const struct vdso_data *vd)
{
#ifdef CONFIG_ARM_ARCH_TIMER
	u64 cycle_now;

	/*
	 * The core code has already checked the clock mode, so getting here
	 * means we raced against a concurrent update. Return something; the
	 * core will do another round, see the mode change and fall back to
	 * the syscall.
	 */
	if (clock_mode == VDSO_CLOCKMODE_NONE)
		return 0;

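	/*
	 * The ISB is required so that the CNTVCT read below is not
	 * speculated before this point.
	 */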
	isb();
	cycle_now = read_sysreg(CNTVCT);

	return cycle_now;
#else
	/* Make GCC happy. This is compiled out anyway */
	return 0;
#endif
}

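/*
 * __get_datapage() returns the vDSO data page that the kernel maps
 * into userspace alongside the vDSO code.
 */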
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	return __get_datapage();
}

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
