1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_VDSO_GETTIMEOFDAY_H
3 #define _ASM_POWERPC_VDSO_GETTIMEOFDAY_H
4 
5 #ifdef __ASSEMBLY__
6 
7 #include <asm/ppc_asm.h>
8 
9 /*
10  * The macros sets two stack frames, one for the caller and one for the callee
11  * because there are no requirement for the caller to set a stack frame when
12  * calling VDSO so it may have omitted to set one, especially on PPC64
13  */
14 
/*
 * cvdso_call - call the C VDSO function \funct with the vdso data pointer
 * as its last argument (loaded into r5), translating its C return
 * convention into the syscall return convention: CR0[SO] clear and r3 == 0
 * on success, CR0[SO] set and r3 == positive error code on failure.
 */
.macro cvdso_call funct
  .cfi_startproc
	/* Dummy frame on behalf of the (possibly frameless) caller */
	PPC_STLU	r1, -PPC_MIN_STKFRM(r1)
	mflr		r0
  .cfi_register lr, r0
	/* Our own frame; LR is saved in the dummy frame's LR save slot */
	PPC_STLU	r1, -PPC_MIN_STKFRM(r1)
	PPC_STL		r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
	/* Preserve the TOC pointer across the call */
	PPC_STL		r2, PPC_MIN_STKFRM + STK_GOT(r1)
#endif
	get_datapage	r5
	addi		r5, r5, VDSO_DATA_OFFSET
	bl		DOTSYM(\funct)
	PPC_LL		r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
	PPC_LL		r2, PPC_MIN_STKFRM + STK_GOT(r1)
#endif
	/* C functions return 0 on success, negative errno on failure */
	cmpwi		r3, 0
	mtlr		r0
  .cfi_restore lr
	/* Pop both stack frames */
	addi		r1, r1, 2 * PPC_MIN_STKFRM
	crclr		so
	beqlr+
	/* Error path: flag the error in SO and return a positive errno */
	crset		so
	neg		r3, r3
	blr
  .cfi_endproc
.endm
43 
/*
 * cvdso_call_time - call the C VDSO time() implementation \funct with the
 * vdso data pointer as its second argument (loaded into r4).  Unlike
 * cvdso_call, no error translation is done: the C function's return value
 * is passed through in r3 and SO is unconditionally cleared.
 */
.macro cvdso_call_time funct
  .cfi_startproc
	/* Dummy frame on behalf of the (possibly frameless) caller */
	PPC_STLU	r1, -PPC_MIN_STKFRM(r1)
	mflr		r0
  .cfi_register lr, r0
	/* Our own frame; LR is saved in the dummy frame's LR save slot */
	PPC_STLU	r1, -PPC_MIN_STKFRM(r1)
	PPC_STL		r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
	/* Preserve the TOC pointer across the call */
	PPC_STL		r2, PPC_MIN_STKFRM + STK_GOT(r1)
#endif
	get_datapage	r4
	addi		r4, r4, VDSO_DATA_OFFSET
	bl		DOTSYM(\funct)
	PPC_LL		r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
	PPC_LL		r2, PPC_MIN_STKFRM + STK_GOT(r1)
#endif
	/* time() cannot fail: always report success via SO clear */
	crclr		so
	mtlr		r0
  .cfi_restore lr
	/* Pop both stack frames */
	addi		r1, r1, 2 * PPC_MIN_STKFRM
	blr
  .cfi_endproc
.endm
68 
69 #else
70 
71 #include <asm/vdso/timebase.h>
72 #include <asm/barrier.h>
73 #include <asm/unistd.h>
74 #include <uapi/linux/time.h>
75 
76 #define VDSO_HAS_CLOCK_GETRES		1
77 
78 #define VDSO_HAS_TIME			1
79 
/*
 * do_syscall_2 - issue system call number _r0 with two arguments via "sc".
 *
 * The return value is bound to r3, which the kernel also uses for the
 * result/error code.  On failure the kernel sets CR0[SO]; "bns+ 1f" skips
 * the negation when SO is clear (success), otherwise the positive error
 * code is negated so callers get the usual negative-errno convention.
 *
 * The clobber list covers r5-r12, cr0 and ctr, which are not preserved
 * across the syscall; "memory" is clobbered because the kernel may write
 * through the pointer arguments.
 */
static __always_inline int do_syscall_2(const unsigned long _r0, const unsigned long _r3,
					const unsigned long _r4)
{
	register long r0 asm("r0") = _r0;
	register unsigned long r3 asm("r3") = _r3;
	register unsigned long r4 asm("r4") = _r4;
	register int ret asm ("r3");	/* aliases r3: syscall result comes back there */

	asm volatile(
		"       sc\n"
		"	bns+	1f\n"
		"	neg	%0, %0\n"
		"1:\n"
	: "=r" (ret), "+r" (r4), "+r" (r0)
	: "r" (r3)
	: "memory", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "cr0", "ctr");

	return ret;
}
99 
100 static __always_inline
101 int gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz)
102 {
103 	return do_syscall_2(__NR_gettimeofday, (unsigned long)_tv, (unsigned long)_tz);
104 }
105 
106 static __always_inline
107 int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
108 {
109 	return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts);
110 }
111 
112 static __always_inline
113 int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
114 {
115 	return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
116 }
117 
118 #ifdef CONFIG_VDSO32
119 
120 #define BUILD_VDSO32		1
121 
122 static __always_inline
123 int clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
124 {
125 	return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts);
126 }
127 
128 static __always_inline
129 int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
130 {
131 	return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
132 }
133 #endif
134 
/*
 * Read the hardware counter backing the VDSO clock: the timebase register.
 * Both parameters are ignored; the timebase is presumably always readable
 * from userspace on VDSO-capable powerpc (see vdso_clocksource_ok() below).
 */
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
						 const struct vdso_data *vd)
{
	return get_tb();
}
140 
/* Implemented in the VDSO; returns the vdso data pointer. */
const struct vdso_data *__arch_get_vdso_data(void);

/*
 * No per-call clocksource validity check is needed: unconditionally
 * report the clocksource as usable.
 */
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
	return true;
}
#define vdso_clocksource_ok vdso_clocksource_ok
148 
149 /*
150  * powerpc specific delta calculation.
151  *
152  * This variant removes the masking of the subtraction because the
153  * clocksource mask of all VDSO capable clocksources on powerpc is U64_MAX
154  * which would result in a pointless operation. The compiler cannot
155  * optimize it away as the mask comes from the vdso data and is not compile
156  * time constant.
157  */
158 static __always_inline u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
159 {
160 	return (cycles - last) * mult;
161 }
162 #define vdso_calc_delta vdso_calc_delta
163 
#ifndef __powerpc64__
/*
 * Right-shift a 64-bit value on 32-bit powerpc using two 32-bit halves,
 * with a fast path when the high word shifts down to zero.
 *
 * NOTE(review): "hi << (32 - shift)" is undefined for shift == 0 (and the
 * split assumes shift < 32), so this presumably relies on the clocksource
 * shift being in (0, 32) — confirm against the vdso data producers.
 */
static __always_inline u64 vdso_shift_ns(u64 ns, unsigned long shift)
{
	u32 hi = ns >> 32;
	u32 lo = ns;

	lo >>= shift;
	lo |= hi << (32 - shift);	/* bits crossing the word boundary */
	hi >>= shift;

	/* Common case: result fits in 32 bits */
	if (likely(hi == 0))
		return lo;

	return ((u64)hi << 32) | lo;
}
#define vdso_shift_ns vdso_shift_ns
#endif
181 
/* C implementations behind the cvdso_call/cvdso_call_time asm wrappers. */
#ifdef __powerpc64__
/* PPC64: 64-bit time types only */
int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts,
			     const struct vdso_data *vd);
int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res,
			    const struct vdso_data *vd);
#else
/* PPC32: legacy 32-bit time types, plus a separate 64-bit gettime entry */
int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts,
			     const struct vdso_data *vd);
int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts,
			       const struct vdso_data *vd);
int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res,
			    const struct vdso_data *vd);
#endif
int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz,
			    const struct vdso_data *vd);
__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time,
				    const struct vdso_data *vd);
199 #endif /* __ASSEMBLY__ */
200 
201 #endif /* _ASM_POWERPC_VDSO_GETTIMEOFDAY_H */
202