/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_VDSO_GETTIMEOFDAY_H
#define _ASM_POWERPC_VDSO_GETTIMEOFDAY_H

#ifdef __ASSEMBLY__

#include <asm/ppc_asm.h>

/*
 * The macros set up two stack frames, one for the caller and one for the
 * callee, because there is no requirement for the caller to set a stack
 * frame when calling the VDSO, so it may have omitted to set one,
 * especially on PPC64.
 */

/*
 * Trampoline from the VDSO entry point into the C implementation \funct.
 *
 * Saves LR (and, on PPC64, the TOC pointer r2) across the call, passes a
 * pointer to the vDSO data as the third argument (r5), and translates the
 * C return value into the userspace syscall convention: on success CR0.SO
 * is cleared and r3 holds the result; on failure (non-zero return) CR0.SO
 * is set and r3 holds the negated (positive) error code.
 */
.macro cvdso_call funct
  .cfi_startproc
	PPC_STLU	r1, -PPC_MIN_STKFRM(r1)	/* frame for the caller */
	mflr	r0
  .cfi_register lr, r0
	PPC_STLU	r1, -PPC_MIN_STKFRM(r1)	/* frame for the callee */
	PPC_STL	r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
	/* Preserve the TOC pointer across the call */
	PPC_STL	r2, PPC_MIN_STKFRM + STK_GOT(r1)
#endif
	/* r5 = &vdso_data, third parameter of \funct */
	get_datapage	r5
	addi	r5, r5, VDSO_DATA_OFFSET
	bl	DOTSYM(\funct)
	PPC_LL	r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
	PPC_LL	r2, PPC_MIN_STKFRM + STK_GOT(r1)
#endif
	cmpwi	r3, 0			/* did \funct report an error? */
	mtlr	r0
  .cfi_restore lr
	addi	r1, r1, 2 * PPC_MIN_STKFRM	/* pop both frames */
	crclr	so			/* assume success: clear CR0.SO */
	beqlr+				/* r3 == 0: return success */
	crset	so			/* error: flag it via CR0.SO ... */
	neg	r3, r3			/* ... and return the positive errno */
	blr
  .cfi_endproc
.endm

/*
 * Same trampoline as cvdso_call, but for time(): the vdso_data pointer is
 * the second argument (r4), and CR0.SO is cleared unconditionally because
 * the call cannot fail (no error check on the return value).
 */
.macro cvdso_call_time funct
  .cfi_startproc
	PPC_STLU	r1, -PPC_MIN_STKFRM(r1)	/* frame for the caller */
	mflr	r0
  .cfi_register lr, r0
	PPC_STLU	r1, -PPC_MIN_STKFRM(r1)	/* frame for the callee */
	PPC_STL	r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
	/* Preserve the TOC pointer across the call */
	PPC_STL	r2, PPC_MIN_STKFRM + STK_GOT(r1)
#endif
	/* r4 = &vdso_data, second parameter of \funct */
	get_datapage	r4
	addi	r4, r4, VDSO_DATA_OFFSET
	bl	DOTSYM(\funct)
	PPC_LL	r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
	PPC_LL	r2, PPC_MIN_STKFRM + STK_GOT(r1)
#endif
	crclr	so			/* time() never fails */
	mtlr	r0
  .cfi_restore lr
	addi	r1, r1, 2 * PPC_MIN_STKFRM	/* pop both frames */
	blr
  .cfi_endproc
.endm

#else

#include <asm/vdso/timebase.h>
#include <asm/barrier.h>
#include <asm/unistd.h>
#include <uapi/linux/time.h>

#define VDSO_HAS_CLOCK_GETRES		1

#define VDSO_HAS_TIME			1

/*
 * Issue a real two-argument syscall as the fallback path when the vDSO
 * cannot answer from the data page.
 *
 * _r0 is the syscall number, _r3/_r4 the two arguments.  After 'sc' the
 * kernel sets CR0.SO on error (checked with 'bns+'), in which case the
 * return value in r3 is negated so that callers get the usual negative
 * error code.  All registers the syscall may clobber (r5-r12, cr0, ctr)
 * are listed so the compiler does not cache values across the call.
 */
static __always_inline int do_syscall_2(const unsigned long _r0, const unsigned long _r3,
					const unsigned long _r4)
{
	register long r0 asm("r0") = _r0;
	register unsigned long r3 asm("r3") = _r3;
	register unsigned long r4 asm("r4") = _r4;
	register int ret asm ("r3");

	asm volatile(
		" sc\n"
		" bns+ 1f\n"
		" neg %0, %0\n"
		"1:\n"
		: "=r" (ret), "+r" (r4), "+r" (r0)
		: "r" (r3)
		: "memory", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "cr0", "ctr");

	return ret;
}

/* Fallback: real gettimeofday() syscall. */
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz)
{
	return do_syscall_2(__NR_gettimeofday, (unsigned long)_tv, (unsigned long)_tz);
}

#ifdef __powerpc64__

/* Fallback: real clock_gettime() syscall (64-bit timespec is native). */
static __always_inline
int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts);
}

/* Fallback: real clock_getres() syscall (64-bit timespec is native). */
static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
}

#else

#define BUILD_VDSO32		1

/* 32-bit: __kernel_timespec needs the time64 syscall variants. */
static __always_inline
int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	return do_syscall_2(__NR_clock_gettime64, _clkid, (unsigned long)_ts);
}

static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	return do_syscall_2(__NR_clock_getres_time64, _clkid, (unsigned long)_ts);
}

/* 32-bit legacy entry points taking the old 32-bit timespec. */
static __always_inline
int clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts);
}

static __always_inline
int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
}
#endif

/* Read the hardware counter: the timebase register. clock_mode is unused. */
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
						 const struct vdso_data *vd)
{
	return get_tb();
}

const struct vdso_data *__arch_get_vdso_data(void);

/* The timebase is always usable from userspace, so never fall back here. */
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
	return true;
}
#define vdso_clocksource_ok vdso_clocksource_ok

/*
 * powerpc specific delta calculation.
 *
 * This variant removes the masking of the subtraction because the
 * clocksource mask of all VDSO capable clocksources on powerpc is U64_MAX
 * which would result in a pointless operation. The compiler cannot
 * optimize it away as the mask comes from the vdso data and is not compile
 * time constant.
 */
static __always_inline u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
	return (cycles - last) * mult;
}
#define vdso_calc_delta vdso_calc_delta

#ifndef __powerpc64__
/*
 * 64-bit right shift done with 32-bit operations, with an early exit when
 * the high word shifts to zero (the common case for nanosecond values).
 * NOTE(review): assumes 0 < shift < 32 — 'hi << (32 - shift)' would be
 * undefined for shift == 0; confirm callers never pass 0.
 */
static __always_inline u64 vdso_shift_ns(u64 ns, unsigned long shift)
{
	u32 hi = ns >> 32;
	u32 lo = ns;

	lo >>= shift;
	lo |= hi << (32 - shift);
	hi >>= shift;

	if (likely(hi == 0))
		return lo;

	return ((u64)hi << 32) | lo;
}
#define vdso_shift_ns vdso_shift_ns
#endif

/*
 * C implementations called from the assembly trampolines above; on 32-bit,
 * separate entry points exist for the old 32-bit and the 64-bit timespec.
 */
#ifdef __powerpc64__
int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts,
			     const struct vdso_data *vd);
int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res,
			    const struct vdso_data *vd);
#else
int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts,
			     const struct vdso_data *vd);
int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts,
			       const struct vdso_data *vd);
int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res,
			    const struct vdso_data *vd);
#endif
int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz,
			    const struct vdso_data *vd);
__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time,
				    const struct vdso_data *vd);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_VDSO_GETTIMEOFDAY_H */