/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * Copyright (C) 2019 ARM Limited.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
 *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <uapi/linux/time.h>
#include <asm/vgtod.h>
#include <asm/vvar.h>
#include <asm/unistd.h>
#include <asm/msr.h>
#include <asm/pvclock.h>
#include <clocksource/hyperv_timer.h>

#define __vdso_data (VVAR(_vdso_data))
#define __timens_vdso_data (TIMENS(_vdso_data))

#define VDSO_HAS_TIME 1

#define VDSO_HAS_CLOCK_GETRES 1

/*
 * Declare the memory-mapped vclock data pages. These come from hypervisors.
 * If we ever reintroduce direct access to an MMIO clock like the HPET, it
 * will go here as well.
 *
 * A load from any of these pages will segfault if the clock in question is
 * disabled, so appropriate compiler barriers and checks need to be used
 * to prevent stray loads.
 *
 * These declarations MUST NOT be const. The compiler will assume that
 * an extern const variable has genuinely constant contents, and the
 * resulting code won't work, since the whole point is that these pages
 * change over time, possibly while we're accessing them.
 */

#ifdef CONFIG_PARAVIRT_CLOCK
/*
 * This is the vCPU 0 pvclock page. We only use pvclock from the vDSO
 * if the hypervisor tells us that all vCPUs can get valid data from the
 * vCPU 0 page.
 */
extern struct pvclock_vsyscall_time_info pvclock_page
	__attribute__((visibility("hidden")));
#endif

#ifdef CONFIG_HYPERV_TIMER
extern struct ms_hyperv_tsc_page hvclock_page
	__attribute__((visibility("hidden")));
#endif

#ifdef CONFIG_TIME_NS
static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
{
	return __timens_vdso_data;
}
#endif

#ifndef BUILD_VDSO32

/*
 * Syscall fallbacks for the 64-bit vDSO. The x86-64 syscall ABI takes the
 * syscall number in %rax and the first two arguments in %rdi and %rsi; the
 * syscall instruction itself clobbers %rcx and %r11, so both are declared
 * in every clobber list.
 */
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	long ret;

	asm ("syscall" : "=a" (ret), "=m" (*_ts) :
	     "0" (__NR_clock_gettime), "D" (_clkid), "S" (_ts) :
	     "rcx", "r11");

	return ret;
}

static __always_inline
long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			   struct timezone *_tz)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (_tv), "S" (_tz) :
	    "memory", "rcx", "r11");

	return ret;
}

static __always_inline
long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	long ret;

	asm ("syscall" : "=a" (ret), "=m" (*_ts) :
	     "0" (__NR_clock_getres), "D" (_clkid), "S" (_ts) :
	     "rcx", "r11");

	return ret;
}

#else
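
/*
 * Syscall fallbacks for the 32-bit vDSO. The 32-bit syscall ABI takes the
 * syscall number in %eax and the first two arguments in %ebx and %ecx.
 * %ebx also serves as the GOT pointer in PIC code such as the vDSO, so the
 * asm below preserves it by hand: the caller's %ebx is stashed in %edx,
 * the first argument is loaded into %ebx, the kernel is entered through
 * __kernel_vsyscall, and %ebx is restored afterwards, leaving %edx as the
 * only extra clobber.
 */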

static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[clock], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*_ts)
		: "0" (__NR_clock_gettime64), [clock] "g" (_clkid), "c" (_ts)
		: "edx");

	return ret;
}

static __always_inline
long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[clock], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*_ts)
		: "0" (__NR_clock_gettime), [clock] "g" (_clkid), "c" (_ts)
		: "edx");

	return ret;
}

static __always_inline
long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			   struct timezone *_tz)
{
	long ret;

	asm(
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_gettimeofday), "g" (_tv), "c" (_tz)
		: "memory", "edx");

	return ret;
}

static __always_inline long
clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[clock], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*_ts)
		: "0" (__NR_clock_getres_time64), [clock] "g" (_clkid), "c" (_ts)
		: "edx");

	return ret;
}

static __always_inline
long clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[clock], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*_ts)
		: "0" (__NR_clock_getres), [clock] "g" (_clkid), "c" (_ts)
		: "edx");

	return ret;
}

#endif /* !BUILD_VDSO32 */

#ifdef CONFIG_PARAVIRT_CLOCK
static u64 vread_pvclock(void)
{
	const struct pvclock_vcpu_time_info *pvti = &pvclock_page.pvti;
	u32 version;
	u64 ret;

	/*
	 * Note: The kernel and hypervisor must guarantee that cpu ID
	 * number maps 1:1 to per-CPU pvclock time info.
	 *
	 * Because the hypervisor is entirely unaware of guest userspace
	 * preemption, it cannot guarantee that per-CPU pvclock time
	 * info is updated when the underlying CPU changes, or that its
	 * version is increased whenever the underlying CPU changes.
	 *
	 * On KVM, we are guaranteed that pvti updates for any vCPU are
	 * atomic as seen by *all* vCPUs. This is an even stronger
	 * guarantee than we get with a normal seqlock.
	 *
	 * On Xen, we don't appear to have that guarantee, but Xen still
	 * supplies a valid seqlock using the version field.
	 *
	 * We only do pvclock vdso timing at all if
	 * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to
	 * mean that all vCPUs have matching pvti and that the TSC is
	 * synced, so we can just look at vCPU 0's pvti.
	 */

	do {
		version = pvclock_read_begin(pvti);

		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT)))
			return U64_MAX;

		ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
	} while (pvclock_read_retry(pvti, version));

	return ret;
}
#endif

#ifdef CONFIG_HYPERV_TIMER
static u64 vread_hvclock(void)
{
	return hv_read_tsc_page(&hvclock_page);
}
#endif

static inline u64 __arch_get_hw_counter(s32 clock_mode)
{
	if (likely(clock_mode == VDSO_CLOCKMODE_TSC))
		return (u64)rdtsc_ordered();
	/*
	 * For any memory-mapped vclock type, we need to make sure that gcc
	 * doesn't cleverly hoist a load before the mode check. Otherwise we
	 * might end up touching the memory-mapped page even if the vclock in
	 * question isn't enabled, which will segfault. Hence the barriers.
	 */
#ifdef CONFIG_PARAVIRT_CLOCK
	if (clock_mode == VDSO_CLOCKMODE_PVCLOCK) {
		barrier();
		return vread_pvclock();
	}
#endif
#ifdef CONFIG_HYPERV_TIMER
	if (clock_mode == VDSO_CLOCKMODE_HVCLOCK) {
		barrier();
		return vread_hvclock();
	}
#endif
	return U64_MAX;
}
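
/*
 * For orientation: a simplified sketch (not verbatim) of how the generic
 * code in lib/vdso/gettimeofday.c consumes __arch_get_hw_counter() above
 * and vdso_calc_delta() below, with the timens and coarse-clock paths
 * elided:
 *
 *	do {
 *		seq = vdso_read_begin(vd);
 *		cycles = __arch_get_hw_counter(vd->clock_mode);
 *		if (cycles == U64_MAX)
 *			return -1;
 *		ns = vdso_ts->nsec;
 *		ns += vdso_calc_delta(cycles, vd->cycle_last, vd->mask,
 *				      vd->mult);
 *		ns >>= vd->shift;
 *		sec = vdso_ts->sec;
 *	} while (unlikely(vdso_read_retry(vd, seq)));
 *
 * A -1 return makes the caller take the matching syscall fallback above.
 */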

static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	return __vdso_data;
}

/*
 * x86 specific delta calculation.
 *
 * The regular implementation assumes that clocksource reads are globally
 * monotonic. The TSC can be slightly off across sockets which can cause
 * the regular delta calculation (@cycles - @last) to return a huge time
 * jump.
 *
 * Therefore it needs to be verified that @cycles is greater than @last.
 * If not, use @last, which is the base time of the current conversion
 * period.
 *
 * This variant also removes the masking of the subtraction because the
 * clocksource mask of all VDSO capable clocksources on x86 is U64_MAX,
 * which would make the masking a pointless operation. The compiler cannot
 * optimize it away as the mask comes from the vdso data and is not a
 * compile-time constant.
 */
static __always_inline
u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
	if (cycles > last)
		return (cycles - last) * mult;
	return 0;
}
#define vdso_calc_delta vdso_calc_delta

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */