/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
 *
 *  Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * local_clock()      -- is cpu_clock() on the current cpu.
 *
 * sched_clock_cpu(i)
 *
 * How:
 *
 * The implementation uses sched_clock() directly when
 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK; in that case sched_clock() is assumed
 * to provide these properties (mostly it means the architecture provides
 * a globally synchronized highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep the result within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 */
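/*
 * A rough usage sketch (illustrative only; do_something() below is a
 * hypothetical stand-in for whatever work is being timed):
 *
 *	u64 t0, delta_ns;
 *
 *	t0 = local_clock();
 *	do_something();
 *	delta_ns = local_clock() - t0;
 *
 * The delta is only meaningful when both reads happen on the same CPU;
 * as per the warning above, comparing cpu_clock(i) with cpu_clock(j)
 * for i != j can make time appear to go backwards.
 */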
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
#include <linux/compiler.h>
#include <linux/tick.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __weak sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);

__read_mostly int sched_clock_running;

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
 * We must start with !__sched_clock_stable because the unstable -> stable
 * transition is accurate, while the stable -> unstable transition is not.
 *
 * Similarly we start with __sched_clock_stable_early, thereby assuming we
 * will become stable, such that there's only a single 1 -> 0 transition.
 */
static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
static int __sched_clock_stable_early = 1;

/*
 * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset
 */
static __read_mostly u64 raw_offset;
static __read_mostly u64 gtod_offset;

struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return this_cpu_ptr(&sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

int sched_clock_stable(void)
{
	return static_branch_likely(&__sched_clock_stable);
}

static void __set_sched_clock_stable(void)
{
	struct sched_clock_data *scd = this_scd();

	/*
	 * Attempt to make the (initial) unstable->stable transition continuous.
	 */
	raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw);

	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
			scd->tick_gtod, gtod_offset,
			scd->tick_raw,  raw_offset);

	static_branch_enable(&__sched_clock_stable);
	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

static void __clear_sched_clock_stable(struct work_struct *work)
{
	struct sched_clock_data *scd = this_scd();

	/*
	 * Attempt to make the stable->unstable transition continuous.
	 *
	 * Trouble is, this is typically called from the TSC watchdog
	 * timer, which is late by definition. This means the tick
	 * values can already be screwy.
	 *
	 * Still do what we can.
	 */
	gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod);

	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
			scd->tick_gtod, gtod_offset,
			scd->tick_raw,  raw_offset);

	static_branch_disable(&__sched_clock_stable);
	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);

void clear_sched_clock_stable(void)
{
	__sched_clock_stable_early = 0;

	smp_mb(); /* matches sched_clock_init_late() */

	if (sched_clock_running == 2)
		schedule_work(&sched_clock_work);
}

void sched_clock_init_late(void)
{
	sched_clock_running = 2;
	/*
	 * Ensure that it is impossible to not do a static_key update.
	 *
	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
	 * and do the update, or we must see their __sched_clock_stable_early
	 * and do the update, or both.
	 */
	smp_mb(); /* matches {set,clear}_sched_clock_stable() */

	if (__sched_clock_stable_early)
		__set_sched_clock_stable();
}

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
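/*
 * Illustrative example (hypothetical values): with x = 5 and
 * y = U64_MAX - 2, the unsigned difference x - y wraps around to 8, and
 * (s64)8 > 0, so wrap_max() picks x -- the right answer, since y is only a
 * few nanoseconds "behind" x across the u64 wrap, whereas a plain max()
 * would have returned y.
 */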
/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */

	clock = scd->tick_gtod + gtod_offset + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}
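/*
 * Worked example with made-up numbers (purely illustrative): assume
 * scd->tick_gtod = 1000000, gtod_offset = 0, scd->clock = 1003000 and
 * TICK_NSEC = 1000000 (all in ns).  A raw delta of 500000 gives
 * clock = 1500000, which lies inside the window [1003000, 2000000] and is
 * used as-is.  A wild delta of 50000000 would give 51000000 and gets
 * clamped down to 2000000, while a delta of 0 gives 1000000 and gets
 * pulled up to 1003000, so the per-CPU clock never moves backwards.
 */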
static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
	/*
	 * Careful here: The local and the remote clock values need to
	 * be read out atomically as we need to compare the values and
	 * then update either the local or the remote side. So the
	 * cmpxchg64 below only protects one readout.
	 *
	 * We must reread via sched_clock_local() in the retry case on
	 * 32bit as an NMI could use sched_clock_local() via the
	 * tracer and hit between the readout of
	 * the low 32bit and the high 32bit portion.
	 */
	this_clock = sched_clock_local(my_scd);
	/*
	 * We must enforce atomic readout on 32bit, otherwise the
	 * update on the remote cpu can hit in between the readout of
	 * the low 32bit and the high 32bit portion.
	 */
	remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
	/*
	 * On 64bit the read of [my]scd->clock is atomic versus the
	 * update, so we can avoid the above 32bit dance.
	 */
	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;
#endif

	/*
	 * Use the opportunity that we have both clock values read out
	 * to couple the two clocks: we take the larger time as the
	 * latest time for both runqueues. (this creates monotonic
	 * movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}

/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	if (sched_clock_stable())
		return sched_clock() + raw_offset;

	if (unlikely(!sched_clock_running))
		return 0ull;

	preempt_disable_notrace();
	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(sched_clock_cpu);

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * Update these values even if sched_clock_stable(), because it can
	 * become unstable at any point in time at which point we need some
	 * values to fall back on.
	 *
	 * XXX arguably we can skip this if we expose tsc_clocksource_reliable
	 */
	scd = this_scd();
	scd->tick_raw  = sched_clock();
	scd->tick_gtod = ktime_get_ns();

	if (!sched_clock_stable() && likely(sched_clock_running))
		sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog_sched();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

/*
 * Running clock - returns the time that has elapsed while a guest has been
 * running.
 * On a guest this value should be local_clock minus the time the guest was
 * suspended by the hypervisor (for any reason).
 * On bare metal this function should return the same as local_clock.
 * Architectures and sub-architectures can override this.
 */
u64 __weak running_clock(void)
{
	return local_clock();
}