1 /* 2 * sched_clock() for unstable CPU clocks 3 * 4 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra 5 * 6 * Updates and enhancements: 7 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com> 8 * 9 * Based on code by: 10 * Ingo Molnar <mingo@redhat.com> 11 * Guillaume Chazarain <guichaz@gmail.com> 12 * 13 * 14 * What this file implements: 15 * 16 * cpu_clock(i) provides a fast (execution time) high resolution 17 * clock with bounded drift between CPUs. The value of cpu_clock(i) 18 * is monotonic for constant i. The timestamp returned is in nanoseconds. 19 * 20 * ######################### BIG FAT WARNING ########################## 21 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # 22 * # go backwards !! # 23 * #################################################################### 24 * 25 * There is no strict promise about the base, although it tends to start 26 * at 0 on boot (but people really shouldn't rely on that). 27 * 28 * cpu_clock(i) -- can be used from any context, including NMI. 29 * local_clock() -- is cpu_clock() on the current CPU. 30 * 31 * sched_clock_cpu(i) 32 * 33 * How it is implemented: 34 * 35 * The implementation either uses sched_clock() when 36 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the 37 * sched_clock() is assumed to provide these properties (mostly it means 38 * the architecture provides a globally synchronized highres time source). 39 * 40 * Otherwise it tries to create a semi stable clock from a mixture of other 41 * clocks, including: 42 * 43 * - GTOD (clock monotomic) 44 * - sched_clock() 45 * - explicit idle events 46 * 47 * We use GTOD as base and use sched_clock() deltas to improve resolution. The 48 * deltas are filtered to provide monotonicity and keeping it within an 49 * expected window. 50 * 51 * Furthermore, explicit sleep and wakeup hooks allow us to account for time 52 * that is otherwise invisible (TSC gets stopped). 53 * 54 */ 55 #include "sched.h" 56 #include <linux/sched_clock.h> 57 58 /* 59 * Scheduler clock - returns current time in nanosec units. 60 * This is default implementation. 61 * Architectures and sub-architectures can override this. 62 */ 63 unsigned long long __weak sched_clock(void) 64 { 65 return (unsigned long long)(jiffies - INITIAL_JIFFIES) 66 * (NSEC_PER_SEC / HZ); 67 } 68 EXPORT_SYMBOL_GPL(sched_clock); 69 70 static DEFINE_STATIC_KEY_FALSE(sched_clock_running); 71 72 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 73 /* 74 * We must start with !__sched_clock_stable because the unstable -> stable 75 * transition is accurate, while the stable -> unstable transition is not. 76 * 77 * Similarly we start with __sched_clock_stable_early, thereby assuming we 78 * will become stable, such that there's only a single 1 -> 0 transition. 79 */ 80 static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable); 81 static int __sched_clock_stable_early = 1; 82 83 /* 84 * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset 85 */ 86 __read_mostly u64 __sched_clock_offset; 87 static __read_mostly u64 __gtod_offset; 88 89 struct sched_clock_data { 90 u64 tick_raw; 91 u64 tick_gtod; 92 u64 clock; 93 }; 94 95 static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data); 96 97 static inline struct sched_clock_data *this_scd(void) 98 { 99 return this_cpu_ptr(&sched_clock_data); 100 } 101 102 static inline struct sched_clock_data *cpu_sdc(int cpu) 103 { 104 return &per_cpu(sched_clock_data, cpu); 105 } 106 107 int sched_clock_stable(void) 108 { 109 return static_branch_likely(&__sched_clock_stable); 110 } 111 112 static void __scd_stamp(struct sched_clock_data *scd) 113 { 114 scd->tick_gtod = ktime_get_ns(); 115 scd->tick_raw = sched_clock(); 116 } 117 118 static void __set_sched_clock_stable(void) 119 { 120 struct sched_clock_data *scd; 121 122 /* 123 * Since we're still unstable and the tick is already running, we have 124 * to disable IRQs in order to get a consistent scd->tick* reading. 125 */ 126 local_irq_disable(); 127 scd = this_scd(); 128 /* 129 * Attempt to make the (initial) unstable->stable transition continuous. 130 */ 131 __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw); 132 local_irq_enable(); 133 134 printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n", 135 scd->tick_gtod, __gtod_offset, 136 scd->tick_raw, __sched_clock_offset); 137 138 static_branch_enable(&__sched_clock_stable); 139 tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE); 140 } 141 142 /* 143 * If we ever get here, we're screwed, because we found out -- typically after 144 * the fact -- that TSC wasn't good. This means all our clocksources (including 145 * ktime) could have reported wrong values. 146 * 147 * What we do here is an attempt to fix up and continue sort of where we left 148 * off in a coherent manner. 149 * 150 * The only way to fully avoid random clock jumps is to boot with: 151 * "tsc=unstable". 152 */ 153 static void __sched_clock_work(struct work_struct *work) 154 { 155 struct sched_clock_data *scd; 156 int cpu; 157 158 /* take a current timestamp and set 'now' */ 159 preempt_disable(); 160 scd = this_scd(); 161 __scd_stamp(scd); 162 scd->clock = scd->tick_gtod + __gtod_offset; 163 preempt_enable(); 164 165 /* clone to all CPUs */ 166 for_each_possible_cpu(cpu) 167 per_cpu(sched_clock_data, cpu) = *scd; 168 169 printk(KERN_WARNING "TSC found unstable after boot, most likely due to broken BIOS. Use 'tsc=unstable'.\n"); 170 printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n", 171 scd->tick_gtod, __gtod_offset, 172 scd->tick_raw, __sched_clock_offset); 173 174 static_branch_disable(&__sched_clock_stable); 175 } 176 177 static DECLARE_WORK(sched_clock_work, __sched_clock_work); 178 179 static void __clear_sched_clock_stable(void) 180 { 181 if (!sched_clock_stable()) 182 return; 183 184 tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE); 185 schedule_work(&sched_clock_work); 186 } 187 188 void clear_sched_clock_stable(void) 189 { 190 __sched_clock_stable_early = 0; 191 192 smp_mb(); /* matches sched_clock_init_late() */ 193 194 if (static_key_count(&sched_clock_running.key) == 2) 195 __clear_sched_clock_stable(); 196 } 197 198 static void __sched_clock_gtod_offset(void) 199 { 200 struct sched_clock_data *scd = this_scd(); 201 202 __scd_stamp(scd); 203 __gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod; 204 } 205 206 void __init sched_clock_init(void) 207 { 208 /* 209 * Set __gtod_offset such that once we mark sched_clock_running, 210 * sched_clock_tick() continues where sched_clock() left off. 211 * 212 * Even if TSC is buggered, we're still UP at this point so it 213 * can't really be out of sync. 214 */ 215 local_irq_disable(); 216 __sched_clock_gtod_offset(); 217 local_irq_enable(); 218 219 static_branch_inc(&sched_clock_running); 220 } 221 /* 222 * We run this as late_initcall() such that it runs after all built-in drivers, 223 * notably: acpi_processor and intel_idle, which can mark the TSC as unstable. 224 */ 225 static int __init sched_clock_init_late(void) 226 { 227 static_branch_inc(&sched_clock_running); 228 /* 229 * Ensure that it is impossible to not do a static_key update. 230 * 231 * Either {set,clear}_sched_clock_stable() must see sched_clock_running 232 * and do the update, or we must see their __sched_clock_stable_early 233 * and do the update, or both. 234 */ 235 smp_mb(); /* matches {set,clear}_sched_clock_stable() */ 236 237 if (__sched_clock_stable_early) 238 __set_sched_clock_stable(); 239 240 return 0; 241 } 242 late_initcall(sched_clock_init_late); 243 244 /* 245 * min, max except they take wrapping into account 246 */ 247 248 static inline u64 wrap_min(u64 x, u64 y) 249 { 250 return (s64)(x - y) < 0 ? x : y; 251 } 252 253 static inline u64 wrap_max(u64 x, u64 y) 254 { 255 return (s64)(x - y) > 0 ? x : y; 256 } 257 258 /* 259 * update the percpu scd from the raw @now value 260 * 261 * - filter out backward motion 262 * - use the GTOD tick value to create a window to filter crazy TSC values 263 */ 264 static u64 sched_clock_local(struct sched_clock_data *scd) 265 { 266 u64 now, clock, old_clock, min_clock, max_clock, gtod; 267 s64 delta; 268 269 again: 270 now = sched_clock(); 271 delta = now - scd->tick_raw; 272 if (unlikely(delta < 0)) 273 delta = 0; 274 275 old_clock = scd->clock; 276 277 /* 278 * scd->clock = clamp(scd->tick_gtod + delta, 279 * max(scd->tick_gtod, scd->clock), 280 * scd->tick_gtod + TICK_NSEC); 281 */ 282 283 gtod = scd->tick_gtod + __gtod_offset; 284 clock = gtod + delta; 285 min_clock = wrap_max(gtod, old_clock); 286 max_clock = wrap_max(old_clock, gtod + TICK_NSEC); 287 288 clock = wrap_max(clock, min_clock); 289 clock = wrap_min(clock, max_clock); 290 291 if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock) 292 goto again; 293 294 return clock; 295 } 296 297 static u64 sched_clock_remote(struct sched_clock_data *scd) 298 { 299 struct sched_clock_data *my_scd = this_scd(); 300 u64 this_clock, remote_clock; 301 u64 *ptr, old_val, val; 302 303 #if BITS_PER_LONG != 64 304 again: 305 /* 306 * Careful here: The local and the remote clock values need to 307 * be read out atomic as we need to compare the values and 308 * then update either the local or the remote side. So the 309 * cmpxchg64 below only protects one readout. 310 * 311 * We must reread via sched_clock_local() in the retry case on 312 * 32-bit kernels as an NMI could use sched_clock_local() via the 313 * tracer and hit between the readout of 314 * the low 32-bit and the high 32-bit portion. 315 */ 316 this_clock = sched_clock_local(my_scd); 317 /* 318 * We must enforce atomic readout on 32-bit, otherwise the 319 * update on the remote CPU can hit inbetween the readout of 320 * the low 32-bit and the high 32-bit portion. 321 */ 322 remote_clock = cmpxchg64(&scd->clock, 0, 0); 323 #else 324 /* 325 * On 64-bit kernels the read of [my]scd->clock is atomic versus the 326 * update, so we can avoid the above 32-bit dance. 327 */ 328 sched_clock_local(my_scd); 329 again: 330 this_clock = my_scd->clock; 331 remote_clock = scd->clock; 332 #endif 333 334 /* 335 * Use the opportunity that we have both locks 336 * taken to couple the two clocks: we take the 337 * larger time as the latest time for both 338 * runqueues. (this creates monotonic movement) 339 */ 340 if (likely((s64)(remote_clock - this_clock) < 0)) { 341 ptr = &scd->clock; 342 old_val = remote_clock; 343 val = this_clock; 344 } else { 345 /* 346 * Should be rare, but possible: 347 */ 348 ptr = &my_scd->clock; 349 old_val = this_clock; 350 val = remote_clock; 351 } 352 353 if (cmpxchg64(ptr, old_val, val) != old_val) 354 goto again; 355 356 return val; 357 } 358 359 /* 360 * Similar to cpu_clock(), but requires local IRQs to be disabled. 361 * 362 * See cpu_clock(). 363 */ 364 u64 sched_clock_cpu(int cpu) 365 { 366 struct sched_clock_data *scd; 367 u64 clock; 368 369 if (sched_clock_stable()) 370 return sched_clock() + __sched_clock_offset; 371 372 if (!static_branch_unlikely(&sched_clock_running)) 373 return sched_clock(); 374 375 preempt_disable_notrace(); 376 scd = cpu_sdc(cpu); 377 378 if (cpu != smp_processor_id()) 379 clock = sched_clock_remote(scd); 380 else 381 clock = sched_clock_local(scd); 382 preempt_enable_notrace(); 383 384 return clock; 385 } 386 EXPORT_SYMBOL_GPL(sched_clock_cpu); 387 388 void sched_clock_tick(void) 389 { 390 struct sched_clock_data *scd; 391 392 if (sched_clock_stable()) 393 return; 394 395 if (!static_branch_unlikely(&sched_clock_running)) 396 return; 397 398 lockdep_assert_irqs_disabled(); 399 400 scd = this_scd(); 401 __scd_stamp(scd); 402 sched_clock_local(scd); 403 } 404 405 void sched_clock_tick_stable(void) 406 { 407 if (!sched_clock_stable()) 408 return; 409 410 /* 411 * Called under watchdog_lock. 412 * 413 * The watchdog just found this TSC to (still) be stable, so now is a 414 * good moment to update our __gtod_offset. Because once we find the 415 * TSC to be unstable, any computation will be computing crap. 416 */ 417 local_irq_disable(); 418 __sched_clock_gtod_offset(); 419 local_irq_enable(); 420 } 421 422 /* 423 * We are going deep-idle (irqs are disabled): 424 */ 425 void sched_clock_idle_sleep_event(void) 426 { 427 sched_clock_cpu(smp_processor_id()); 428 } 429 EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event); 430 431 /* 432 * We just idled; resync with ktime. 433 */ 434 void sched_clock_idle_wakeup_event(void) 435 { 436 unsigned long flags; 437 438 if (sched_clock_stable()) 439 return; 440 441 if (unlikely(timekeeping_suspended)) 442 return; 443 444 local_irq_save(flags); 445 sched_clock_tick(); 446 local_irq_restore(flags); 447 } 448 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); 449 450 #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ 451 452 void __init sched_clock_init(void) 453 { 454 static_branch_inc(&sched_clock_running); 455 local_irq_disable(); 456 generic_sched_clock_init(); 457 local_irq_enable(); 458 } 459 460 u64 sched_clock_cpu(int cpu) 461 { 462 if (!static_branch_unlikely(&sched_clock_running)) 463 return 0; 464 465 return sched_clock(); 466 } 467 468 #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ 469 470 /* 471 * Running clock - returns the time that has elapsed while a guest has been 472 * running. 473 * On a guest this value should be local_clock minus the time the guest was 474 * suspended by the hypervisor (for any reason). 475 * On bare metal this function should return the same as local_clock. 476 * Architectures and sub-architectures can override this. 477 */ 478 u64 __weak running_clock(void) 479 { 480 return local_clock(); 481 } 482