/*
 * sched_clock for unstable cpu clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *  Ingo Molnar <mingo@redhat.com>
 *  Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * local_clock()      -- is cpu_clock() on the current cpu.
 * sched_clock_cpu(i) -- the underlying per-cpu machinery both of the
 *                       above are built on (see below).
 *
 * How:
 *
 * When !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK the implementation simply uses
 * sched_clock(); in that case sched_clock() is assumed to provide these
 * properties (mostly it means the architecture provides a globally
 * synchronized highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep the result within
 * an expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
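/*
 * Usage sketch (illustrative only, not part of the original file): the safe
 * pattern is to take both timestamps on the same CPU, e.g. with preemption
 * disabled, so the per-cpu monotonicity promise above applies. do_work() is
 * a placeholder:
 *
 *	u64 t0, t1;
 *
 *	preempt_disable();
 *	t0 = local_clock();
 *	do_work();
 *	t1 = local_clock();
 *	preempt_enable();
 *
 * t1 - t0 is then a non-negative duration in nanoseconds. Subtracting
 * cpu_clock(i) from cpu_clock(j) for i != j only yields a value with
 * bounded error and may even be negative, per the warning above.
 */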
/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
        return (unsigned long long)(jiffies - INITIAL_JIFFIES)
                                        * (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);

__read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
static int __sched_clock_stable_early;

int sched_clock_stable(void)
{
        return static_key_false(&__sched_clock_stable);
}

static void __set_sched_clock_stable(void)
{
        if (!sched_clock_stable())
                static_key_slow_inc(&__sched_clock_stable);
}

void set_sched_clock_stable(void)
{
        __sched_clock_stable_early = 1;

        smp_mb(); /* matches sched_clock_init() */

        if (!sched_clock_running)
                return;

        __set_sched_clock_stable();
}

static void __clear_sched_clock_stable(struct work_struct *work)
{
        /* XXX worry about clock continuity */
        if (sched_clock_stable())
                static_key_slow_dec(&__sched_clock_stable);
}

static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);

void clear_sched_clock_stable(void)
{
        __sched_clock_stable_early = 0;

        smp_mb(); /* matches sched_clock_init() */

        if (!sched_clock_running)
                return;

        schedule_work(&sched_clock_work);
}

struct sched_clock_data {
        u64 tick_raw;
        u64 tick_gtod;
        u64 clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
        return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
        return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
        u64 ktime_now = ktime_to_ns(ktime_get());
        int cpu;

        for_each_possible_cpu(cpu) {
                struct sched_clock_data *scd = cpu_sdc(cpu);

                scd->tick_raw = 0;
                scd->tick_gtod = ktime_now;
                scd->clock = ktime_now;
        }

        sched_clock_running = 1;

        /*
         * Ensure that it is impossible to not do a static_key update.
         *
         * Either {set,clear}_sched_clock_stable() must see sched_clock_running
         * and do the update, or we must see their __sched_clock_stable_early
         * and do the update, or both.
         */
        smp_mb(); /* matches {set,clear}_sched_clock_stable() */

        if (__sched_clock_stable_early)
                __set_sched_clock_stable();
        else
                __clear_sched_clock_stable(NULL);
}

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
        return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
        return (s64)(x - y) > 0 ? x : y;
}
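/*
 * Worked example (illustrative only, not part of the original file): the
 * signed-difference trick keeps the comparison correct across a u64 wrap.
 * With x = 5 and y = ULLONG_MAX - 2, a plain "x > y" is false, but
 *
 *	(s64)(x - y) == 8 > 0
 *
 * so wrap_max(x, y) picks x, treating x as "ahead" of y by 8 ns rather than
 * behind it by nearly 2^64 ns. This only works while the two values are
 * within 2^63 of each other, which is the case here since the compared
 * clock values differ by at most a few ticks.
 */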
/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
        u64 now, clock, old_clock, min_clock, max_clock;
        s64 delta;

again:
        now = sched_clock();
        delta = now - scd->tick_raw;
        if (unlikely(delta < 0))
                delta = 0;

        old_clock = scd->clock;

        /*
         * scd->clock = clamp(scd->tick_gtod + delta,
         *                    max(scd->tick_gtod, scd->clock),
         *                    scd->tick_gtod + TICK_NSEC);
         */

        clock = scd->tick_gtod + delta;
        min_clock = wrap_max(scd->tick_gtod, old_clock);
        max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

        clock = wrap_max(clock, min_clock);
        clock = wrap_min(clock, max_clock);

        if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
                goto again;

        return clock;
}

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
        struct sched_clock_data *my_scd = this_scd();
        u64 this_clock, remote_clock;
        u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
        /*
         * Careful here: The local and the remote clock values need to
         * be read out atomically as we need to compare the values and
         * then update either the local or the remote side. So the
         * cmpxchg64 below only protects one readout.
         *
         * We must reread via sched_clock_local() in the retry case on
         * 32-bit as an NMI could use sched_clock_local() via the
         * tracer and hit between the readout of the low 32-bit and
         * the high 32-bit portion.
         */
        this_clock = sched_clock_local(my_scd);
        /*
         * We must enforce atomic readout on 32-bit, otherwise the
         * update on the remote cpu can hit in between the readout of
         * the low 32-bit and the high 32-bit portion.
         */
        remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
        /*
         * On 64-bit the read of [my]scd->clock is atomic versus the
         * update, so we can avoid the above 32-bit dance.
         */
        sched_clock_local(my_scd);
again:
        this_clock = my_scd->clock;
        remote_clock = scd->clock;
#endif

        /*
         * Use the opportunity that we have both clock values in hand
         * to couple the two clocks: we take the larger time as the
         * latest time for both runqueues. (this creates monotonic
         * movement)
         */
        if (likely((s64)(remote_clock - this_clock) < 0)) {
                ptr = &scd->clock;
                old_val = remote_clock;
                val = this_clock;
        } else {
                /*
                 * Should be rare, but possible:
                 */
                ptr = &my_scd->clock;
                old_val = this_clock;
                val = remote_clock;
        }

        if (cmpxchg64(ptr, old_val, val) != old_val)
                goto again;

        return val;
}
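/*
 * Illustrative numbers (not part of the original file): if the local CPU
 * reads this_clock = 1000300 while the remote scd->clock is 1000100, the
 * cmpxchg64 above advances the remote clock to 1000300, so a later reader
 * of that remote clock can never observe a value behind what we just
 * returned. In the rare opposite case the local clock is pulled forward
 * instead. Either way the larger value wins, which keeps the per-cpu
 * clocks loosely coupled, while the window filter in sched_clock_local()
 * keeps their mutual drift bounded to roughly one tick.
 */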
/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
        struct sched_clock_data *scd;
        u64 clock;

        if (sched_clock_stable())
                return sched_clock();

        if (unlikely(!sched_clock_running))
                return 0ull;

        preempt_disable();
        scd = cpu_sdc(cpu);

        if (cpu != smp_processor_id())
                clock = sched_clock_remote(scd);
        else
                clock = sched_clock_local(scd);
        preempt_enable();

        return clock;
}

void sched_clock_tick(void)
{
        struct sched_clock_data *scd;
        u64 now, now_gtod;

        if (sched_clock_stable())
                return;

        if (unlikely(!sched_clock_running))
                return;

        WARN_ON_ONCE(!irqs_disabled());

        scd = this_scd();
        now_gtod = ktime_to_ns(ktime_get());
        now = sched_clock();

        scd->tick_raw = now;
        scd->tick_gtod = now_gtod;
        sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
        sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
        if (timekeeping_suspended)
                return;

        sched_clock_tick();
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
u64 cpu_clock(int cpu)
{
        if (!sched_clock_stable())
                return sched_clock_cpu(cpu);

        return sched_clock();
}

/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
        if (!sched_clock_stable())
                return sched_clock_cpu(raw_smp_processor_id());

        return sched_clock();
}

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
        sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
        if (unlikely(!sched_clock_running))
                return 0;

        return sched_clock();
}

u64 cpu_clock(int cpu)
{
        return sched_clock();
}

u64 local_clock(void)
{
        return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

EXPORT_SYMBOL_GPL(cpu_clock);
EXPORT_SYMBOL_GPL(local_clock);
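/*
 * Illustrative only (not part of the original file): an architecture with a
 * constant-frequency, globally synchronized counter would typically override
 * the weak sched_clock() above along these lines; read_my_counter_ns() is a
 * hypothetical helper that returns the counter already scaled to nanoseconds:
 *
 *	unsigned long long sched_clock(void)
 *	{
 *		return read_my_counter_ns();
 *	}
 *
 * Real implementations usually scale raw cycles with a mult/shift pair
 * rather than a division. On such a system the arch code can then call
 * set_sched_clock_stable(), which flips the static key so that
 * cpu_clock()/local_clock() bypass the per-cpu filtering entirely.
 */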