/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  - local:  CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs, nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture-implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on local_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the effect if
 * we are affected is that we will have an obviously bogus
 * timestamp on a trace event - i.e. not life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD-derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = sched_clock_cpu(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * sched_clock_cpu() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
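
/*
 * Illustrative sketch, not part of the original file: the clamp above
 * uses a signed 64-bit difference, so the "did time go backwards?"
 * test stays correct even across counter wraparound. A hypothetical
 * worked example (the example_* name is an assumption, not kernel API):
 */
#if 0
static void example_monotonic_clamp(void)
{
	u64 prev = 1000, now = 990;

	/* now lags prev: (s64)(990 - 1000) == -10, which is < 0 ... */
	if ((s64)(now - prev) < 0)
		now = prev + 1;		/* ... so clamp: now becomes 1001 */
}
#endif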

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
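
/*
 * Illustrative sketch, not part of the original file: how a caller
 * might pick among these clocks. The function and its parameter are
 * hypothetical, not an exported kernel interface.
 */
#if 0
static u64 example_stamp_event(bool need_global_order)
{
	/*
	 * trace_clock_global() serializes on a spinlock, so reserve it
	 * for events that must be ordered across CPUs; otherwise the
	 * lockless, CPU-local clock is the cheapest choice.
	 */
	return need_global_order ? trace_clock_global()
				 : trace_clock_local();
}
#endif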