#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/trace_clock.h>

#define CREATE_TRACE_POINTS
#include "trace_benchmark.h"

static struct task_struct *bm_event_thread;

static char bm_str[BENCHMARK_EVENT_STRLEN] = "START";

static u64 bm_total;
static u64 bm_totalsq;
static u64 bm_last;
static u64 bm_max;
static u64 bm_min;
static u64 bm_first;
static u64 bm_cnt;
static u64 bm_stddev;
static unsigned int bm_avg;
static unsigned int bm_std;

static bool ok_to_run;

/*
 * This gets called in a loop recording the time it took to write
 * the tracepoint. What it writes is the time statistics of the last
 * tracepoint write. As there is nothing to write the first time
 * it simply writes "START". As the first write is cold cache and
 * the rest is hot, we save off that time in bm_first and it is
 * reported as "first", which is shown in the second write to the
 * tracepoint. The "first" field is written within the statistics
 * from then on but never changes.
 */
static void trace_do_benchmark(void)
{
	u64 start;
	u64 stop;
	u64 delta;
	u64 stddev;
	u64 seed;
	u64 last_seed;
	unsigned int avg;
	unsigned int std = 0;

	/* Only run if the tracepoint is actually active */
	if (!trace_benchmark_event_enabled() || !tracing_is_on())
		return;

	local_irq_disable();
	start = trace_clock_local();
	trace_benchmark_event(bm_str);
	stop = trace_clock_local();
	local_irq_enable();

	bm_cnt++;

	delta = stop - start;

	/*
	 * The first read is cold cached, keep it separate from the
	 * other calculations.
	 */
	if (bm_cnt == 1) {
		bm_first = delta;
		scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
			  "first=%llu [COLD CACHED]", bm_first);
		return;
	}

	bm_last = delta;

	if (delta > bm_max)
		bm_max = delta;
	if (!bm_min || delta < bm_min)
		bm_min = delta;

	/*
	 * When bm_cnt is greater than UINT_MAX, it breaks the statistics
	 * accounting. Freeze the statistics when that happens.
	 * We should have enough data for the avg and stddev anyway.
	 */
	if (bm_cnt > UINT_MAX) {
		scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
			  "last=%llu first=%llu max=%llu min=%llu ** avg=%u std=%d std^2=%lld",
			  bm_last, bm_first, bm_max, bm_min, bm_avg, bm_std, bm_stddev);
		return;
	}

	bm_total += delta;
	bm_totalsq += delta * delta;

	if (bm_cnt > 1) {
		/*
		 * Apply Welford's method to calculate standard deviation:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = (u64)bm_cnt * bm_totalsq - bm_total * bm_total;
		do_div(stddev, (u32)bm_cnt);
		do_div(stddev, (u32)bm_cnt - 1);
	} else
		stddev = 0;

	delta = bm_total;
	do_div(delta, bm_cnt);
	avg = delta;

	if (stddev > 0) {
		int i = 0;
		/*
		 * stddev is the square of standard deviation but
		 * we want the actual number. Use the average
		 * as our seed to find the std.
		 *
		 * The next try is:
		 *  x = (x + N/x) / 2
		 *
		 * Where N is the squared number to find the square
		 * root of.
		 */
		seed = avg;
		do {
			last_seed = seed;
			seed = stddev;
			if (!last_seed)
				break;
			do_div(seed, last_seed);
			seed += last_seed;
			do_div(seed, 2);
		} while (i++ < 10 && last_seed != seed);

		std = seed;
	}

	scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
		  "last=%llu first=%llu max=%llu min=%llu avg=%u std=%d std^2=%lld",
		  bm_last, bm_first, bm_max, bm_min, avg, std, stddev);

	bm_std = std;
	bm_avg = avg;
	bm_stddev = stddev;
}
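
/*
 * For illustration only: the loop in trace_do_benchmark() above is
 * Newton's (Babylonian) method for an integer square root. Below is
 * the same iteration as a standalone sketch; the helper name
 * bm_isqrt() is an assumption made for this example, and the block is
 * compiled out so it does not affect this file's behavior.
 *
 * Worked example with num = 400 and seed = 25:
 *   x1 = (25 + 400/25) / 2 = 20
 *   x2 = (20 + 400/20) / 2 = 20   (converged: sqrt(400) = 20)
 */
#if 0
static u64 bm_isqrt(u64 num, u64 seed)
{
	u64 last_seed;
	int i = 0;

	do {
		last_seed = seed;
		seed = num;
		/* Guard against dividing by zero (mirrors the check above) */
		if (!last_seed)
			break;
		do_div(seed, last_seed);	/* seed = num / last_seed */
		seed += last_seed;		/* seed = last_seed + num / last_seed */
		do_div(seed, 2);		/* average the two estimates */
	} while (i++ < 10 && last_seed != seed);

	return seed;
}
#endif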

static int benchmark_event_kthread(void *arg)
{
	/* sleep a bit to make sure the tracepoint gets activated */
	msleep(100);

	while (!kthread_should_stop()) {

		trace_do_benchmark();

		/*
		 * We don't go to sleep, but let others run as well.
		 * This is basically a "yield()" to let any task that
		 * wants to run, schedule in, but if the CPU is idle,
		 * we'll keep burning cycles.
		 *
		 * Note the _rcu_qs() version of cond_resched() will
		 * notify synchronize_rcu_tasks() that this thread has
		 * passed a quiescent state for rcu_tasks. Otherwise
		 * this thread would never voluntarily schedule, which
		 * would block synchronize_rcu_tasks() indefinitely.
		 */
		cond_resched_rcu_qs();
	}

	return 0;
}

/*
 * When the benchmark tracepoint is enabled, it calls this
 * function and the thread that calls the tracepoint is created.
 */
int trace_benchmark_reg(void)
{
	if (!ok_to_run) {
		pr_warning("trace benchmark cannot be started via kernel command line\n");
		return -EBUSY;
	}

	bm_event_thread = kthread_run(benchmark_event_kthread,
				      NULL, "event_benchmark");
	if (IS_ERR(bm_event_thread)) {
		pr_warning("trace benchmark failed to create kernel thread\n");
		return PTR_ERR(bm_event_thread);
	}

	return 0;
}

/*
 * When the benchmark tracepoint is disabled, it calls this
 * function and the thread that calls the tracepoint is deleted
 * and all the numbers are reset.
 */
void trace_benchmark_unreg(void)
{
	if (!bm_event_thread)
		return;

	kthread_stop(bm_event_thread);
	bm_event_thread = NULL;

	strcpy(bm_str, "START");
	bm_total = 0;
	bm_totalsq = 0;
	bm_last = 0;
	bm_max = 0;
	bm_min = 0;
	bm_cnt = 0;
	/* These don't need to be reset but reset them anyway */
	bm_first = 0;
	bm_std = 0;
	bm_avg = 0;
	bm_stddev = 0;
}

static __init int ok_to_run_trace_benchmark(void)
{
	ok_to_run = true;

	return 0;
}

early_initcall(ok_to_run_trace_benchmark);
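
/*
 * Usage sketch (illustrative; the tracefs mount point and exact event
 * path are assumptions, typically /sys/kernel/tracing and
 * events/benchmark/benchmark_event):
 *
 *   # echo 1 > /sys/kernel/tracing/events/benchmark/benchmark_event/enable
 *   # cat /sys/kernel/tracing/trace_pipe
 *
 * Enabling the event invokes trace_benchmark_reg(), which starts the
 * "event_benchmark" kthread; each record then carries the statistics
 * string (bm_str) describing the previous tracepoint write. Disabling
 * the event invokes trace_benchmark_unreg(), which stops the thread
 * and resets all counters back to "START".
 */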