#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include "stat.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"

void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev / avg;

	return pct;
}
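/*
 * Illustrative example (hypothetical caller, sample values chosen only for
 * the arithmetic): feeding three samples through the helpers above
 *
 *	struct stats s;
 *
 *	init_stats(&s);
 *	update_stats(&s, 100);
 *	update_stats(&s, 110);
 *	update_stats(&s, 120);
 *
 * gives mean == 110 and M2 == 200, so
 *
 *	stddev_stats(&s)                      ~= sqrt(200 / 2 / 3) ~= 5.77
 *	rel_stddev_stats(5.77, avg_stats(&s)) ~= 5.25 (percent)
 */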
bool __perf_evsel_stat__is(struct perf_evsel *evsel,
			   enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->priv;

	return ps->id == id;
}

#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
};
#undef ID

void perf_stat_evsel_id_init(struct perf_evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->priv;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
			ps->id = i;
			break;
		}
	}
}

static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->priv;

	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}

static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
	evsel->priv = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->priv == NULL)
		return -ENOMEM;
	perf_evsel__reset_stat_priv(evsel);
	return 0;
}

static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
	zfree(&evsel->priv);
}

static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
					     int ncpus, int nthreads)
{
	struct perf_counts *counts;

	counts = perf_counts__new(ncpus, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
{
	int ncpus = perf_evsel__nr_cpus(evsel);
	int nthreads = thread_map__nr(evsel->threads);

	if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
	    perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
	    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
		return -ENOMEM;

	return 0;
}

int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	perf_evlist__free_stats(evlist);
	return -1;
}

void perf_evlist__free_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__free_stat_priv(evsel);
		perf_evsel__free_counts(evsel);
		perf_evsel__free_prev_raw_counts(evsel);
	}
}

void perf_evlist__reset_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__reset_stat_priv(evsel);
		perf_evsel__reset_counts(evsel);
	}
}

static void zero_per_pkg(struct perf_evsel *counter)
{
	if (counter->per_pkg_mask)
		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
}

static int check_per_pkg(struct perf_evsel *counter,
			 struct perf_counts_values *vals, int cpu, bool *skip)
{
	unsigned long *mask = counter->per_pkg_mask;
	struct cpu_map *cpus = perf_evsel__cpus(counter);
	int s;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = zalloc(MAX_NR_CPUS);
		if (!mask)
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * we do not consider an event that has not run as a good
	 * instance to mark a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu_map__get_socket(cpus, cpu, NULL);
	if (s < 0)
		return -1;

	*skip = test_and_set_bit(s, mask) == 1;
	return 0;
}
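/*
 * Illustration (assumed two-socket topology with CPUs 0-3 on socket 0 and
 * CPUs 4-7 on socket 1, and counts with non-zero run/ena): a per-pkg event
 * is accepted from exactly one CPU per socket
 *
 *	check_per_pkg(counter, vals, 0, &skip);	 skip == false, socket 0 claimed
 *	check_per_pkg(counter, vals, 1, &skip);	 skip == true,  socket 0 already counted
 *	check_per_pkg(counter, vals, 4, &skip);	 skip == false, socket 1 claimed
 */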
static int
process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel,
		       int cpu, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_SOCKET:
	case AGGR_NONE:
		if (!evsel->snapshot)
			perf_evsel__compute_deltas(evsel, cpu, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if (config->aggr_mode == AGGR_NONE)
			perf_stat__update_shadow_stats(evsel, count->values, cpu);
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		if (config->scale) {
			aggr->ena += count->ena;
			aggr->run += count->run;
		}
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}

static int process_counter_maps(struct perf_stat_config *config,
				struct perf_evsel *counter)
{
	int nthreads = thread_map__nr(counter->threads);
	int ncpus = perf_evsel__nr_cpus(counter);
	int cpu, thread;

	if (counter->system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			if (process_counter_values(config, counter, cpu, thread,
						   perf_counts(counter->counts, cpu, thread)))
				return -1;
		}
	}

	return 0;
}

int perf_stat_process_counter(struct perf_stat_config *config,
			      struct perf_evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->priv;
	u64 *count = counter->counts->aggr.values;
	u64 val;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;

	/*
	 * We calculate counter's data every interval,
	 * and the display code shows ps->res_stats
	 * avg value. We need to zero the stats for
	 * interval mode, otherwise overall avg running
	 * averages will be shown for each interval.
	 */
	if (config->interval)
		init_stats(ps->res_stats);

	if (counter->per_pkg)
		zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		perf_evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			perf_evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	val = counter->scale * *count;
	perf_stat__update_shadow_stats(counter, &val, 0);

	return 0;
}
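/*
 * In verbose mode, perf_stat_process_counter() above emits one line per
 * counter of the form (values purely illustrative)
 *
 *	instructions: 123456789 1000000000 1000000000
 *
 * i.e. the aggregated value followed by the enabled and running times, the
 * same three quantities that feed res_stats[0..2].
 */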
int perf_event__process_stat_event(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_session *session)
{
	struct perf_counts_values count;
	struct stat_event *st = &event->stat;
	struct perf_evsel *counter;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = perf_evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}

	*perf_counts(counter->counts, st->cpu, st->thread) = count;
	counter->supported = true;
	return 0;
}

size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct stat_event *st = (struct stat_event *)event;
	size_t ret;

	ret = fprintf(fp, "\n... id %" PRIu64 ", cpu %d, thread %d\n",
		      st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRIu64 ", enabled %" PRIu64 ", running %" PRIu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct stat_round_event *rd = (struct stat_round_event *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRIu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale %d\n", sc.scale);
	ret += fprintf(fp, "... interval %u\n", sc.interval);

	return ret;
}
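/*
 * Taken together, the fprintf helpers above produce dump output of the form
 * (values purely illustrative)
 *
 *	... id 1234, cpu 0, thread 0
 *	... value 1000000, enabled 800000, running 800000
 *	... time 1001000000, type INTERVAL
 *	... aggr_mode 0
 *	... scale 1
 *	... interval 1000
 */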