1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/compiler.h> 3 #include <string.h> 4 #include <perf/cpumap.h> 5 #include <perf/evlist.h> 6 #include "metricgroup.h" 7 #include "tests.h" 8 #include "pmu-events/pmu-events.h" 9 #include "evlist.h" 10 #include "rblist.h" 11 #include "debug.h" 12 #include "expr.h" 13 #include "stat.h" 14 #include "pmu.h" 15 16 static struct pmu_event pme_test[] = { 17 { 18 .metric_expr = "inst_retired.any / cpu_clk_unhalted.thread", 19 .metric_name = "IPC", 20 .metric_group = "group1", 21 }, 22 { 23 .metric_expr = "idq_uops_not_delivered.core / (4 * (( ( cpu_clk_unhalted.thread / 2 ) * " 24 "( 1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk ) )))", 25 .metric_name = "Frontend_Bound_SMT", 26 }, 27 { 28 .metric_expr = "l1d\\-loads\\-misses / inst_retired.any", 29 .metric_name = "dcache_miss_cpi", 30 }, 31 { 32 .metric_expr = "l1i\\-loads\\-misses / inst_retired.any", 33 .metric_name = "icache_miss_cycles", 34 }, 35 { 36 .metric_expr = "(dcache_miss_cpi + icache_miss_cycles)", 37 .metric_name = "cache_miss_cycles", 38 .metric_group = "group1", 39 }, 40 { 41 .metric_expr = "l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit", 42 .metric_name = "DCache_L2_All_Hits", 43 }, 44 { 45 .metric_expr = "max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + " 46 "l2_rqsts.pf_miss + l2_rqsts.rfo_miss", 47 .metric_name = "DCache_L2_All_Miss", 48 }, 49 { 50 .metric_expr = "dcache_l2_all_hits + dcache_l2_all_miss", 51 .metric_name = "DCache_L2_All", 52 }, 53 { 54 .metric_expr = "d_ratio(dcache_l2_all_hits, dcache_l2_all)", 55 .metric_name = "DCache_L2_Hits", 56 }, 57 { 58 .metric_expr = "d_ratio(dcache_l2_all_miss, dcache_l2_all)", 59 .metric_name = "DCache_L2_Misses", 60 }, 61 { 62 .metric_expr = "ipc + m2", 63 .metric_name = "M1", 64 }, 65 { 66 .metric_expr = "ipc + m1", 67 .metric_name = "M2", 68 }, 69 { 70 .metric_expr = "1/m3", 71 .metric_name = "M3", 72 }, 73 { 74 .metric_expr = "64 * l1d.replacement / 1000000000 / duration_time", 75 .metric_name = "L1D_Cache_Fill_BW", 76 }, 77 { 78 .name = NULL, 79 } 80 }; 81 82 static struct pmu_events_map map = { 83 .cpuid = "test", 84 .version = "1", 85 .type = "core", 86 .table = pme_test, 87 }; 88 89 struct value { 90 const char *event; 91 u64 val; 92 }; 93 94 static u64 find_value(const char *name, struct value *values) 95 { 96 struct value *v = values; 97 98 while (v->event) { 99 if (!strcmp(name, v->event)) 100 return v->val; 101 v++; 102 } 103 return 0; 104 } 105 106 static void load_runtime_stat(struct runtime_stat *st, struct evlist *evlist, 107 struct value *vals) 108 { 109 struct evsel *evsel; 110 u64 count; 111 112 evlist__for_each_entry(evlist, evsel) { 113 count = find_value(evsel->name, vals); 114 perf_stat__update_shadow_stats(evsel, count, 0, st); 115 if (!strcmp(evsel->name, "duration_time")) 116 update_stats(&walltime_nsecs_stats, count); 117 } 118 } 119 120 static double compute_single(struct rblist *metric_events, struct evlist *evlist, 121 struct runtime_stat *st, const char *name) 122 { 123 struct metric_expr *mexp; 124 struct metric_event *me; 125 struct evsel *evsel; 126 127 evlist__for_each_entry(evlist, evsel) { 128 me = metricgroup__lookup(metric_events, evsel, false); 129 if (me != NULL) { 130 list_for_each_entry (mexp, &me->head, nd) { 131 if (strcmp(mexp->metric_name, name)) 132 continue; 133 return test_generic_metric(mexp, 0, st); 134 } 135 } 136 } 137 return 0.; 138 } 139 140 static int __compute_metric(const char *name, struct value *vals, 141 const char *name1, double *ratio1, 142 const char *name2, double *ratio2) 143 { 144 struct rblist metric_events = { 145 .nr_entries = 0, 146 }; 147 struct perf_cpu_map *cpus; 148 struct runtime_stat st; 149 struct evlist *evlist; 150 int err; 151 152 /* 153 * We need to prepare evlist for stat mode running on CPU 0 154 * because that's where all the stats are going to be created. 155 */ 156 evlist = evlist__new(); 157 if (!evlist) 158 return -ENOMEM; 159 160 cpus = perf_cpu_map__new("0"); 161 if (!cpus) { 162 evlist__delete(evlist); 163 return -ENOMEM; 164 } 165 166 perf_evlist__set_maps(&evlist->core, cpus, NULL); 167 runtime_stat__init(&st); 168 169 /* Parse the metric into metric_events list. */ 170 err = metricgroup__parse_groups_test(evlist, &map, name, 171 false, false, 172 &metric_events); 173 if (err) 174 goto out; 175 176 err = evlist__alloc_stats(evlist, false); 177 if (err) 178 goto out; 179 180 /* Load the runtime stats with given numbers for events. */ 181 load_runtime_stat(&st, evlist, vals); 182 183 /* And execute the metric */ 184 if (name1 && ratio1) 185 *ratio1 = compute_single(&metric_events, evlist, &st, name1); 186 if (name2 && ratio2) 187 *ratio2 = compute_single(&metric_events, evlist, &st, name2); 188 189 out: 190 /* ... cleanup. */ 191 metricgroup__rblist_exit(&metric_events); 192 runtime_stat__exit(&st); 193 evlist__free_stats(evlist); 194 perf_cpu_map__put(cpus); 195 evlist__delete(evlist); 196 return err; 197 } 198 199 static int compute_metric(const char *name, struct value *vals, double *ratio) 200 { 201 return __compute_metric(name, vals, name, ratio, NULL, NULL); 202 } 203 204 static int compute_metric_group(const char *name, struct value *vals, 205 const char *name1, double *ratio1, 206 const char *name2, double *ratio2) 207 { 208 return __compute_metric(name, vals, name1, ratio1, name2, ratio2); 209 } 210 211 static int test_ipc(void) 212 { 213 double ratio; 214 struct value vals[] = { 215 { .event = "inst_retired.any", .val = 300 }, 216 { .event = "cpu_clk_unhalted.thread", .val = 200 }, 217 { .event = NULL, }, 218 }; 219 220 TEST_ASSERT_VAL("failed to compute metric", 221 compute_metric("IPC", vals, &ratio) == 0); 222 223 TEST_ASSERT_VAL("IPC failed, wrong ratio", 224 ratio == 1.5); 225 return 0; 226 } 227 228 static int test_frontend(void) 229 { 230 double ratio; 231 struct value vals[] = { 232 { .event = "idq_uops_not_delivered.core", .val = 300 }, 233 { .event = "cpu_clk_unhalted.thread", .val = 200 }, 234 { .event = "cpu_clk_unhalted.one_thread_active", .val = 400 }, 235 { .event = "cpu_clk_unhalted.ref_xclk", .val = 600 }, 236 { .event = NULL, }, 237 }; 238 239 TEST_ASSERT_VAL("failed to compute metric", 240 compute_metric("Frontend_Bound_SMT", vals, &ratio) == 0); 241 242 TEST_ASSERT_VAL("Frontend_Bound_SMT failed, wrong ratio", 243 ratio == 0.45); 244 return 0; 245 } 246 247 static int test_cache_miss_cycles(void) 248 { 249 double ratio; 250 struct value vals[] = { 251 { .event = "l1d-loads-misses", .val = 300 }, 252 { .event = "l1i-loads-misses", .val = 200 }, 253 { .event = "inst_retired.any", .val = 400 }, 254 { .event = NULL, }, 255 }; 256 257 TEST_ASSERT_VAL("failed to compute metric", 258 compute_metric("cache_miss_cycles", vals, &ratio) == 0); 259 260 TEST_ASSERT_VAL("cache_miss_cycles failed, wrong ratio", 261 ratio == 1.25); 262 return 0; 263 } 264 265 266 /* 267 * DCache_L2_All_Hits = l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hi 268 * DCache_L2_All_Miss = max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + 269 * l2_rqsts.pf_miss + l2_rqsts.rfo_miss 270 * DCache_L2_All = dcache_l2_all_hits + dcache_l2_all_miss 271 * DCache_L2_Hits = d_ratio(dcache_l2_all_hits, dcache_l2_all) 272 * DCache_L2_Misses = d_ratio(dcache_l2_all_miss, dcache_l2_all) 273 * 274 * l2_rqsts.demand_data_rd_hit = 100 275 * l2_rqsts.pf_hit = 200 276 * l2_rqsts.rfo_hi = 300 277 * l2_rqsts.all_demand_data_rd = 400 278 * l2_rqsts.pf_miss = 500 279 * l2_rqsts.rfo_miss = 600 280 * 281 * DCache_L2_All_Hits = 600 282 * DCache_L2_All_Miss = MAX(400 - 100, 0) + 500 + 600 = 1400 283 * DCache_L2_All = 600 + 1400 = 2000 284 * DCache_L2_Hits = 600 / 2000 = 0.3 285 * DCache_L2_Misses = 1400 / 2000 = 0.7 286 */ 287 static int test_dcache_l2(void) 288 { 289 double ratio; 290 struct value vals[] = { 291 { .event = "l2_rqsts.demand_data_rd_hit", .val = 100 }, 292 { .event = "l2_rqsts.pf_hit", .val = 200 }, 293 { .event = "l2_rqsts.rfo_hit", .val = 300 }, 294 { .event = "l2_rqsts.all_demand_data_rd", .val = 400 }, 295 { .event = "l2_rqsts.pf_miss", .val = 500 }, 296 { .event = "l2_rqsts.rfo_miss", .val = 600 }, 297 { .event = NULL, }, 298 }; 299 300 TEST_ASSERT_VAL("failed to compute metric", 301 compute_metric("DCache_L2_Hits", vals, &ratio) == 0); 302 303 TEST_ASSERT_VAL("DCache_L2_Hits failed, wrong ratio", 304 ratio == 0.3); 305 306 TEST_ASSERT_VAL("failed to compute metric", 307 compute_metric("DCache_L2_Misses", vals, &ratio) == 0); 308 309 TEST_ASSERT_VAL("DCache_L2_Misses failed, wrong ratio", 310 ratio == 0.7); 311 return 0; 312 } 313 314 static int test_recursion_fail(void) 315 { 316 double ratio; 317 struct value vals[] = { 318 { .event = "inst_retired.any", .val = 300 }, 319 { .event = "cpu_clk_unhalted.thread", .val = 200 }, 320 { .event = NULL, }, 321 }; 322 323 TEST_ASSERT_VAL("failed to find recursion", 324 compute_metric("M1", vals, &ratio) == -1); 325 326 TEST_ASSERT_VAL("failed to find recursion", 327 compute_metric("M3", vals, &ratio) == -1); 328 return 0; 329 } 330 331 static int test_memory_bandwidth(void) 332 { 333 double ratio; 334 struct value vals[] = { 335 { .event = "l1d.replacement", .val = 4000000 }, 336 { .event = "duration_time", .val = 200000000 }, 337 { .event = NULL, }, 338 }; 339 340 TEST_ASSERT_VAL("failed to compute metric", 341 compute_metric("L1D_Cache_Fill_BW", vals, &ratio) == 0); 342 TEST_ASSERT_VAL("L1D_Cache_Fill_BW, wrong ratio", 343 1.28 == ratio); 344 345 return 0; 346 } 347 348 static int test_metric_group(void) 349 { 350 double ratio1, ratio2; 351 struct value vals[] = { 352 { .event = "cpu_clk_unhalted.thread", .val = 200 }, 353 { .event = "l1d-loads-misses", .val = 300 }, 354 { .event = "l1i-loads-misses", .val = 200 }, 355 { .event = "inst_retired.any", .val = 400 }, 356 { .event = NULL, }, 357 }; 358 359 TEST_ASSERT_VAL("failed to find recursion", 360 compute_metric_group("group1", vals, 361 "IPC", &ratio1, 362 "cache_miss_cycles", &ratio2) == 0); 363 364 TEST_ASSERT_VAL("group IPC failed, wrong ratio", 365 ratio1 == 2.0); 366 367 TEST_ASSERT_VAL("group cache_miss_cycles failed, wrong ratio", 368 ratio2 == 1.25); 369 return 0; 370 } 371 372 int test__parse_metric(struct test *test __maybe_unused, int subtest __maybe_unused) 373 { 374 TEST_ASSERT_VAL("IPC failed", test_ipc() == 0); 375 TEST_ASSERT_VAL("frontend failed", test_frontend() == 0); 376 TEST_ASSERT_VAL("DCache_L2 failed", test_dcache_l2() == 0); 377 TEST_ASSERT_VAL("recursion fail failed", test_recursion_fail() == 0); 378 TEST_ASSERT_VAL("Memory bandwidth", test_memory_bandwidth() == 0); 379 380 if (!perf_pmu__has_hybrid()) { 381 TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0); 382 TEST_ASSERT_VAL("test metric group", test_metric_group() == 0); 383 } 384 return 0; 385 } 386