// SPDX-License-Identifier: GPL-2.0
/*
 * An empty pmu-events.c file used when there are no architecture json files in
 * arch or when the jevents.py script cannot be run.
 *
 * The test cpu/soc is provided for testing.
 */
#include "pmu-events/pmu-events.h"
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>
#include <stdlib.h>

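/* PMU events for the test SoC/CPU described in the header comment above. */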
static const struct pmu_event pmu_events__test_soc_cpu[] = {
	{
		.name = "l3_cache_rd",
		.event = "event=0x40",
		.desc = "L3 cache access, read",
		.topic = "cache",
		.long_desc = "Attributable Level 3 cache access, read",
	},
	{
		.name = "segment_reg_loads.any",
		.event = "event=0x6,period=200000,umask=0x80",
		.desc = "Number of segment register loads",
		.topic = "other",
	},
	{
		.name = "dispatch_blocked.any",
		.event = "event=0x9,period=200000,umask=0x20",
		.desc = "Memory cluster signals to block micro-op dispatch for any reason",
		.topic = "other",
	},
	{
		.name = "eist_trans",
		.event = "event=0x3a,period=200000,umask=0x0",
		.desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
		.topic = "other",
	},
	{
		.name = "uncore_hisi_ddrc.flux_wcmd",
		.event = "event=0x2",
		.desc = "DDRC write commands. Unit: hisi_sccl,ddrc ",
		.topic = "uncore",
		.long_desc = "DDRC write commands",
		.pmu = "hisi_sccl,ddrc",
	},
	{
		.name = "unc_cbo_xsnp_response.miss_eviction",
		.event = "event=0x22,umask=0x81",
		.desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core. Unit: uncore_cbox ",
		.topic = "uncore",
		.long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
		.pmu = "uncore_cbox",
	},
	{
		.name = "event-hyphen",
		.event = "event=0xe0,umask=0x00",
		.desc = "UNC_CBO_HYPHEN. Unit: uncore_cbox ",
		.topic = "uncore",
		.long_desc = "UNC_CBO_HYPHEN",
		.pmu = "uncore_cbox",
	},
	{
		.name = "event-two-hyph",
		.event = "event=0xc0,umask=0x00",
		.desc = "UNC_CBO_TWO_HYPH. Unit: uncore_cbox ",
		.topic = "uncore",
		.long_desc = "UNC_CBO_TWO_HYPH",
		.pmu = "uncore_cbox",
	},
	{
		.name = "uncore_hisi_l3c.rd_hit_cpipe",
		.event = "event=0x7",
		.desc = "Total read hits. Unit: hisi_sccl,l3c ",
		.topic = "uncore",
		.long_desc = "Total read hits",
		.pmu = "hisi_sccl,l3c",
	},
	{
		.name = "uncore_imc_free_running.cache_miss",
		.event = "event=0x12",
		.desc = "Total cache misses. Unit: uncore_imc_free_running ",
		.topic = "uncore",
		.long_desc = "Total cache misses",
		.pmu = "uncore_imc_free_running",
	},
	{
		.name = "uncore_imc.cache_hits",
		.event = "event=0x34",
		.desc = "Total cache hits. Unit: uncore_imc ",
		.topic = "uncore",
		.long_desc = "Total cache hits",
		.pmu = "uncore_imc",
	},
	{
		.name = "bp_l1_btb_correct",
		.event = "event=0x8a",
		.desc = "L1 BTB Correction",
		.topic = "branch",
	},
	{
		.name = "bp_l2_btb_correct",
		.event = "event=0x8b",
		.desc = "L2 BTB Correction",
		.topic = "branch",
	},
	{
		.name = 0,
		.event = 0,
		.desc = 0,
	},
};

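/*
 * PMU metrics for the test SoC/CPU. Note that M1 and M2 reference each other
 * and M3 references itself; these recursive definitions are deliberate,
 * presumably to exercise recursive-metric handling in the tests.
 */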
static const struct pmu_metric pmu_metrics__test_soc_cpu[] = {
	{
		.metric_expr = "1 / IPC",
		.metric_name = "CPI",
	},
	{
		.metric_expr = "inst_retired.any / cpu_clk_unhalted.thread",
		.metric_name = "IPC",
		.metric_group = "group1",
	},
	{
		.metric_expr = "idq_uops_not_delivered.core / (4 * (( ( cpu_clk_unhalted.thread / 2 ) * "
			       "( 1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk ) )))",
		.metric_name = "Frontend_Bound_SMT",
	},
	{
		.metric_expr = "l1d\\-loads\\-misses / inst_retired.any",
		.metric_name = "dcache_miss_cpi",
	},
	{
		.metric_expr = "l1i\\-loads\\-misses / inst_retired.any",
		.metric_name = "icache_miss_cycles",
	},
	{
		.metric_expr = "(dcache_miss_cpi + icache_miss_cycles)",
		.metric_name = "cache_miss_cycles",
		.metric_group = "group1",
	},
	{
		.metric_expr = "l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit",
		.metric_name = "DCache_L2_All_Hits",
	},
	{
		.metric_expr = "max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + "
			       "l2_rqsts.pf_miss + l2_rqsts.rfo_miss",
		.metric_name = "DCache_L2_All_Miss",
	},
	{
		.metric_expr = "DCache_L2_All_Hits + DCache_L2_All_Miss",
		.metric_name = "DCache_L2_All",
	},
	{
		.metric_expr = "d_ratio(DCache_L2_All_Hits, DCache_L2_All)",
		.metric_name = "DCache_L2_Hits",
	},
	{
		.metric_expr = "d_ratio(DCache_L2_All_Miss, DCache_L2_All)",
		.metric_name = "DCache_L2_Misses",
	},
	{
		.metric_expr = "ipc + M2",
		.metric_name = "M1",
	},
	{
		.metric_expr = "ipc + M1",
		.metric_name = "M2",
	},
	{
		.metric_expr = "1/M3",
		.metric_name = "M3",
	},
	{
		.metric_expr = "64 * l1d.replacement / 1000000000 / duration_time",
		.metric_name = "L1D_Cache_Fill_BW",
	},
	{
		.metric_expr = 0,
		.metric_name = 0,
	},
};

/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
	const struct pmu_event *entries;
};

/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
	const struct pmu_metric *entries;
};

/*
 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c.
 *
 * The cpuid can contain any character other than the comma.
 */
struct pmu_events_map {
	const char *arch;
	const char *cpuid;
	const struct pmu_events_table event_table;
	const struct pmu_metrics_table metric_table;
};

/*
 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
 */
static const struct pmu_events_map pmu_events_map[] = {
	{
		.arch = "testarch",
		.cpuid = "testcpu",
		.event_table = { pmu_events__test_soc_cpu },
		.metric_table = { pmu_metrics__test_soc_cpu },
	},
	{
		.arch = 0,
		.cpuid = 0,
		.event_table = { 0 },
		.metric_table = { 0 },
	},
};

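/* System ("uncore_sys_*") PMU events for the test SoC. */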
static const struct pmu_event pmu_events__test_soc_sys[] = {
	{
		.name = "sys_ddr_pmu.write_cycles",
		.event = "event=0x2b",
		.desc = "ddr write-cycles event. Unit: uncore_sys_ddr_pmu ",
		.compat = "v8",
		.topic = "uncore",
		.pmu = "uncore_sys_ddr_pmu",
	},
	{
		.name = "sys_ccn_pmu.read_cycles",
		.event = "config=0x2c",
		.desc = "ccn read-cycles event. Unit: uncore_sys_ccn_pmu ",
		.compat = "0x01",
		.topic = "uncore",
		.pmu = "uncore_sys_ccn_pmu",
	},
	{
		.name = 0,
		.event = 0,
		.desc = 0,
	},
};

struct pmu_sys_events {
	const char *name;
	const struct pmu_events_table table;
};

static const struct pmu_sys_events pmu_sys_event_tables[] = {
	{
		.table = { pmu_events__test_soc_sys },
		.name = "pmu_events__test_soc_sys",
	},
	{
		.table = { 0 }
	},
};

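/*
 * Iterate over every event in @table, skipping entries whose PMU name does
 * not match @pmu when @pmu is non-NULL, and call @fn on each one. Iteration
 * stops as soon as @fn returns a non-zero value, which is propagated back to
 * the caller.
 *
 * A minimal usage sketch ("print_event" is a hypothetical callback, not part
 * of this file):
 *
 *	static int print_event(const struct pmu_event *pe,
 *			       const struct pmu_events_table *table,
 *			       void *data)
 *	{
 *		printf("%s: %s\n", pe->name, pe->event);
 *		return 0;	// keep iterating
 *	}
 *
 *	pmu_events_table__for_each_event(table, NULL, print_event, NULL);
 */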
int pmu_events_table__for_each_event(const struct pmu_events_table *table, struct perf_pmu *pmu,
				     pmu_event_iter_fn fn, void *data)
{
	for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
		int ret;

		if (pmu && !pmu__name_match(pmu, pe->pmu))
			continue;

		ret = fn(pe, table, data);
		if (ret)
			return ret;
	}
	return 0;
}

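/*
 * Find the event called @name (compared case-insensitively) in @table,
 * optionally restricted to entries whose PMU matches @pmu, and call @fn on
 * the first match. Returns -1000 if no matching event exists.
 */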
int pmu_events_table__find_event(const struct pmu_events_table *table,
				 struct perf_pmu *pmu,
				 const char *name,
				 pmu_event_iter_fn fn,
				 void *data)
{
	for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
		if (pmu && !pmu__name_match(pmu, pe->pmu))
			continue;

		if (!strcasecmp(pe->name, name))
			return fn(pe, table, data);
	}
	return -1000;
}

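/* Count the events in @table, optionally counting only those matching @pmu. */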
size_t pmu_events_table__num_events(const struct pmu_events_table *table,
				    struct perf_pmu *pmu)
{
	size_t count = 0;

	for (const struct pmu_event *pe = &table->entries[0]; pe->name; pe++) {
		if (pmu && !pmu__name_match(pmu, pe->pmu))
			continue;

		count++;
	}
	return count;
}

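/*
 * Call @fn on every metric in @table, stopping early and returning the first
 * non-zero value @fn returns.
 */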
int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
				       void *data)
{
	for (const struct pmu_metric *pm = &table->entries[0]; pm->metric_expr; pm++) {
		int ret = fn(pm, table, data);

		if (ret)
			return ret;
	}
	return 0;
}

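/*
 * Find the event table for @pmu by matching its cpuid string against the
 * entries of pmu_events_map[]. Returns NULL when no cpuid can be obtained or
 * when no map entry matches.
 */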
const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
{
	const struct pmu_events_table *table = NULL;
	char *cpuid = perf_pmu__getcpuid(pmu);
	int i;

	/*
	 * On some platforms that use a cpus map, cpuid can be NULL for
	 * PMUs other than core PMUs.
	 */
	if (!cpuid)
		return NULL;

	i = 0;
	for (;;) {
		const struct pmu_events_map *map = &pmu_events_map[i++];

		if (!map->cpuid)
			break;

		if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
			table = &map->event_table;
			break;
		}
	}
	free(cpuid);
	return table;
}

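/* As perf_pmu__find_events_table(), but return the matching metric table. */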
const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
{
	const struct pmu_metrics_table *table = NULL;
	char *cpuid = perf_pmu__getcpuid(pmu);
	int i;

	/*
	 * On some platforms that use a cpus map, cpuid can be NULL for
	 * PMUs other than core PMUs.
	 */
	if (!cpuid)
		return NULL;

	i = 0;
	for (;;) {
		const struct pmu_events_map *map = &pmu_events_map[i++];

		if (!map->cpuid)
			break;

		if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
			table = &map->metric_table;
			break;
		}
	}
	free(cpuid);
	return table;
}

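/* Find the core event table for the given @arch and @cpuid, or NULL. */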
const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
	     tables->arch;
	     tables++) {
		if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
			return &tables->event_table;
	}
	return NULL;
}

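/* Find the core metric table for the given @arch and @cpuid, or NULL. */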
const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
	     tables->arch;
	     tables++) {
		if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
			return &tables->metric_table;
	}
	return NULL;
}

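/* Call @fn on every core event in every table of pmu_events_map[]. */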
int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0]; tables->arch; tables++) {
		int ret = pmu_events_table__for_each_event(&tables->event_table,
							   /*pmu=*/ NULL, fn, data);

		if (ret)
			return ret;
	}
	return 0;
}

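/* Call @fn on every core metric in every table of pmu_events_map[]. */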
int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
	     tables->arch;
	     tables++) {
		int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);

		if (ret)
			return ret;
	}
	return 0;
}

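/* Look up a system event table by @name in pmu_sys_event_tables[]. */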
const struct pmu_events_table *find_sys_events_table(const char *name)
{
	for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
	     tables->name;
	     tables++) {
		if (!strcmp(tables->name, name))
			return &tables->table;
	}
	return NULL;
}

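/* Call @fn on every event in every system PMU event table. */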
int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
{
	for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
	     tables->name;
	     tables++) {
		int ret = pmu_events_table__for_each_event(&tables->table, /*pmu=*/ NULL, fn, data);

		if (ret)
			return ret;
	}
	return 0;
}

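/* There are no system PMU metrics in this empty implementation. */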
int pmu_for_each_sys_metric(pmu_metric_iter_fn fn __maybe_unused, void *data __maybe_unused)
{
	return 0;
}

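/* There are no metric group descriptions in this empty implementation. */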
const char *describe_metricgroup(const char *group __maybe_unused)
{
	return NULL;
}