// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include "util/evsel.h"
#include "util/env.h"
#include "util/pmu.h"
#include "util/pmus.h"
#include "linux/string.h"
#include "evsel.h"
#include "util/debug.h"
#include "env.h"

/* L3MissOnly filter bits within perf_event_attr::config for the AMD IBS PMUs */
#define IBS_FETCH_L3MISSONLY   (1ULL << 59)
#define IBS_OP_L3MISSONLY      (1ULL << 16)

/* x86 reports sample weights via the PERF_SAMPLE_WEIGHT_STRUCT layout */
void arch_evsel__set_sample_weight(struct evsel *evsel)
{
	evsel__set_sample_bit(evsel, WEIGHT_STRUCT);
}

/* Check whether the evsel's PMU supports the perf metrics */
bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
{
	const char *pmu_name = evsel->pmu_name ? evsel->pmu_name : "cpu";

	/*
	 * The PERF_TYPE_RAW type is the core PMU type, e.g., "cpu" PMU
	 * on a non-hybrid machine, "cpu_core" PMU on a hybrid machine.
	 * The slots event is only available for the core PMU, which
	 * supports the perf metrics feature.
	 * Checking both the PERF_TYPE_RAW type and the slots event
	 * should be good enough to detect the perf metrics feature.
	 */
	if ((evsel->core.attr.type == PERF_TYPE_RAW) &&
	    perf_pmus__have_event(pmu_name, "slots"))
		return true;

	return false;
}

/* Slots and topdown events on a perf-metrics capable PMU must stay in their group */
bool arch_evsel__must_be_in_group(const struct evsel *evsel)
{
	if (!evsel__sys_has_perf_metrics(evsel))
		return false;

	return evsel->name &&
	       (strcasestr(evsel->name, "slots") ||
		strcasestr(evsel->name, "topdown"));
}

/* Print the hardware event name, prefixed with the PMU name on hybrid platforms */
int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	u64 event = evsel->core.attr.config & PERF_HW_EVENT_MASK;
	u64 pmu = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
	const char *event_name;

	if (event < PERF_COUNT_HW_MAX && evsel__hw_names[event])
		event_name = evsel__hw_names[event];
	else
		event_name = "unknown-hardware";

	/* The PMU type is not required for the non-hybrid platform. */
	if (!pmu)
		return scnprintf(bf, size, "%s", event_name);

	return scnprintf(bf, size, "%s/%s/",
			 evsel->pmu_name ? evsel->pmu_name : "cpu",
			 event_name);
}

static void ibs_l3miss_warn(void)
{
	pr_warning(
"WARNING: Hw internally resets sampling period when L3 Miss Filtering is enabled\n"
"and tagged operation does not cause L3 Miss. This causes sampling period skew.\n");
}

/* Warn once when an AMD IBS event enables L3 Miss Filtering */
void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
{
	struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu;
	static int warned_once;

	if (warned_once || !x86__is_amd_cpu())
		return;

	evsel_pmu = evsel__find_pmu(evsel);
	if (!evsel_pmu)
		return;

	ibs_fetch_pmu = perf_pmus__find("ibs_fetch");
	ibs_op_pmu = perf_pmus__find("ibs_op");

	if (ibs_fetch_pmu && ibs_fetch_pmu->type == evsel_pmu->type) {
		if (attr->config & IBS_FETCH_L3MISSONLY) {
			ibs_l3miss_warn();
			warned_once = 1;
		}
	} else if (ibs_op_pmu && ibs_op_pmu->type == evsel_pmu->type) {
		if (attr->config & IBS_OP_L3MISSONLY) {
			ibs_l3miss_warn();
			warned_once = 1;
		}
	}
}

/* Return a more specific error message when opening an AMD IBS event fails */
int arch_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
{
	if (!x86__is_amd_cpu())
		return 0;

	if (!evsel->core.attr.precise_ip &&
	    !(evsel->pmu_name && !strncmp(evsel->pmu_name, "ibs", 3)))
		return 0;

	/* More verbose IBS errors. */
	if (evsel->core.attr.exclude_kernel || evsel->core.attr.exclude_user ||
	    evsel->core.attr.exclude_hv || evsel->core.attr.exclude_idle ||
	    evsel->core.attr.exclude_host || evsel->core.attr.exclude_guest) {
		return scnprintf(msg, size, "AMD IBS doesn't support privilege filtering. Try "
				 "again without the privilege modifiers (like 'k') at the end.");
	}

	return 0;
}
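/*
 * Illustrative note (not part of the original file): the L3MissOnly warning
 * above is expected to fire for an AMD IBS event that sets the l3missonly
 * format bit, e.g. something along the lines of:
 *
 *     perf record -e ibs_op/l3missonly=1/ -- <workload>
 *
 * The exact event syntax depends on the format attributes the running kernel
 * exposes under /sys/bus/event_source/devices/ibs_op/format/.
 */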