// SPDX-License-Identifier: GPL-2.0-only
/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>
#include <linux/kvm_host.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/intel-family.h>
#include <asm/intel_pt.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>

#include "../perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
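/*
 * Illustrative aside (not part of the original file): the values in
 * intel_perfmon_event_map above are raw PERFEVTSEL-style encodings in which
 * bits 0-7 hold the event-select code and bits 8-15 the unit mask. For
 * example 0x412e is event 0x2e with umask 0x41 (LONGEST_LAT_CACHE.MISS),
 * backing PERF_COUNT_HW_CACHE_MISSES, while 0x0300 is the pseudo-encoding
 * noted above for the fixed reference-cycles counter. The helper below is a
 * hypothetical sketch of that split and is not used by the driver.
 */
static inline void example_split_perfmon_encoding(u64 enc, u8 *event_select,
						  u8 *umask)
{
	*event_select = enc & 0xff;		/* e.g. 0x2e */
	*umask	      = (enc >> 8) & 0xff;	/* e.g. 0x41 */
}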
96 static struct event_constraint intel_westmere_event_constraints[] __read_mostly = 97 { 98 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 99 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 100 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 101 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ 102 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */ 103 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ 104 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */ 105 EVENT_CONSTRAINT_END 106 }; 107 108 static struct event_constraint intel_snb_event_constraints[] __read_mostly = 109 { 110 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 111 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 112 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 113 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */ 114 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */ 115 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ 116 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ 117 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ 118 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 119 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 120 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */ 121 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ 122 123 /* 124 * When HT is off these events can only run on the bottom 4 counters 125 * When HT is on, they are impacted by the HT bug and require EXCL access 126 */ 127 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */ 128 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 129 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 130 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ 131 132 EVENT_CONSTRAINT_END 133 }; 134 135 static struct event_constraint intel_ivb_event_constraints[] __read_mostly = 136 { 137 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 138 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 139 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 140 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */ 141 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */ 142 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */ 143 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */ 144 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ 145 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */ 146 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */ 147 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ 148 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ 149 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 150 151 /* 152 * When HT is off these events can only run on the bottom 4 counters 153 * When HT is on, they are impacted by the HT bug and require EXCL access 154 */ 155 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */ 156 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 157 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 158 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* 
MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ 159 160 EVENT_CONSTRAINT_END 161 }; 162 163 static struct extra_reg intel_westmere_extra_regs[] __read_mostly = 164 { 165 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 166 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), 167 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1), 168 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), 169 EVENT_EXTRA_END 170 }; 171 172 static struct event_constraint intel_v1_event_constraints[] __read_mostly = 173 { 174 EVENT_CONSTRAINT_END 175 }; 176 177 static struct event_constraint intel_gen_event_constraints[] __read_mostly = 178 { 179 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 180 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 181 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 182 EVENT_CONSTRAINT_END 183 }; 184 185 static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly = 186 { 187 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 188 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 189 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 190 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */ 191 FIXED_EVENT_CONSTRAINT(0x0500, 4), 192 FIXED_EVENT_CONSTRAINT(0x0600, 5), 193 FIXED_EVENT_CONSTRAINT(0x0700, 6), 194 FIXED_EVENT_CONSTRAINT(0x0800, 7), 195 FIXED_EVENT_CONSTRAINT(0x0900, 8), 196 FIXED_EVENT_CONSTRAINT(0x0a00, 9), 197 FIXED_EVENT_CONSTRAINT(0x0b00, 10), 198 FIXED_EVENT_CONSTRAINT(0x0c00, 11), 199 FIXED_EVENT_CONSTRAINT(0x0d00, 12), 200 FIXED_EVENT_CONSTRAINT(0x0e00, 13), 201 FIXED_EVENT_CONSTRAINT(0x0f00, 14), 202 FIXED_EVENT_CONSTRAINT(0x1000, 15), 203 EVENT_CONSTRAINT_END 204 }; 205 206 static struct event_constraint intel_slm_event_constraints[] __read_mostly = 207 { 208 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 209 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 210 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */ 211 EVENT_CONSTRAINT_END 212 }; 213 214 static struct event_constraint intel_skl_event_constraints[] = { 215 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 216 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 217 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 218 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */ 219 220 /* 221 * when HT is off, these can only run on the bottom 4 counters 222 */ 223 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */ 224 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */ 225 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */ 226 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */ 227 INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */ 228 229 EVENT_CONSTRAINT_END 230 }; 231 232 static struct extra_reg intel_knl_extra_regs[] __read_mostly = { 233 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0), 234 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1), 235 EVENT_EXTRA_END 236 }; 237 238 static struct extra_reg intel_snb_extra_regs[] __read_mostly = { 239 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 240 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), 241 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), 242 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 243 EVENT_EXTRA_END 244 }; 245 246 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { 247 /* must define 
OFFCORE_RSP_X first, see intel_fixup_er() */ 248 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), 249 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), 250 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 251 EVENT_EXTRA_END 252 }; 253 254 static struct extra_reg intel_skl_extra_regs[] __read_mostly = { 255 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), 256 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), 257 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 258 /* 259 * Note the low 8 bits eventsel code is not a continuous field, containing 260 * some #GPing bits. These are masked out. 261 */ 262 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), 263 EVENT_EXTRA_END 264 }; 265 266 static struct event_constraint intel_icl_event_constraints[] = { 267 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 268 FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* old INST_RETIRED.PREC_DIST */ 269 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */ 270 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 271 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 272 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */ 273 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0), 274 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1), 275 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2), 276 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3), 277 INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf), 278 INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf), 279 INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */ 280 INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf), 281 INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf), 282 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */ 283 INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */ 284 INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */ 285 INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */ 286 INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf), 287 INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf), 288 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf), 289 INTEL_EVENT_CONSTRAINT(0xef, 0xf), 290 INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf), 291 EVENT_CONSTRAINT_END 292 }; 293 294 static struct extra_reg intel_icl_extra_regs[] __read_mostly = { 295 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0), 296 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1), 297 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 298 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), 299 EVENT_EXTRA_END 300 }; 301 302 static struct extra_reg intel_spr_extra_regs[] __read_mostly = { 303 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), 304 INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), 305 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 306 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE), 307 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE), 308 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE), 309 EVENT_EXTRA_END 310 }; 311 312 static struct event_constraint intel_spr_event_constraints[] = { 313 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 314 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */ 315 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 316 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 317 
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),

	INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
	INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
	/*
	 * Generally event codes < 0x90 are restricted to counters 0-3.
	 * The 0x2E and 0x3C are exceptions, which have no restriction.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
	INTEL_EVENT_CONSTRAINT(0xce, 0x1),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
	/*
	 * Generally event codes >= 0x90 are likely to have no restrictions.
	 * The exceptions are defined above.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_gnr_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");

static struct attribute *nhm_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

/*
 * topdown events for Intel Core CPUs.
 *
 * The events are all in slots, which is a free slot in a 4 wide
 * pipeline. Some events are already reported in slots, for cycle
 * events we multiply by the pipeline width (4).
 *
 * With Hyper Threading on, topdown metrics are either summed or averaged
 * between the threads of a core: (count_t0 + count_t1).
 *
 * For the average case the metric is always scaled to pipeline width,
 * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
 */

EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
	"event=0x3c,umask=0x0",			/* cpu_clk_unhalted.thread */
	"event=0x3c,umask=0x0,any=1");		/* cpu_clk_unhalted.thread_any */
EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
	"event=0xe,umask=0x1");			/* uops_issued.any */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
	"event=0xc2,umask=0x2");		/* uops_retired.retire_slots */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
	"event=0x9c,umask=0x1");		/* idq_uops_not_delivered_core */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
	"event=0xd,umask=0x3,cmask=1",		/* int_misc.recovery_cycles */
	"event=0xd,umask=0x3,cmask=1,any=1");	/* int_misc.recovery_cycles_any */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
	"4", "2");

EVENT_ATTR_STR(slots,			slots,		"event=0x00,umask=0x4");
EVENT_ATTR_STR(topdown-retiring,	td_retiring,	"event=0x00,umask=0x80");
EVENT_ATTR_STR(topdown-bad-spec,	td_bad_spec,	"event=0x00,umask=0x81");
EVENT_ATTR_STR(topdown-fe-bound,	td_fe_bound,	"event=0x00,umask=0x82");
EVENT_ATTR_STR(topdown-be-bound,	td_be_bound,	"event=0x00,umask=0x83");
EVENT_ATTR_STR(topdown-heavy-ops,	td_heavy_ops,	"event=0x00,umask=0x84");
EVENT_ATTR_STR(topdown-br-mispredict,	td_br_mispredict, "event=0x00,umask=0x85");
EVENT_ATTR_STR(topdown-fetch-lat,	td_fetch_lat,	"event=0x00,umask=0x86");
EVENT_ATTR_STR(topdown-mem-bound,	td_mem_bound,	"event=0x00,umask=0x87");

static struct attribute *snb_events_attrs[] = {
	EVENT_PTR(td_slots_issued),
	EVENT_PTR(td_slots_retired),
	EVENT_PTR(td_fetch_bubbles),
	EVENT_PTR(td_total_slots),
	EVENT_PTR(td_total_slots_scale),
	EVENT_PTR(td_recovery_bubbles),
	EVENT_PTR(td_recovery_bubbles_scale),
	NULL,
};

static struct attribute *snb_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};

static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2),	/* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),	/* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_bdw_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 457 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 458 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 459 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ 460 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */ 461 /* 462 * when HT is off, these can only run on the bottom 4 counters 463 */ 464 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */ 465 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */ 466 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */ 467 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */ 468 EVENT_CONSTRAINT_END 469 }; 470 471 static u64 intel_pmu_event_map(int hw_event) 472 { 473 return intel_perfmon_event_map[hw_event]; 474 } 475 476 static __initconst const u64 spr_hw_cache_event_ids 477 [PERF_COUNT_HW_CACHE_MAX] 478 [PERF_COUNT_HW_CACHE_OP_MAX] 479 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 480 { 481 [ C(L1D ) ] = { 482 [ C(OP_READ) ] = { 483 [ C(RESULT_ACCESS) ] = 0x81d0, 484 [ C(RESULT_MISS) ] = 0xe124, 485 }, 486 [ C(OP_WRITE) ] = { 487 [ C(RESULT_ACCESS) ] = 0x82d0, 488 }, 489 }, 490 [ C(L1I ) ] = { 491 [ C(OP_READ) ] = { 492 [ C(RESULT_MISS) ] = 0xe424, 493 }, 494 [ C(OP_WRITE) ] = { 495 [ C(RESULT_ACCESS) ] = -1, 496 [ C(RESULT_MISS) ] = -1, 497 }, 498 }, 499 [ C(LL ) ] = { 500 [ C(OP_READ) ] = { 501 [ C(RESULT_ACCESS) ] = 0x12a, 502 [ C(RESULT_MISS) ] = 0x12a, 503 }, 504 [ C(OP_WRITE) ] = { 505 [ C(RESULT_ACCESS) ] = 0x12a, 506 [ C(RESULT_MISS) ] = 0x12a, 507 }, 508 }, 509 [ C(DTLB) ] = { 510 [ C(OP_READ) ] = { 511 [ C(RESULT_ACCESS) ] = 0x81d0, 512 [ C(RESULT_MISS) ] = 0xe12, 513 }, 514 [ C(OP_WRITE) ] = { 515 [ C(RESULT_ACCESS) ] = 0x82d0, 516 [ C(RESULT_MISS) ] = 0xe13, 517 }, 518 }, 519 [ C(ITLB) ] = { 520 [ C(OP_READ) ] = { 521 [ C(RESULT_ACCESS) ] = -1, 522 [ C(RESULT_MISS) ] = 0xe11, 523 }, 524 [ C(OP_WRITE) ] = { 525 [ C(RESULT_ACCESS) ] = -1, 526 [ C(RESULT_MISS) ] = -1, 527 }, 528 [ C(OP_PREFETCH) ] = { 529 [ C(RESULT_ACCESS) ] = -1, 530 [ C(RESULT_MISS) ] = -1, 531 }, 532 }, 533 [ C(BPU ) ] = { 534 [ C(OP_READ) ] = { 535 [ C(RESULT_ACCESS) ] = 0x4c4, 536 [ C(RESULT_MISS) ] = 0x4c5, 537 }, 538 [ C(OP_WRITE) ] = { 539 [ C(RESULT_ACCESS) ] = -1, 540 [ C(RESULT_MISS) ] = -1, 541 }, 542 [ C(OP_PREFETCH) ] = { 543 [ C(RESULT_ACCESS) ] = -1, 544 [ C(RESULT_MISS) ] = -1, 545 }, 546 }, 547 [ C(NODE) ] = { 548 [ C(OP_READ) ] = { 549 [ C(RESULT_ACCESS) ] = 0x12a, 550 [ C(RESULT_MISS) ] = 0x12a, 551 }, 552 }, 553 }; 554 555 static __initconst const u64 spr_hw_cache_extra_regs 556 [PERF_COUNT_HW_CACHE_MAX] 557 [PERF_COUNT_HW_CACHE_OP_MAX] 558 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 559 { 560 [ C(LL ) ] = { 561 [ C(OP_READ) ] = { 562 [ C(RESULT_ACCESS) ] = 0x10001, 563 [ C(RESULT_MISS) ] = 0x3fbfc00001, 564 }, 565 [ C(OP_WRITE) ] = { 566 [ C(RESULT_ACCESS) ] = 0x3f3ffc0002, 567 [ C(RESULT_MISS) ] = 0x3f3fc00002, 568 }, 569 }, 570 [ C(NODE) ] = { 571 [ C(OP_READ) ] = { 572 [ C(RESULT_ACCESS) ] = 0x10c000001, 573 [ C(RESULT_MISS) ] = 0x3fb3000001, 574 }, 575 }, 576 }; 577 578 /* 579 * Notes on the events: 580 * - data reads do not include code reads (comparable to earlier tables) 581 * - data counts include speculative execution (except L1 write, dtlb, bpu) 582 * - remote node access includes remote memory, remote cache, remote mmio. 583 * - prefetches are not included in the counts. 
584 * - icache miss does not include decoded icache 585 */ 586 587 #define SKL_DEMAND_DATA_RD BIT_ULL(0) 588 #define SKL_DEMAND_RFO BIT_ULL(1) 589 #define SKL_ANY_RESPONSE BIT_ULL(16) 590 #define SKL_SUPPLIER_NONE BIT_ULL(17) 591 #define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26) 592 #define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27) 593 #define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28) 594 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29) 595 #define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \ 596 SKL_L3_MISS_REMOTE_HOP0_DRAM| \ 597 SKL_L3_MISS_REMOTE_HOP1_DRAM| \ 598 SKL_L3_MISS_REMOTE_HOP2P_DRAM) 599 #define SKL_SPL_HIT BIT_ULL(30) 600 #define SKL_SNOOP_NONE BIT_ULL(31) 601 #define SKL_SNOOP_NOT_NEEDED BIT_ULL(32) 602 #define SKL_SNOOP_MISS BIT_ULL(33) 603 #define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34) 604 #define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35) 605 #define SKL_SNOOP_HITM BIT_ULL(36) 606 #define SKL_SNOOP_NON_DRAM BIT_ULL(37) 607 #define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \ 608 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \ 609 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \ 610 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM) 611 #define SKL_DEMAND_READ SKL_DEMAND_DATA_RD 612 #define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \ 613 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \ 614 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \ 615 SKL_SNOOP_HITM|SKL_SPL_HIT) 616 #define SKL_DEMAND_WRITE SKL_DEMAND_RFO 617 #define SKL_LLC_ACCESS SKL_ANY_RESPONSE 618 #define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \ 619 SKL_L3_MISS_REMOTE_HOP1_DRAM| \ 620 SKL_L3_MISS_REMOTE_HOP2P_DRAM) 621 622 static __initconst const u64 skl_hw_cache_event_ids 623 [PERF_COUNT_HW_CACHE_MAX] 624 [PERF_COUNT_HW_CACHE_OP_MAX] 625 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 626 { 627 [ C(L1D ) ] = { 628 [ C(OP_READ) ] = { 629 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ 630 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */ 631 }, 632 [ C(OP_WRITE) ] = { 633 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ 634 [ C(RESULT_MISS) ] = 0x0, 635 }, 636 [ C(OP_PREFETCH) ] = { 637 [ C(RESULT_ACCESS) ] = 0x0, 638 [ C(RESULT_MISS) ] = 0x0, 639 }, 640 }, 641 [ C(L1I ) ] = { 642 [ C(OP_READ) ] = { 643 [ C(RESULT_ACCESS) ] = 0x0, 644 [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */ 645 }, 646 [ C(OP_WRITE) ] = { 647 [ C(RESULT_ACCESS) ] = -1, 648 [ C(RESULT_MISS) ] = -1, 649 }, 650 [ C(OP_PREFETCH) ] = { 651 [ C(RESULT_ACCESS) ] = 0x0, 652 [ C(RESULT_MISS) ] = 0x0, 653 }, 654 }, 655 [ C(LL ) ] = { 656 [ C(OP_READ) ] = { 657 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 658 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 659 }, 660 [ C(OP_WRITE) ] = { 661 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 662 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 663 }, 664 [ C(OP_PREFETCH) ] = { 665 [ C(RESULT_ACCESS) ] = 0x0, 666 [ C(RESULT_MISS) ] = 0x0, 667 }, 668 }, 669 [ C(DTLB) ] = { 670 [ C(OP_READ) ] = { 671 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ 672 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ 673 }, 674 [ C(OP_WRITE) ] = { 675 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ 676 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */ 677 }, 678 [ C(OP_PREFETCH) ] = { 679 [ C(RESULT_ACCESS) ] = 0x0, 680 [ C(RESULT_MISS) ] = 0x0, 681 }, 682 }, 683 [ C(ITLB) ] = { 684 [ C(OP_READ) ] = { 685 [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */ 686 [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */ 687 }, 688 [ C(OP_WRITE) ] = { 689 [ C(RESULT_ACCESS) ] 
= -1, 690 [ C(RESULT_MISS) ] = -1, 691 }, 692 [ C(OP_PREFETCH) ] = { 693 [ C(RESULT_ACCESS) ] = -1, 694 [ C(RESULT_MISS) ] = -1, 695 }, 696 }, 697 [ C(BPU ) ] = { 698 [ C(OP_READ) ] = { 699 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */ 700 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 701 }, 702 [ C(OP_WRITE) ] = { 703 [ C(RESULT_ACCESS) ] = -1, 704 [ C(RESULT_MISS) ] = -1, 705 }, 706 [ C(OP_PREFETCH) ] = { 707 [ C(RESULT_ACCESS) ] = -1, 708 [ C(RESULT_MISS) ] = -1, 709 }, 710 }, 711 [ C(NODE) ] = { 712 [ C(OP_READ) ] = { 713 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 714 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 715 }, 716 [ C(OP_WRITE) ] = { 717 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 718 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 719 }, 720 [ C(OP_PREFETCH) ] = { 721 [ C(RESULT_ACCESS) ] = 0x0, 722 [ C(RESULT_MISS) ] = 0x0, 723 }, 724 }, 725 }; 726 727 static __initconst const u64 skl_hw_cache_extra_regs 728 [PERF_COUNT_HW_CACHE_MAX] 729 [PERF_COUNT_HW_CACHE_OP_MAX] 730 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 731 { 732 [ C(LL ) ] = { 733 [ C(OP_READ) ] = { 734 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ| 735 SKL_LLC_ACCESS|SKL_ANY_SNOOP, 736 [ C(RESULT_MISS) ] = SKL_DEMAND_READ| 737 SKL_L3_MISS|SKL_ANY_SNOOP| 738 SKL_SUPPLIER_NONE, 739 }, 740 [ C(OP_WRITE) ] = { 741 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE| 742 SKL_LLC_ACCESS|SKL_ANY_SNOOP, 743 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE| 744 SKL_L3_MISS|SKL_ANY_SNOOP| 745 SKL_SUPPLIER_NONE, 746 }, 747 [ C(OP_PREFETCH) ] = { 748 [ C(RESULT_ACCESS) ] = 0x0, 749 [ C(RESULT_MISS) ] = 0x0, 750 }, 751 }, 752 [ C(NODE) ] = { 753 [ C(OP_READ) ] = { 754 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ| 755 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM, 756 [ C(RESULT_MISS) ] = SKL_DEMAND_READ| 757 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM, 758 }, 759 [ C(OP_WRITE) ] = { 760 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE| 761 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM, 762 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE| 763 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM, 764 }, 765 [ C(OP_PREFETCH) ] = { 766 [ C(RESULT_ACCESS) ] = 0x0, 767 [ C(RESULT_MISS) ] = 0x0, 768 }, 769 }, 770 }; 771 772 #define SNB_DMND_DATA_RD (1ULL << 0) 773 #define SNB_DMND_RFO (1ULL << 1) 774 #define SNB_DMND_IFETCH (1ULL << 2) 775 #define SNB_DMND_WB (1ULL << 3) 776 #define SNB_PF_DATA_RD (1ULL << 4) 777 #define SNB_PF_RFO (1ULL << 5) 778 #define SNB_PF_IFETCH (1ULL << 6) 779 #define SNB_LLC_DATA_RD (1ULL << 7) 780 #define SNB_LLC_RFO (1ULL << 8) 781 #define SNB_LLC_IFETCH (1ULL << 9) 782 #define SNB_BUS_LOCKS (1ULL << 10) 783 #define SNB_STRM_ST (1ULL << 11) 784 #define SNB_OTHER (1ULL << 15) 785 #define SNB_RESP_ANY (1ULL << 16) 786 #define SNB_NO_SUPP (1ULL << 17) 787 #define SNB_LLC_HITM (1ULL << 18) 788 #define SNB_LLC_HITE (1ULL << 19) 789 #define SNB_LLC_HITS (1ULL << 20) 790 #define SNB_LLC_HITF (1ULL << 21) 791 #define SNB_LOCAL (1ULL << 22) 792 #define SNB_REMOTE (0xffULL << 23) 793 #define SNB_SNP_NONE (1ULL << 31) 794 #define SNB_SNP_NOT_NEEDED (1ULL << 32) 795 #define SNB_SNP_MISS (1ULL << 33) 796 #define SNB_NO_FWD (1ULL << 34) 797 #define SNB_SNP_FWD (1ULL << 35) 798 #define SNB_HITM (1ULL << 36) 799 #define SNB_NON_DRAM (1ULL << 37) 800 801 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD) 802 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO) 803 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO) 804 805 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \ 806 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \ 807 SNB_HITM) 808 809 #define SNB_DRAM_ANY 
(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY) 810 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY) 811 812 #define SNB_L3_ACCESS SNB_RESP_ANY 813 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM) 814 815 static __initconst const u64 snb_hw_cache_extra_regs 816 [PERF_COUNT_HW_CACHE_MAX] 817 [PERF_COUNT_HW_CACHE_OP_MAX] 818 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 819 { 820 [ C(LL ) ] = { 821 [ C(OP_READ) ] = { 822 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS, 823 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS, 824 }, 825 [ C(OP_WRITE) ] = { 826 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS, 827 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS, 828 }, 829 [ C(OP_PREFETCH) ] = { 830 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS, 831 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS, 832 }, 833 }, 834 [ C(NODE) ] = { 835 [ C(OP_READ) ] = { 836 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY, 837 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE, 838 }, 839 [ C(OP_WRITE) ] = { 840 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY, 841 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE, 842 }, 843 [ C(OP_PREFETCH) ] = { 844 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY, 845 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE, 846 }, 847 }, 848 }; 849 850 static __initconst const u64 snb_hw_cache_event_ids 851 [PERF_COUNT_HW_CACHE_MAX] 852 [PERF_COUNT_HW_CACHE_OP_MAX] 853 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 854 { 855 [ C(L1D) ] = { 856 [ C(OP_READ) ] = { 857 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */ 858 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */ 859 }, 860 [ C(OP_WRITE) ] = { 861 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */ 862 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */ 863 }, 864 [ C(OP_PREFETCH) ] = { 865 [ C(RESULT_ACCESS) ] = 0x0, 866 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */ 867 }, 868 }, 869 [ C(L1I ) ] = { 870 [ C(OP_READ) ] = { 871 [ C(RESULT_ACCESS) ] = 0x0, 872 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */ 873 }, 874 [ C(OP_WRITE) ] = { 875 [ C(RESULT_ACCESS) ] = -1, 876 [ C(RESULT_MISS) ] = -1, 877 }, 878 [ C(OP_PREFETCH) ] = { 879 [ C(RESULT_ACCESS) ] = 0x0, 880 [ C(RESULT_MISS) ] = 0x0, 881 }, 882 }, 883 [ C(LL ) ] = { 884 [ C(OP_READ) ] = { 885 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ 886 [ C(RESULT_ACCESS) ] = 0x01b7, 887 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ 888 [ C(RESULT_MISS) ] = 0x01b7, 889 }, 890 [ C(OP_WRITE) ] = { 891 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ 892 [ C(RESULT_ACCESS) ] = 0x01b7, 893 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ 894 [ C(RESULT_MISS) ] = 0x01b7, 895 }, 896 [ C(OP_PREFETCH) ] = { 897 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ 898 [ C(RESULT_ACCESS) ] = 0x01b7, 899 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ 900 [ C(RESULT_MISS) ] = 0x01b7, 901 }, 902 }, 903 [ C(DTLB) ] = { 904 [ C(OP_READ) ] = { 905 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */ 906 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */ 907 }, 908 [ C(OP_WRITE) ] = { 909 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */ 910 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */ 911 }, 912 [ C(OP_PREFETCH) ] = { 913 [ C(RESULT_ACCESS) ] = 0x0, 914 [ C(RESULT_MISS) ] = 0x0, 915 }, 916 }, 917 [ C(ITLB) ] = { 918 [ C(OP_READ) ] = { 919 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */ 920 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */ 921 }, 922 [ C(OP_WRITE) ] = { 923 [ C(RESULT_ACCESS) ] = -1, 924 [ 
C(RESULT_MISS) ] = -1, 925 }, 926 [ C(OP_PREFETCH) ] = { 927 [ C(RESULT_ACCESS) ] = -1, 928 [ C(RESULT_MISS) ] = -1, 929 }, 930 }, 931 [ C(BPU ) ] = { 932 [ C(OP_READ) ] = { 933 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 934 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 935 }, 936 [ C(OP_WRITE) ] = { 937 [ C(RESULT_ACCESS) ] = -1, 938 [ C(RESULT_MISS) ] = -1, 939 }, 940 [ C(OP_PREFETCH) ] = { 941 [ C(RESULT_ACCESS) ] = -1, 942 [ C(RESULT_MISS) ] = -1, 943 }, 944 }, 945 [ C(NODE) ] = { 946 [ C(OP_READ) ] = { 947 [ C(RESULT_ACCESS) ] = 0x01b7, 948 [ C(RESULT_MISS) ] = 0x01b7, 949 }, 950 [ C(OP_WRITE) ] = { 951 [ C(RESULT_ACCESS) ] = 0x01b7, 952 [ C(RESULT_MISS) ] = 0x01b7, 953 }, 954 [ C(OP_PREFETCH) ] = { 955 [ C(RESULT_ACCESS) ] = 0x01b7, 956 [ C(RESULT_MISS) ] = 0x01b7, 957 }, 958 }, 959 960 }; 961 962 /* 963 * Notes on the events: 964 * - data reads do not include code reads (comparable to earlier tables) 965 * - data counts include speculative execution (except L1 write, dtlb, bpu) 966 * - remote node access includes remote memory, remote cache, remote mmio. 967 * - prefetches are not included in the counts because they are not 968 * reliably counted. 969 */ 970 971 #define HSW_DEMAND_DATA_RD BIT_ULL(0) 972 #define HSW_DEMAND_RFO BIT_ULL(1) 973 #define HSW_ANY_RESPONSE BIT_ULL(16) 974 #define HSW_SUPPLIER_NONE BIT_ULL(17) 975 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22) 976 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27) 977 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28) 978 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29) 979 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \ 980 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \ 981 HSW_L3_MISS_REMOTE_HOP2P) 982 #define HSW_SNOOP_NONE BIT_ULL(31) 983 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32) 984 #define HSW_SNOOP_MISS BIT_ULL(33) 985 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34) 986 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35) 987 #define HSW_SNOOP_HITM BIT_ULL(36) 988 #define HSW_SNOOP_NON_DRAM BIT_ULL(37) 989 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \ 990 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \ 991 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \ 992 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM) 993 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM) 994 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD 995 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO 996 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\ 997 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P) 998 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE 999 1000 #define BDW_L3_MISS_LOCAL BIT(26) 1001 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \ 1002 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \ 1003 HSW_L3_MISS_REMOTE_HOP2P) 1004 1005 1006 static __initconst const u64 hsw_hw_cache_event_ids 1007 [PERF_COUNT_HW_CACHE_MAX] 1008 [PERF_COUNT_HW_CACHE_OP_MAX] 1009 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1010 { 1011 [ C(L1D ) ] = { 1012 [ C(OP_READ) ] = { 1013 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1014 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */ 1015 }, 1016 [ C(OP_WRITE) ] = { 1017 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 1018 [ C(RESULT_MISS) ] = 0x0, 1019 }, 1020 [ C(OP_PREFETCH) ] = { 1021 [ C(RESULT_ACCESS) ] = 0x0, 1022 [ C(RESULT_MISS) ] = 0x0, 1023 }, 1024 }, 1025 [ C(L1I ) ] = { 1026 [ C(OP_READ) ] = { 1027 [ C(RESULT_ACCESS) ] = 0x0, 1028 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */ 1029 }, 1030 [ C(OP_WRITE) ] = { 1031 [ C(RESULT_ACCESS) ] = -1, 1032 [ C(RESULT_MISS) ] = -1, 1033 }, 1034 [ C(OP_PREFETCH) ] = { 1035 [ 
C(RESULT_ACCESS) ] = 0x0, 1036 [ C(RESULT_MISS) ] = 0x0, 1037 }, 1038 }, 1039 [ C(LL ) ] = { 1040 [ C(OP_READ) ] = { 1041 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1042 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1043 }, 1044 [ C(OP_WRITE) ] = { 1045 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1046 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1047 }, 1048 [ C(OP_PREFETCH) ] = { 1049 [ C(RESULT_ACCESS) ] = 0x0, 1050 [ C(RESULT_MISS) ] = 0x0, 1051 }, 1052 }, 1053 [ C(DTLB) ] = { 1054 [ C(OP_READ) ] = { 1055 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1056 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */ 1057 }, 1058 [ C(OP_WRITE) ] = { 1059 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 1060 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */ 1061 }, 1062 [ C(OP_PREFETCH) ] = { 1063 [ C(RESULT_ACCESS) ] = 0x0, 1064 [ C(RESULT_MISS) ] = 0x0, 1065 }, 1066 }, 1067 [ C(ITLB) ] = { 1068 [ C(OP_READ) ] = { 1069 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */ 1070 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */ 1071 }, 1072 [ C(OP_WRITE) ] = { 1073 [ C(RESULT_ACCESS) ] = -1, 1074 [ C(RESULT_MISS) ] = -1, 1075 }, 1076 [ C(OP_PREFETCH) ] = { 1077 [ C(RESULT_ACCESS) ] = -1, 1078 [ C(RESULT_MISS) ] = -1, 1079 }, 1080 }, 1081 [ C(BPU ) ] = { 1082 [ C(OP_READ) ] = { 1083 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */ 1084 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 1085 }, 1086 [ C(OP_WRITE) ] = { 1087 [ C(RESULT_ACCESS) ] = -1, 1088 [ C(RESULT_MISS) ] = -1, 1089 }, 1090 [ C(OP_PREFETCH) ] = { 1091 [ C(RESULT_ACCESS) ] = -1, 1092 [ C(RESULT_MISS) ] = -1, 1093 }, 1094 }, 1095 [ C(NODE) ] = { 1096 [ C(OP_READ) ] = { 1097 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1098 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1099 }, 1100 [ C(OP_WRITE) ] = { 1101 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1102 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1103 }, 1104 [ C(OP_PREFETCH) ] = { 1105 [ C(RESULT_ACCESS) ] = 0x0, 1106 [ C(RESULT_MISS) ] = 0x0, 1107 }, 1108 }, 1109 }; 1110 1111 static __initconst const u64 hsw_hw_cache_extra_regs 1112 [PERF_COUNT_HW_CACHE_MAX] 1113 [PERF_COUNT_HW_CACHE_OP_MAX] 1114 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1115 { 1116 [ C(LL ) ] = { 1117 [ C(OP_READ) ] = { 1118 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ| 1119 HSW_LLC_ACCESS, 1120 [ C(RESULT_MISS) ] = HSW_DEMAND_READ| 1121 HSW_L3_MISS|HSW_ANY_SNOOP, 1122 }, 1123 [ C(OP_WRITE) ] = { 1124 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE| 1125 HSW_LLC_ACCESS, 1126 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE| 1127 HSW_L3_MISS|HSW_ANY_SNOOP, 1128 }, 1129 [ C(OP_PREFETCH) ] = { 1130 [ C(RESULT_ACCESS) ] = 0x0, 1131 [ C(RESULT_MISS) ] = 0x0, 1132 }, 1133 }, 1134 [ C(NODE) ] = { 1135 [ C(OP_READ) ] = { 1136 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ| 1137 HSW_L3_MISS_LOCAL_DRAM| 1138 HSW_SNOOP_DRAM, 1139 [ C(RESULT_MISS) ] = HSW_DEMAND_READ| 1140 HSW_L3_MISS_REMOTE| 1141 HSW_SNOOP_DRAM, 1142 }, 1143 [ C(OP_WRITE) ] = { 1144 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE| 1145 HSW_L3_MISS_LOCAL_DRAM| 1146 HSW_SNOOP_DRAM, 1147 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE| 1148 HSW_L3_MISS_REMOTE| 1149 HSW_SNOOP_DRAM, 1150 }, 1151 [ C(OP_PREFETCH) ] = { 1152 [ C(RESULT_ACCESS) ] = 0x0, 1153 [ C(RESULT_MISS) ] = 0x0, 1154 }, 1155 }, 1156 }; 1157 1158 static __initconst const u64 westmere_hw_cache_event_ids 1159 [PERF_COUNT_HW_CACHE_MAX] 1160 [PERF_COUNT_HW_CACHE_OP_MAX] 1161 
[PERF_COUNT_HW_CACHE_RESULT_MAX] = 1162 { 1163 [ C(L1D) ] = { 1164 [ C(OP_READ) ] = { 1165 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */ 1166 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */ 1167 }, 1168 [ C(OP_WRITE) ] = { 1169 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */ 1170 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */ 1171 }, 1172 [ C(OP_PREFETCH) ] = { 1173 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */ 1174 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */ 1175 }, 1176 }, 1177 [ C(L1I ) ] = { 1178 [ C(OP_READ) ] = { 1179 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ 1180 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ 1181 }, 1182 [ C(OP_WRITE) ] = { 1183 [ C(RESULT_ACCESS) ] = -1, 1184 [ C(RESULT_MISS) ] = -1, 1185 }, 1186 [ C(OP_PREFETCH) ] = { 1187 [ C(RESULT_ACCESS) ] = 0x0, 1188 [ C(RESULT_MISS) ] = 0x0, 1189 }, 1190 }, 1191 [ C(LL ) ] = { 1192 [ C(OP_READ) ] = { 1193 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ 1194 [ C(RESULT_ACCESS) ] = 0x01b7, 1195 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ 1196 [ C(RESULT_MISS) ] = 0x01b7, 1197 }, 1198 /* 1199 * Use RFO, not WRITEBACK, because a write miss would typically occur 1200 * on RFO. 1201 */ 1202 [ C(OP_WRITE) ] = { 1203 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ 1204 [ C(RESULT_ACCESS) ] = 0x01b7, 1205 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ 1206 [ C(RESULT_MISS) ] = 0x01b7, 1207 }, 1208 [ C(OP_PREFETCH) ] = { 1209 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ 1210 [ C(RESULT_ACCESS) ] = 0x01b7, 1211 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ 1212 [ C(RESULT_MISS) ] = 0x01b7, 1213 }, 1214 }, 1215 [ C(DTLB) ] = { 1216 [ C(OP_READ) ] = { 1217 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */ 1218 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */ 1219 }, 1220 [ C(OP_WRITE) ] = { 1221 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */ 1222 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */ 1223 }, 1224 [ C(OP_PREFETCH) ] = { 1225 [ C(RESULT_ACCESS) ] = 0x0, 1226 [ C(RESULT_MISS) ] = 0x0, 1227 }, 1228 }, 1229 [ C(ITLB) ] = { 1230 [ C(OP_READ) ] = { 1231 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */ 1232 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */ 1233 }, 1234 [ C(OP_WRITE) ] = { 1235 [ C(RESULT_ACCESS) ] = -1, 1236 [ C(RESULT_MISS) ] = -1, 1237 }, 1238 [ C(OP_PREFETCH) ] = { 1239 [ C(RESULT_ACCESS) ] = -1, 1240 [ C(RESULT_MISS) ] = -1, 1241 }, 1242 }, 1243 [ C(BPU ) ] = { 1244 [ C(OP_READ) ] = { 1245 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 1246 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */ 1247 }, 1248 [ C(OP_WRITE) ] = { 1249 [ C(RESULT_ACCESS) ] = -1, 1250 [ C(RESULT_MISS) ] = -1, 1251 }, 1252 [ C(OP_PREFETCH) ] = { 1253 [ C(RESULT_ACCESS) ] = -1, 1254 [ C(RESULT_MISS) ] = -1, 1255 }, 1256 }, 1257 [ C(NODE) ] = { 1258 [ C(OP_READ) ] = { 1259 [ C(RESULT_ACCESS) ] = 0x01b7, 1260 [ C(RESULT_MISS) ] = 0x01b7, 1261 }, 1262 [ C(OP_WRITE) ] = { 1263 [ C(RESULT_ACCESS) ] = 0x01b7, 1264 [ C(RESULT_MISS) ] = 0x01b7, 1265 }, 1266 [ C(OP_PREFETCH) ] = { 1267 [ C(RESULT_ACCESS) ] = 0x01b7, 1268 [ C(RESULT_MISS) ] = 0x01b7, 1269 }, 1270 }, 1271 }; 1272 1273 /* 1274 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits; 1275 * See IA32 SDM Vol 3B 30.6.1.3 1276 */ 1277 1278 #define NHM_DMND_DATA_RD (1 << 0) 1279 #define NHM_DMND_RFO (1 << 1) 1280 #define NHM_DMND_IFETCH (1 << 2) 1281 #define NHM_DMND_WB (1 << 3) 1282 #define NHM_PF_DATA_RD (1 << 4) 1283 #define NHM_PF_DATA_RFO (1 << 5) 1284 #define NHM_PF_IFETCH (1 << 6) 1285 
#define NHM_OFFCORE_OTHER (1 << 7) 1286 #define NHM_UNCORE_HIT (1 << 8) 1287 #define NHM_OTHER_CORE_HIT_SNP (1 << 9) 1288 #define NHM_OTHER_CORE_HITM (1 << 10) 1289 /* reserved */ 1290 #define NHM_REMOTE_CACHE_FWD (1 << 12) 1291 #define NHM_REMOTE_DRAM (1 << 13) 1292 #define NHM_LOCAL_DRAM (1 << 14) 1293 #define NHM_NON_DRAM (1 << 15) 1294 1295 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD) 1296 #define NHM_REMOTE (NHM_REMOTE_DRAM) 1297 1298 #define NHM_DMND_READ (NHM_DMND_DATA_RD) 1299 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB) 1300 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO) 1301 1302 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM) 1303 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD) 1304 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS) 1305 1306 static __initconst const u64 nehalem_hw_cache_extra_regs 1307 [PERF_COUNT_HW_CACHE_MAX] 1308 [PERF_COUNT_HW_CACHE_OP_MAX] 1309 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1310 { 1311 [ C(LL ) ] = { 1312 [ C(OP_READ) ] = { 1313 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS, 1314 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS, 1315 }, 1316 [ C(OP_WRITE) ] = { 1317 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS, 1318 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS, 1319 }, 1320 [ C(OP_PREFETCH) ] = { 1321 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS, 1322 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS, 1323 }, 1324 }, 1325 [ C(NODE) ] = { 1326 [ C(OP_READ) ] = { 1327 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE, 1328 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE, 1329 }, 1330 [ C(OP_WRITE) ] = { 1331 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE, 1332 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE, 1333 }, 1334 [ C(OP_PREFETCH) ] = { 1335 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE, 1336 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE, 1337 }, 1338 }, 1339 }; 1340 1341 static __initconst const u64 nehalem_hw_cache_event_ids 1342 [PERF_COUNT_HW_CACHE_MAX] 1343 [PERF_COUNT_HW_CACHE_OP_MAX] 1344 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1345 { 1346 [ C(L1D) ] = { 1347 [ C(OP_READ) ] = { 1348 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */ 1349 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */ 1350 }, 1351 [ C(OP_WRITE) ] = { 1352 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */ 1353 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */ 1354 }, 1355 [ C(OP_PREFETCH) ] = { 1356 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */ 1357 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */ 1358 }, 1359 }, 1360 [ C(L1I ) ] = { 1361 [ C(OP_READ) ] = { 1362 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ 1363 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ 1364 }, 1365 [ C(OP_WRITE) ] = { 1366 [ C(RESULT_ACCESS) ] = -1, 1367 [ C(RESULT_MISS) ] = -1, 1368 }, 1369 [ C(OP_PREFETCH) ] = { 1370 [ C(RESULT_ACCESS) ] = 0x0, 1371 [ C(RESULT_MISS) ] = 0x0, 1372 }, 1373 }, 1374 [ C(LL ) ] = { 1375 [ C(OP_READ) ] = { 1376 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ 1377 [ C(RESULT_ACCESS) ] = 0x01b7, 1378 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ 1379 [ C(RESULT_MISS) ] = 0x01b7, 1380 }, 1381 /* 1382 * Use RFO, not WRITEBACK, because a write miss would typically occur 1383 * on RFO. 
1384 */ 1385 [ C(OP_WRITE) ] = { 1386 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ 1387 [ C(RESULT_ACCESS) ] = 0x01b7, 1388 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ 1389 [ C(RESULT_MISS) ] = 0x01b7, 1390 }, 1391 [ C(OP_PREFETCH) ] = { 1392 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ 1393 [ C(RESULT_ACCESS) ] = 0x01b7, 1394 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ 1395 [ C(RESULT_MISS) ] = 0x01b7, 1396 }, 1397 }, 1398 [ C(DTLB) ] = { 1399 [ C(OP_READ) ] = { 1400 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ 1401 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */ 1402 }, 1403 [ C(OP_WRITE) ] = { 1404 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ 1405 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */ 1406 }, 1407 [ C(OP_PREFETCH) ] = { 1408 [ C(RESULT_ACCESS) ] = 0x0, 1409 [ C(RESULT_MISS) ] = 0x0, 1410 }, 1411 }, 1412 [ C(ITLB) ] = { 1413 [ C(OP_READ) ] = { 1414 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */ 1415 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */ 1416 }, 1417 [ C(OP_WRITE) ] = { 1418 [ C(RESULT_ACCESS) ] = -1, 1419 [ C(RESULT_MISS) ] = -1, 1420 }, 1421 [ C(OP_PREFETCH) ] = { 1422 [ C(RESULT_ACCESS) ] = -1, 1423 [ C(RESULT_MISS) ] = -1, 1424 }, 1425 }, 1426 [ C(BPU ) ] = { 1427 [ C(OP_READ) ] = { 1428 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 1429 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */ 1430 }, 1431 [ C(OP_WRITE) ] = { 1432 [ C(RESULT_ACCESS) ] = -1, 1433 [ C(RESULT_MISS) ] = -1, 1434 }, 1435 [ C(OP_PREFETCH) ] = { 1436 [ C(RESULT_ACCESS) ] = -1, 1437 [ C(RESULT_MISS) ] = -1, 1438 }, 1439 }, 1440 [ C(NODE) ] = { 1441 [ C(OP_READ) ] = { 1442 [ C(RESULT_ACCESS) ] = 0x01b7, 1443 [ C(RESULT_MISS) ] = 0x01b7, 1444 }, 1445 [ C(OP_WRITE) ] = { 1446 [ C(RESULT_ACCESS) ] = 0x01b7, 1447 [ C(RESULT_MISS) ] = 0x01b7, 1448 }, 1449 [ C(OP_PREFETCH) ] = { 1450 [ C(RESULT_ACCESS) ] = 0x01b7, 1451 [ C(RESULT_MISS) ] = 0x01b7, 1452 }, 1453 }, 1454 }; 1455 1456 static __initconst const u64 core2_hw_cache_event_ids 1457 [PERF_COUNT_HW_CACHE_MAX] 1458 [PERF_COUNT_HW_CACHE_OP_MAX] 1459 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1460 { 1461 [ C(L1D) ] = { 1462 [ C(OP_READ) ] = { 1463 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ 1464 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ 1465 }, 1466 [ C(OP_WRITE) ] = { 1467 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ 1468 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ 1469 }, 1470 [ C(OP_PREFETCH) ] = { 1471 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */ 1472 [ C(RESULT_MISS) ] = 0, 1473 }, 1474 }, 1475 [ C(L1I ) ] = { 1476 [ C(OP_READ) ] = { 1477 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */ 1478 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */ 1479 }, 1480 [ C(OP_WRITE) ] = { 1481 [ C(RESULT_ACCESS) ] = -1, 1482 [ C(RESULT_MISS) ] = -1, 1483 }, 1484 [ C(OP_PREFETCH) ] = { 1485 [ C(RESULT_ACCESS) ] = 0, 1486 [ C(RESULT_MISS) ] = 0, 1487 }, 1488 }, 1489 [ C(LL ) ] = { 1490 [ C(OP_READ) ] = { 1491 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ 1492 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ 1493 }, 1494 [ C(OP_WRITE) ] = { 1495 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ 1496 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ 1497 }, 1498 [ C(OP_PREFETCH) ] = { 1499 [ C(RESULT_ACCESS) ] = 0, 1500 [ C(RESULT_MISS) ] = 0, 1501 }, 1502 }, 1503 [ C(DTLB) ] = { 1504 [ C(OP_READ) ] = { 1505 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ 1506 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */ 1507 
}, 1508 [ C(OP_WRITE) ] = { 1509 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ 1510 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */ 1511 }, 1512 [ C(OP_PREFETCH) ] = { 1513 [ C(RESULT_ACCESS) ] = 0, 1514 [ C(RESULT_MISS) ] = 0, 1515 }, 1516 }, 1517 [ C(ITLB) ] = { 1518 [ C(OP_READ) ] = { 1519 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ 1520 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */ 1521 }, 1522 [ C(OP_WRITE) ] = { 1523 [ C(RESULT_ACCESS) ] = -1, 1524 [ C(RESULT_MISS) ] = -1, 1525 }, 1526 [ C(OP_PREFETCH) ] = { 1527 [ C(RESULT_ACCESS) ] = -1, 1528 [ C(RESULT_MISS) ] = -1, 1529 }, 1530 }, 1531 [ C(BPU ) ] = { 1532 [ C(OP_READ) ] = { 1533 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ 1534 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ 1535 }, 1536 [ C(OP_WRITE) ] = { 1537 [ C(RESULT_ACCESS) ] = -1, 1538 [ C(RESULT_MISS) ] = -1, 1539 }, 1540 [ C(OP_PREFETCH) ] = { 1541 [ C(RESULT_ACCESS) ] = -1, 1542 [ C(RESULT_MISS) ] = -1, 1543 }, 1544 }, 1545 }; 1546 1547 static __initconst const u64 atom_hw_cache_event_ids 1548 [PERF_COUNT_HW_CACHE_MAX] 1549 [PERF_COUNT_HW_CACHE_OP_MAX] 1550 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1551 { 1552 [ C(L1D) ] = { 1553 [ C(OP_READ) ] = { 1554 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */ 1555 [ C(RESULT_MISS) ] = 0, 1556 }, 1557 [ C(OP_WRITE) ] = { 1558 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */ 1559 [ C(RESULT_MISS) ] = 0, 1560 }, 1561 [ C(OP_PREFETCH) ] = { 1562 [ C(RESULT_ACCESS) ] = 0x0, 1563 [ C(RESULT_MISS) ] = 0, 1564 }, 1565 }, 1566 [ C(L1I ) ] = { 1567 [ C(OP_READ) ] = { 1568 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ 1569 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ 1570 }, 1571 [ C(OP_WRITE) ] = { 1572 [ C(RESULT_ACCESS) ] = -1, 1573 [ C(RESULT_MISS) ] = -1, 1574 }, 1575 [ C(OP_PREFETCH) ] = { 1576 [ C(RESULT_ACCESS) ] = 0, 1577 [ C(RESULT_MISS) ] = 0, 1578 }, 1579 }, 1580 [ C(LL ) ] = { 1581 [ C(OP_READ) ] = { 1582 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ 1583 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ 1584 }, 1585 [ C(OP_WRITE) ] = { 1586 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ 1587 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ 1588 }, 1589 [ C(OP_PREFETCH) ] = { 1590 [ C(RESULT_ACCESS) ] = 0, 1591 [ C(RESULT_MISS) ] = 0, 1592 }, 1593 }, 1594 [ C(DTLB) ] = { 1595 [ C(OP_READ) ] = { 1596 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */ 1597 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */ 1598 }, 1599 [ C(OP_WRITE) ] = { 1600 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */ 1601 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */ 1602 }, 1603 [ C(OP_PREFETCH) ] = { 1604 [ C(RESULT_ACCESS) ] = 0, 1605 [ C(RESULT_MISS) ] = 0, 1606 }, 1607 }, 1608 [ C(ITLB) ] = { 1609 [ C(OP_READ) ] = { 1610 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ 1611 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */ 1612 }, 1613 [ C(OP_WRITE) ] = { 1614 [ C(RESULT_ACCESS) ] = -1, 1615 [ C(RESULT_MISS) ] = -1, 1616 }, 1617 [ C(OP_PREFETCH) ] = { 1618 [ C(RESULT_ACCESS) ] = -1, 1619 [ C(RESULT_MISS) ] = -1, 1620 }, 1621 }, 1622 [ C(BPU ) ] = { 1623 [ C(OP_READ) ] = { 1624 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ 1625 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ 1626 }, 1627 [ C(OP_WRITE) ] = { 1628 [ C(RESULT_ACCESS) ] = -1, 1629 [ C(RESULT_MISS) ] = -1, 1630 }, 1631 [ C(OP_PREFETCH) ] = { 1632 [ C(RESULT_ACCESS) ] = -1, 1633 [ C(RESULT_MISS) ] = -1, 1634 }, 1635 }, 1636 }; 1637 1638 
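/*
 * Illustrative aside (not part of the original file): the *_hw_cache_event_ids
 * tables in this file are indexed by the generic PERF_TYPE_HW_CACHE config,
 * whose low three bytes select cache type, operation and result. By
 * convention an entry of 0 means "not counted on this CPU" and -1 means
 * "combination never valid"; any other value is the raw event to program.
 * The function below is a simplified, hypothetical sketch of that lookup
 * (the real decoding lives in the generic x86 perf code); it is not used by
 * the driver.
 */
static inline int example_cache_event_lookup(u64 config,
		const u64 (*ids)[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX])
{
	unsigned int type   = (config >>  0) & 0xff;	/* e.g. C(L1D)         */
	unsigned int op     = (config >>  8) & 0xff;	/* e.g. C(OP_READ)     */
	unsigned int result = (config >> 16) & 0xff;	/* e.g. C(RESULT_MISS) */
	u64 val;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = ids[type][op][result];
	if (val == 0)
		return -ENOENT;		/* no event for this combination */
	if (val == (u64)-1)
		return -EINVAL;		/* combination is never valid */

	return 0;			/* val would be merged into the hw config */
}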
EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
/* no_alloc_cycles.not_delivered */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
	"event=0xca,umask=0x50");
EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
/* uops_retired.all */
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
	"event=0xc2,umask=0x10");
/* uops_retired.all */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
	"event=0xc2,umask=0x10");

static struct attribute *slm_events_attrs[] = {
	EVENT_PTR(td_total_slots_slm),
	EVENT_PTR(td_total_slots_scale_slm),
	EVENT_PTR(td_fetch_bubbles_slm),
	EVENT_PTR(td_fetch_bubbles_scale_slm),
	EVENT_PTR(td_slots_issued_slm),
	EVENT_PTR(td_slots_retired_slm),
	NULL
};

static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
	EVENT_EXTRA_END
};

#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)

static __initconst const u64 slm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
	},
 },
};

static __initconst const u64 slm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
		[ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ]
= 0x01b7, 1746 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ 1747 [ C(RESULT_MISS) ] = 0x01b7, 1748 }, 1749 }, 1750 [ C(DTLB) ] = { 1751 [ C(OP_READ) ] = { 1752 [ C(RESULT_ACCESS) ] = 0, 1753 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */ 1754 }, 1755 [ C(OP_WRITE) ] = { 1756 [ C(RESULT_ACCESS) ] = 0, 1757 [ C(RESULT_MISS) ] = 0, 1758 }, 1759 [ C(OP_PREFETCH) ] = { 1760 [ C(RESULT_ACCESS) ] = 0, 1761 [ C(RESULT_MISS) ] = 0, 1762 }, 1763 }, 1764 [ C(ITLB) ] = { 1765 [ C(OP_READ) ] = { 1766 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ 1767 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */ 1768 }, 1769 [ C(OP_WRITE) ] = { 1770 [ C(RESULT_ACCESS) ] = -1, 1771 [ C(RESULT_MISS) ] = -1, 1772 }, 1773 [ C(OP_PREFETCH) ] = { 1774 [ C(RESULT_ACCESS) ] = -1, 1775 [ C(RESULT_MISS) ] = -1, 1776 }, 1777 }, 1778 [ C(BPU ) ] = { 1779 [ C(OP_READ) ] = { 1780 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ 1781 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ 1782 }, 1783 [ C(OP_WRITE) ] = { 1784 [ C(RESULT_ACCESS) ] = -1, 1785 [ C(RESULT_MISS) ] = -1, 1786 }, 1787 [ C(OP_PREFETCH) ] = { 1788 [ C(RESULT_ACCESS) ] = -1, 1789 [ C(RESULT_MISS) ] = -1, 1790 }, 1791 }, 1792 }; 1793 1794 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c"); 1795 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3"); 1796 /* UOPS_NOT_DELIVERED.ANY */ 1797 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c"); 1798 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */ 1799 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02"); 1800 /* UOPS_RETIRED.ANY */ 1801 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2"); 1802 /* UOPS_ISSUED.ANY */ 1803 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e"); 1804 1805 static struct attribute *glm_events_attrs[] = { 1806 EVENT_PTR(td_total_slots_glm), 1807 EVENT_PTR(td_total_slots_scale_glm), 1808 EVENT_PTR(td_fetch_bubbles_glm), 1809 EVENT_PTR(td_recovery_bubbles_glm), 1810 EVENT_PTR(td_slots_issued_glm), 1811 EVENT_PTR(td_slots_retired_glm), 1812 NULL 1813 }; 1814 1815 static struct extra_reg intel_glm_extra_regs[] __read_mostly = { 1816 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 1817 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0), 1818 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1), 1819 EVENT_EXTRA_END 1820 }; 1821 1822 #define GLM_DEMAND_DATA_RD BIT_ULL(0) 1823 #define GLM_DEMAND_RFO BIT_ULL(1) 1824 #define GLM_ANY_RESPONSE BIT_ULL(16) 1825 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33) 1826 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD 1827 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO 1828 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO) 1829 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE 1830 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM) 1831 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM) 1832 1833 static __initconst const u64 glm_hw_cache_event_ids 1834 [PERF_COUNT_HW_CACHE_MAX] 1835 [PERF_COUNT_HW_CACHE_OP_MAX] 1836 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 1837 [C(L1D)] = { 1838 [C(OP_READ)] = { 1839 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1840 [C(RESULT_MISS)] = 0x0, 1841 }, 1842 [C(OP_WRITE)] = { 1843 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 1844 [C(RESULT_MISS)] = 0x0, 1845 }, 1846 [C(OP_PREFETCH)] = { 1847 [C(RESULT_ACCESS)] = 0x0, 1848 [C(RESULT_MISS)] = 0x0, 1849 }, 1850 }, 1851 [C(L1I)] = { 1852 [C(OP_READ)] = 
{ 1853 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */ 1854 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */ 1855 }, 1856 [C(OP_WRITE)] = { 1857 [C(RESULT_ACCESS)] = -1, 1858 [C(RESULT_MISS)] = -1, 1859 }, 1860 [C(OP_PREFETCH)] = { 1861 [C(RESULT_ACCESS)] = 0x0, 1862 [C(RESULT_MISS)] = 0x0, 1863 }, 1864 }, 1865 [C(LL)] = { 1866 [C(OP_READ)] = { 1867 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1868 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1869 }, 1870 [C(OP_WRITE)] = { 1871 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1872 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1873 }, 1874 [C(OP_PREFETCH)] = { 1875 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1876 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1877 }, 1878 }, 1879 [C(DTLB)] = { 1880 [C(OP_READ)] = { 1881 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1882 [C(RESULT_MISS)] = 0x0, 1883 }, 1884 [C(OP_WRITE)] = { 1885 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 1886 [C(RESULT_MISS)] = 0x0, 1887 }, 1888 [C(OP_PREFETCH)] = { 1889 [C(RESULT_ACCESS)] = 0x0, 1890 [C(RESULT_MISS)] = 0x0, 1891 }, 1892 }, 1893 [C(ITLB)] = { 1894 [C(OP_READ)] = { 1895 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */ 1896 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */ 1897 }, 1898 [C(OP_WRITE)] = { 1899 [C(RESULT_ACCESS)] = -1, 1900 [C(RESULT_MISS)] = -1, 1901 }, 1902 [C(OP_PREFETCH)] = { 1903 [C(RESULT_ACCESS)] = -1, 1904 [C(RESULT_MISS)] = -1, 1905 }, 1906 }, 1907 [C(BPU)] = { 1908 [C(OP_READ)] = { 1909 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 1910 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 1911 }, 1912 [C(OP_WRITE)] = { 1913 [C(RESULT_ACCESS)] = -1, 1914 [C(RESULT_MISS)] = -1, 1915 }, 1916 [C(OP_PREFETCH)] = { 1917 [C(RESULT_ACCESS)] = -1, 1918 [C(RESULT_MISS)] = -1, 1919 }, 1920 }, 1921 }; 1922 1923 static __initconst const u64 glm_hw_cache_extra_regs 1924 [PERF_COUNT_HW_CACHE_MAX] 1925 [PERF_COUNT_HW_CACHE_OP_MAX] 1926 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 1927 [C(LL)] = { 1928 [C(OP_READ)] = { 1929 [C(RESULT_ACCESS)] = GLM_DEMAND_READ| 1930 GLM_LLC_ACCESS, 1931 [C(RESULT_MISS)] = GLM_DEMAND_READ| 1932 GLM_LLC_MISS, 1933 }, 1934 [C(OP_WRITE)] = { 1935 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE| 1936 GLM_LLC_ACCESS, 1937 [C(RESULT_MISS)] = GLM_DEMAND_WRITE| 1938 GLM_LLC_MISS, 1939 }, 1940 [C(OP_PREFETCH)] = { 1941 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH| 1942 GLM_LLC_ACCESS, 1943 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH| 1944 GLM_LLC_MISS, 1945 }, 1946 }, 1947 }; 1948 1949 static __initconst const u64 glp_hw_cache_event_ids 1950 [PERF_COUNT_HW_CACHE_MAX] 1951 [PERF_COUNT_HW_CACHE_OP_MAX] 1952 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 1953 [C(L1D)] = { 1954 [C(OP_READ)] = { 1955 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1956 [C(RESULT_MISS)] = 0x0, 1957 }, 1958 [C(OP_WRITE)] = { 1959 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 1960 [C(RESULT_MISS)] = 0x0, 1961 }, 1962 [C(OP_PREFETCH)] = { 1963 [C(RESULT_ACCESS)] = 0x0, 1964 [C(RESULT_MISS)] = 0x0, 1965 }, 1966 }, 1967 [C(L1I)] = { 1968 [C(OP_READ)] = { 1969 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */ 1970 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */ 1971 }, 1972 [C(OP_WRITE)] = { 1973 [C(RESULT_ACCESS)] = -1, 1974 [C(RESULT_MISS)] = -1, 1975 }, 1976 [C(OP_PREFETCH)] = { 1977 [C(RESULT_ACCESS)] = 0x0, 1978 [C(RESULT_MISS)] = 0x0, 1979 }, 1980 }, 1981 [C(LL)] = { 1982 [C(OP_READ)] = { 1983 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1984 [C(RESULT_MISS)] = 
0x1b7, /* OFFCORE_RESPONSE */ 1985 }, 1986 [C(OP_WRITE)] = { 1987 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1988 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1989 }, 1990 [C(OP_PREFETCH)] = { 1991 [C(RESULT_ACCESS)] = 0x0, 1992 [C(RESULT_MISS)] = 0x0, 1993 }, 1994 }, 1995 [C(DTLB)] = { 1996 [C(OP_READ)] = { 1997 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1998 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ 1999 }, 2000 [C(OP_WRITE)] = { 2001 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 2002 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */ 2003 }, 2004 [C(OP_PREFETCH)] = { 2005 [C(RESULT_ACCESS)] = 0x0, 2006 [C(RESULT_MISS)] = 0x0, 2007 }, 2008 }, 2009 [C(ITLB)] = { 2010 [C(OP_READ)] = { 2011 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */ 2012 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */ 2013 }, 2014 [C(OP_WRITE)] = { 2015 [C(RESULT_ACCESS)] = -1, 2016 [C(RESULT_MISS)] = -1, 2017 }, 2018 [C(OP_PREFETCH)] = { 2019 [C(RESULT_ACCESS)] = -1, 2020 [C(RESULT_MISS)] = -1, 2021 }, 2022 }, 2023 [C(BPU)] = { 2024 [C(OP_READ)] = { 2025 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 2026 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 2027 }, 2028 [C(OP_WRITE)] = { 2029 [C(RESULT_ACCESS)] = -1, 2030 [C(RESULT_MISS)] = -1, 2031 }, 2032 [C(OP_PREFETCH)] = { 2033 [C(RESULT_ACCESS)] = -1, 2034 [C(RESULT_MISS)] = -1, 2035 }, 2036 }, 2037 }; 2038 2039 static __initconst const u64 glp_hw_cache_extra_regs 2040 [PERF_COUNT_HW_CACHE_MAX] 2041 [PERF_COUNT_HW_CACHE_OP_MAX] 2042 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2043 [C(LL)] = { 2044 [C(OP_READ)] = { 2045 [C(RESULT_ACCESS)] = GLM_DEMAND_READ| 2046 GLM_LLC_ACCESS, 2047 [C(RESULT_MISS)] = GLM_DEMAND_READ| 2048 GLM_LLC_MISS, 2049 }, 2050 [C(OP_WRITE)] = { 2051 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE| 2052 GLM_LLC_ACCESS, 2053 [C(RESULT_MISS)] = GLM_DEMAND_WRITE| 2054 GLM_LLC_MISS, 2055 }, 2056 [C(OP_PREFETCH)] = { 2057 [C(RESULT_ACCESS)] = 0x0, 2058 [C(RESULT_MISS)] = 0x0, 2059 }, 2060 }, 2061 }; 2062 2063 #define TNT_LOCAL_DRAM BIT_ULL(26) 2064 #define TNT_DEMAND_READ GLM_DEMAND_DATA_RD 2065 #define TNT_DEMAND_WRITE GLM_DEMAND_RFO 2066 #define TNT_LLC_ACCESS GLM_ANY_RESPONSE 2067 #define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \ 2068 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM) 2069 #define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM) 2070 2071 static __initconst const u64 tnt_hw_cache_extra_regs 2072 [PERF_COUNT_HW_CACHE_MAX] 2073 [PERF_COUNT_HW_CACHE_OP_MAX] 2074 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2075 [C(LL)] = { 2076 [C(OP_READ)] = { 2077 [C(RESULT_ACCESS)] = TNT_DEMAND_READ| 2078 TNT_LLC_ACCESS, 2079 [C(RESULT_MISS)] = TNT_DEMAND_READ| 2080 TNT_LLC_MISS, 2081 }, 2082 [C(OP_WRITE)] = { 2083 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE| 2084 TNT_LLC_ACCESS, 2085 [C(RESULT_MISS)] = TNT_DEMAND_WRITE| 2086 TNT_LLC_MISS, 2087 }, 2088 [C(OP_PREFETCH)] = { 2089 [C(RESULT_ACCESS)] = 0x0, 2090 [C(RESULT_MISS)] = 0x0, 2091 }, 2092 }, 2093 }; 2094 2095 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0"); 2096 EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0"); 2097 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6"); 2098 EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0"); 2099 2100 static struct attribute *tnt_events_attrs[] = { 2101 EVENT_PTR(td_fe_bound_tnt), 2102 EVENT_PTR(td_retiring_tnt), 2103 EVENT_PTR(td_bad_spec_tnt), 2104 EVENT_PTR(td_be_bound_tnt), 2105 NULL, 
2106 }; 2107 2108 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = { 2109 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 2110 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0), 2111 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1), 2112 EVENT_EXTRA_END 2113 }; 2114 2115 EVENT_ATTR_STR(mem-loads, mem_ld_grt, "event=0xd0,umask=0x5,ldlat=3"); 2116 EVENT_ATTR_STR(mem-stores, mem_st_grt, "event=0xd0,umask=0x6"); 2117 2118 static struct attribute *grt_mem_attrs[] = { 2119 EVENT_PTR(mem_ld_grt), 2120 EVENT_PTR(mem_st_grt), 2121 NULL 2122 }; 2123 2124 static struct extra_reg intel_grt_extra_regs[] __read_mostly = { 2125 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 2126 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), 2127 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), 2128 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0), 2129 EVENT_EXTRA_END 2130 }; 2131 2132 EVENT_ATTR_STR(topdown-retiring, td_retiring_cmt, "event=0x72,umask=0x0"); 2133 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_cmt, "event=0x73,umask=0x0"); 2134 2135 static struct attribute *cmt_events_attrs[] = { 2136 EVENT_PTR(td_fe_bound_tnt), 2137 EVENT_PTR(td_retiring_cmt), 2138 EVENT_PTR(td_bad_spec_cmt), 2139 EVENT_PTR(td_be_bound_tnt), 2140 NULL 2141 }; 2142 2143 static struct extra_reg intel_cmt_extra_regs[] __read_mostly = { 2144 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 2145 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0), 2146 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff3ffffffffffull, RSP_1), 2147 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0), 2148 INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0), 2149 INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1), 2150 EVENT_EXTRA_END 2151 }; 2152 2153 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */ 2154 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */ 2155 #define KNL_MCDRAM_LOCAL BIT_ULL(21) 2156 #define KNL_MCDRAM_FAR BIT_ULL(22) 2157 #define KNL_DDR_LOCAL BIT_ULL(23) 2158 #define KNL_DDR_FAR BIT_ULL(24) 2159 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \ 2160 KNL_DDR_LOCAL | KNL_DDR_FAR) 2161 #define KNL_L2_READ SLM_DMND_READ 2162 #define KNL_L2_WRITE SLM_DMND_WRITE 2163 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH 2164 #define KNL_L2_ACCESS SLM_LLC_ACCESS 2165 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \ 2166 KNL_DRAM_ANY | SNB_SNP_ANY | \ 2167 SNB_NON_DRAM) 2168 2169 static __initconst const u64 knl_hw_cache_extra_regs 2170 [PERF_COUNT_HW_CACHE_MAX] 2171 [PERF_COUNT_HW_CACHE_OP_MAX] 2172 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2173 [C(LL)] = { 2174 [C(OP_READ)] = { 2175 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS, 2176 [C(RESULT_MISS)] = 0, 2177 }, 2178 [C(OP_WRITE)] = { 2179 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS, 2180 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS, 2181 }, 2182 [C(OP_PREFETCH)] = { 2183 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS, 2184 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS, 2185 }, 2186 }, 2187 }; 2188 2189 /* 2190 * Used from PMIs where the LBRs are already disabled. 2191 * 2192 * This function could be called consecutively. It is required to remain in 2193 * disabled state if called consecutively. 
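 * (i.e. the disable path must be idempotent)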
2194 * 2195 * During consecutive calls, the same disable value will be written to related 2196 * registers, so the PMU state remains unchanged. 2197 * 2198 * intel_bts events don't coexist with intel PMU's BTS events because of 2199 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them 2200 * disabled around intel PMU's event batching etc, only inside the PMI handler. 2201 * 2202 * Avoid PEBS_ENABLE MSR access in PMIs. 2203 * The GLOBAL_CTRL has been disabled. All the counters do not count anymore. 2204 * It doesn't matter if the PEBS is enabled or not. 2205 * Usually, the PEBS status are not changed in PMIs. It's unnecessary to 2206 * access PEBS_ENABLE MSR in disable_all()/enable_all(). 2207 * However, there are some cases which may change PEBS status, e.g. PMI 2208 * throttle. The PEBS_ENABLE should be updated where the status changes. 2209 */ 2210 static __always_inline void __intel_pmu_disable_all(bool bts) 2211 { 2212 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2213 2214 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 2215 2216 if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) 2217 intel_pmu_disable_bts(); 2218 } 2219 2220 static __always_inline void intel_pmu_disable_all(void) 2221 { 2222 __intel_pmu_disable_all(true); 2223 intel_pmu_pebs_disable_all(); 2224 intel_pmu_lbr_disable_all(); 2225 } 2226 2227 static void __intel_pmu_enable_all(int added, bool pmi) 2228 { 2229 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2230 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); 2231 2232 intel_pmu_lbr_enable_all(pmi); 2233 2234 if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) { 2235 wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val); 2236 cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val; 2237 } 2238 2239 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 2240 intel_ctrl & ~cpuc->intel_ctrl_guest_mask); 2241 2242 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { 2243 struct perf_event *event = 2244 cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; 2245 2246 if (WARN_ON_ONCE(!event)) 2247 return; 2248 2249 intel_pmu_enable_bts(event->hw.config); 2250 } 2251 } 2252 2253 static void intel_pmu_enable_all(int added) 2254 { 2255 intel_pmu_pebs_enable_all(); 2256 __intel_pmu_enable_all(added, false); 2257 } 2258 2259 static noinline int 2260 __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, 2261 unsigned int cnt, unsigned long flags) 2262 { 2263 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2264 2265 intel_pmu_lbr_read(); 2266 cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr); 2267 2268 memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt); 2269 intel_pmu_enable_all(0); 2270 local_irq_restore(flags); 2271 return cnt; 2272 } 2273 2274 static int 2275 intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt) 2276 { 2277 unsigned long flags; 2278 2279 /* must not have branches... */ 2280 local_irq_save(flags); 2281 __intel_pmu_disable_all(false); /* we don't care about BTS */ 2282 __intel_pmu_lbr_disable(); 2283 /* ... until here */ 2284 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); 2285 } 2286 2287 static int 2288 intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt) 2289 { 2290 unsigned long flags; 2291 2292 /* must not have branches... */ 2293 local_irq_save(flags); 2294 __intel_pmu_disable_all(false); /* we don't care about BTS */ 2295 __intel_pmu_arch_lbr_disable(); 2296 /* ... 
until here */ 2297 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); 2298 } 2299 2300 /* 2301 * Workaround for: 2302 * Intel Errata AAK100 (model 26) 2303 * Intel Errata AAP53 (model 30) 2304 * Intel Errata BD53 (model 44) 2305 * 2306 * The official story: 2307 * These chips need to be 'reset' when adding counters by programming the 2308 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either 2309 * in sequence on the same PMC or on different PMCs. 2310 * 2311 * In practice it appears some of these events do in fact count, and 2312 * we need to program all 4 events. 2313 */ 2314 static void intel_pmu_nhm_workaround(void) 2315 { 2316 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2317 static const unsigned long nhm_magic[4] = { 2318 0x4300B5, 2319 0x4300D2, 2320 0x4300B1, 2321 0x4300B1 2322 }; 2323 struct perf_event *event; 2324 int i; 2325 2326 /* 2327 * The Errata requires below steps: 2328 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL; 2329 * 2) Configure 4 PERFEVTSELx with the magic events and clear 2330 * the corresponding PMCx; 2331 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL; 2332 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL; 2333 * 5) Clear 4 pairs of ERFEVTSELx and PMCx; 2334 */ 2335 2336 /* 2337 * The real steps we choose are a little different from above. 2338 * A) To reduce MSR operations, we don't run step 1) as they 2339 * are already cleared before this function is called; 2340 * B) Call x86_perf_event_update to save PMCx before configuring 2341 * PERFEVTSELx with magic number; 2342 * C) With step 5), we do clear only when the PERFEVTSELx is 2343 * not used currently. 2344 * D) Call x86_perf_event_set_period to restore PMCx; 2345 */ 2346 2347 /* We always operate 4 pairs of PERF Counters */ 2348 for (i = 0; i < 4; i++) { 2349 event = cpuc->events[i]; 2350 if (event) 2351 static_call(x86_pmu_update)(event); 2352 } 2353 2354 for (i = 0; i < 4; i++) { 2355 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]); 2356 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0); 2357 } 2358 2359 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf); 2360 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); 2361 2362 for (i = 0; i < 4; i++) { 2363 event = cpuc->events[i]; 2364 2365 if (event) { 2366 static_call(x86_pmu_set_period)(event); 2367 __x86_pmu_enable_event(&event->hw, 2368 ARCH_PERFMON_EVENTSEL_ENABLE); 2369 } else 2370 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0); 2371 } 2372 } 2373 2374 static void intel_pmu_nhm_enable_all(int added) 2375 { 2376 if (added) 2377 intel_pmu_nhm_workaround(); 2378 intel_pmu_enable_all(added); 2379 } 2380 2381 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on) 2382 { 2383 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0; 2384 2385 if (cpuc->tfa_shadow != val) { 2386 cpuc->tfa_shadow = val; 2387 wrmsrl(MSR_TSX_FORCE_ABORT, val); 2388 } 2389 } 2390 2391 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) 2392 { 2393 /* 2394 * We're going to use PMC3, make sure TFA is set before we touch it. 2395 */ 2396 if (cntr == 3) 2397 intel_set_tfa(cpuc, true); 2398 } 2399 2400 static void intel_tfa_pmu_enable_all(int added) 2401 { 2402 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2403 2404 /* 2405 * If we find PMC3 is no longer used when we enable the PMU, we can 2406 * clear TFA. 
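 * Note that intel_set_tfa() only writes MSR_TSX_FORCE_ABORT when the cached
 * tfa_shadow value actually changes, so doing this on every PMU enable is cheap.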
2407 */ 2408 if (!test_bit(3, cpuc->active_mask)) 2409 intel_set_tfa(cpuc, false); 2410 2411 intel_pmu_enable_all(added); 2412 } 2413 2414 static inline u64 intel_pmu_get_status(void) 2415 { 2416 u64 status; 2417 2418 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); 2419 2420 return status; 2421 } 2422 2423 static inline void intel_pmu_ack_status(u64 ack) 2424 { 2425 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); 2426 } 2427 2428 static inline bool event_is_checkpointed(struct perf_event *event) 2429 { 2430 return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0; 2431 } 2432 2433 static inline void intel_set_masks(struct perf_event *event, int idx) 2434 { 2435 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2436 2437 if (event->attr.exclude_host) 2438 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); 2439 if (event->attr.exclude_guest) 2440 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); 2441 if (event_is_checkpointed(event)) 2442 __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status); 2443 } 2444 2445 static inline void intel_clear_masks(struct perf_event *event, int idx) 2446 { 2447 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2448 2449 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); 2450 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); 2451 __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status); 2452 } 2453 2454 static void intel_pmu_disable_fixed(struct perf_event *event) 2455 { 2456 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2457 struct hw_perf_event *hwc = &event->hw; 2458 int idx = hwc->idx; 2459 u64 mask; 2460 2461 if (is_topdown_idx(idx)) { 2462 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2463 2464 /* 2465 * When there are other active TopDown events, 2466 * don't disable the fixed counter 3. 2467 */ 2468 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) 2469 return; 2470 idx = INTEL_PMC_IDX_FIXED_SLOTS; 2471 } 2472 2473 intel_clear_masks(event, idx); 2474 2475 mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK); 2476 cpuc->fixed_ctrl_val &= ~mask; 2477 } 2478 2479 static void intel_pmu_disable_event(struct perf_event *event) 2480 { 2481 struct hw_perf_event *hwc = &event->hw; 2482 int idx = hwc->idx; 2483 2484 switch (idx) { 2485 case 0 ... INTEL_PMC_IDX_FIXED - 1: 2486 intel_clear_masks(event, idx); 2487 x86_pmu_disable_event(event); 2488 break; 2489 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: 2490 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: 2491 intel_pmu_disable_fixed(event); 2492 break; 2493 case INTEL_PMC_IDX_FIXED_BTS: 2494 intel_pmu_disable_bts(); 2495 intel_pmu_drain_bts_buffer(); 2496 return; 2497 case INTEL_PMC_IDX_FIXED_VLBR: 2498 intel_clear_masks(event, idx); 2499 break; 2500 default: 2501 intel_clear_masks(event, idx); 2502 pr_warn("Failed to disable the event with invalid index %d\n", 2503 idx); 2504 return; 2505 } 2506 2507 /* 2508 * Needs to be called after x86_pmu_disable_event, 2509 * so we don't trigger the event without PEBS bit set. 
2510 */ 2511 if (unlikely(event->attr.precise_ip)) 2512 intel_pmu_pebs_disable(event); 2513 } 2514 2515 static void intel_pmu_assign_event(struct perf_event *event, int idx) 2516 { 2517 if (is_pebs_pt(event)) 2518 perf_report_aux_output_id(event, idx); 2519 } 2520 2521 static void intel_pmu_del_event(struct perf_event *event) 2522 { 2523 if (needs_branch_stack(event)) 2524 intel_pmu_lbr_del(event); 2525 if (event->attr.precise_ip) 2526 intel_pmu_pebs_del(event); 2527 } 2528 2529 static int icl_set_topdown_event_period(struct perf_event *event) 2530 { 2531 struct hw_perf_event *hwc = &event->hw; 2532 s64 left = local64_read(&hwc->period_left); 2533 2534 /* 2535 * The values in PERF_METRICS MSR are derived from fixed counter 3. 2536 * Software should start both registers, PERF_METRICS and fixed 2537 * counter 3, from zero. 2538 * Clear PERF_METRICS and Fixed counter 3 in initialization. 2539 * After that, both MSRs will be cleared for each read. 2540 * Don't need to clear them again. 2541 */ 2542 if (left == x86_pmu.max_period) { 2543 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); 2544 wrmsrl(MSR_PERF_METRICS, 0); 2545 hwc->saved_slots = 0; 2546 hwc->saved_metric = 0; 2547 } 2548 2549 if ((hwc->saved_slots) && is_slots_event(event)) { 2550 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots); 2551 wrmsrl(MSR_PERF_METRICS, hwc->saved_metric); 2552 } 2553 2554 perf_event_update_userpage(event); 2555 2556 return 0; 2557 } 2558 2559 static int adl_set_topdown_event_period(struct perf_event *event) 2560 { 2561 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 2562 2563 if (pmu->cpu_type != hybrid_big) 2564 return 0; 2565 2566 return icl_set_topdown_event_period(event); 2567 } 2568 2569 DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period); 2570 2571 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx) 2572 { 2573 u32 val; 2574 2575 /* 2576 * The metric is reported as an 8bit integer fraction 2577 * summing up to 0xff. 2578 * slots-in-metric = (Metric / 0xff) * slots 2579 */ 2580 val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff; 2581 return mul_u64_u32_div(slots, val, 0xff); 2582 } 2583 2584 static u64 icl_get_topdown_value(struct perf_event *event, 2585 u64 slots, u64 metrics) 2586 { 2587 int idx = event->hw.idx; 2588 u64 delta; 2589 2590 if (is_metric_idx(idx)) 2591 delta = icl_get_metrics_event_value(metrics, slots, idx); 2592 else 2593 delta = slots; 2594 2595 return delta; 2596 } 2597 2598 static void __icl_update_topdown_event(struct perf_event *event, 2599 u64 slots, u64 metrics, 2600 u64 last_slots, u64 last_metrics) 2601 { 2602 u64 delta, last = 0; 2603 2604 delta = icl_get_topdown_value(event, slots, metrics); 2605 if (last_slots) 2606 last = icl_get_topdown_value(event, last_slots, last_metrics); 2607 2608 /* 2609 * The 8bit integer fraction of metric may be not accurate, 2610 * especially when the changes is very small. 2611 * For example, if only a few bad_spec happens, the fraction 2612 * may be reduced from 1 to 0. If so, the bad_spec event value 2613 * will be 0 which is definitely less than the last value. 2614 * Avoid update event->count for this case. 
2615 */ 2616 if (delta > last) { 2617 delta -= last; 2618 local64_add(delta, &event->count); 2619 } 2620 } 2621 2622 static void update_saved_topdown_regs(struct perf_event *event, u64 slots, 2623 u64 metrics, int metric_end) 2624 { 2625 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2626 struct perf_event *other; 2627 int idx; 2628 2629 event->hw.saved_slots = slots; 2630 event->hw.saved_metric = metrics; 2631 2632 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) { 2633 if (!is_topdown_idx(idx)) 2634 continue; 2635 other = cpuc->events[idx]; 2636 other->hw.saved_slots = slots; 2637 other->hw.saved_metric = metrics; 2638 } 2639 } 2640 2641 /* 2642 * Update all active Topdown events. 2643 * 2644 * The PERF_METRICS and Fixed counter 3 are read separately. The values may be 2645 * modify by a NMI. PMU has to be disabled before calling this function. 2646 */ 2647 2648 static u64 intel_update_topdown_event(struct perf_event *event, int metric_end) 2649 { 2650 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2651 struct perf_event *other; 2652 u64 slots, metrics; 2653 bool reset = true; 2654 int idx; 2655 2656 /* read Fixed counter 3 */ 2657 rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots); 2658 if (!slots) 2659 return 0; 2660 2661 /* read PERF_METRICS */ 2662 rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics); 2663 2664 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) { 2665 if (!is_topdown_idx(idx)) 2666 continue; 2667 other = cpuc->events[idx]; 2668 __icl_update_topdown_event(other, slots, metrics, 2669 event ? event->hw.saved_slots : 0, 2670 event ? event->hw.saved_metric : 0); 2671 } 2672 2673 /* 2674 * Check and update this event, which may have been cleared 2675 * in active_mask e.g. x86_pmu_stop() 2676 */ 2677 if (event && !test_bit(event->hw.idx, cpuc->active_mask)) { 2678 __icl_update_topdown_event(event, slots, metrics, 2679 event->hw.saved_slots, 2680 event->hw.saved_metric); 2681 2682 /* 2683 * In x86_pmu_stop(), the event is cleared in active_mask first, 2684 * then drain the delta, which indicates context switch for 2685 * counting. 2686 * Save metric and slots for context switch. 2687 * Don't need to reset the PERF_METRICS and Fixed counter 3. 2688 * Because the values will be restored in next schedule in. 2689 */ 2690 update_saved_topdown_regs(event, slots, metrics, metric_end); 2691 reset = false; 2692 } 2693 2694 if (reset) { 2695 /* The fixed counter 3 has to be written before the PERF_METRICS. */ 2696 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); 2697 wrmsrl(MSR_PERF_METRICS, 0); 2698 if (event) 2699 update_saved_topdown_regs(event, 0, 0, metric_end); 2700 } 2701 2702 return slots; 2703 } 2704 2705 static u64 icl_update_topdown_event(struct perf_event *event) 2706 { 2707 return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE + 2708 x86_pmu.num_topdown_events - 1); 2709 } 2710 2711 static u64 adl_update_topdown_event(struct perf_event *event) 2712 { 2713 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 2714 2715 if (pmu->cpu_type != hybrid_big) 2716 return 0; 2717 2718 return icl_update_topdown_event(event); 2719 } 2720 2721 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update); 2722 2723 static void intel_pmu_read_topdown_event(struct perf_event *event) 2724 { 2725 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2726 2727 /* Only need to call update_topdown_event() once for group read. 
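 * The early return below lets the non-slots members of a group read skip the
 * update; the update done for the slots event walks cpuc->active_mask and
 * refreshes every active topdown event, see intel_update_topdown_event().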
*/ 2728 if ((cpuc->txn_flags & PERF_PMU_TXN_READ) && 2729 !is_slots_event(event)) 2730 return; 2731 2732 perf_pmu_disable(event->pmu); 2733 static_call(intel_pmu_update_topdown_event)(event); 2734 perf_pmu_enable(event->pmu); 2735 } 2736 2737 static void intel_pmu_read_event(struct perf_event *event) 2738 { 2739 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) 2740 intel_pmu_auto_reload_read(event); 2741 else if (is_topdown_count(event)) 2742 intel_pmu_read_topdown_event(event); 2743 else 2744 x86_perf_event_update(event); 2745 } 2746 2747 static void intel_pmu_enable_fixed(struct perf_event *event) 2748 { 2749 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2750 struct hw_perf_event *hwc = &event->hw; 2751 u64 mask, bits = 0; 2752 int idx = hwc->idx; 2753 2754 if (is_topdown_idx(idx)) { 2755 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2756 /* 2757 * When there are other active TopDown events, 2758 * don't enable the fixed counter 3 again. 2759 */ 2760 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) 2761 return; 2762 2763 idx = INTEL_PMC_IDX_FIXED_SLOTS; 2764 } 2765 2766 intel_set_masks(event, idx); 2767 2768 /* 2769 * Enable IRQ generation (0x8), if not PEBS, 2770 * and enable ring-3 counting (0x2) and ring-0 counting (0x1) 2771 * if requested: 2772 */ 2773 if (!event->attr.precise_ip) 2774 bits |= INTEL_FIXED_0_ENABLE_PMI; 2775 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) 2776 bits |= INTEL_FIXED_0_USER; 2777 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) 2778 bits |= INTEL_FIXED_0_KERNEL; 2779 2780 /* 2781 * ANY bit is supported in v3 and up 2782 */ 2783 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) 2784 bits |= INTEL_FIXED_0_ANYTHREAD; 2785 2786 idx -= INTEL_PMC_IDX_FIXED; 2787 bits = intel_fixed_bits_by_idx(idx, bits); 2788 mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK); 2789 2790 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) { 2791 bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE); 2792 mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE); 2793 } 2794 2795 cpuc->fixed_ctrl_val &= ~mask; 2796 cpuc->fixed_ctrl_val |= bits; 2797 } 2798 2799 static void intel_pmu_enable_event(struct perf_event *event) 2800 { 2801 struct hw_perf_event *hwc = &event->hw; 2802 int idx = hwc->idx; 2803 2804 if (unlikely(event->attr.precise_ip)) 2805 intel_pmu_pebs_enable(event); 2806 2807 switch (idx) { 2808 case 0 ... INTEL_PMC_IDX_FIXED - 1: 2809 intel_set_masks(event, idx); 2810 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); 2811 break; 2812 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: 2813 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: 2814 intel_pmu_enable_fixed(event); 2815 break; 2816 case INTEL_PMC_IDX_FIXED_BTS: 2817 if (!__this_cpu_read(cpu_hw_events.enabled)) 2818 return; 2819 intel_pmu_enable_bts(hwc->config); 2820 break; 2821 case INTEL_PMC_IDX_FIXED_VLBR: 2822 intel_set_masks(event, idx); 2823 break; 2824 default: 2825 pr_warn("Failed to enable the event with invalid index %d\n", 2826 idx); 2827 } 2828 } 2829 2830 static void intel_pmu_add_event(struct perf_event *event) 2831 { 2832 if (event->attr.precise_ip) 2833 intel_pmu_pebs_add(event); 2834 if (needs_branch_stack(event)) 2835 intel_pmu_lbr_add(event); 2836 } 2837 2838 /* 2839 * Save and restart an expired event. 
Called by NMI contexts, 2840 * so it has to be careful about preempting normal event ops: 2841 */ 2842 int intel_pmu_save_and_restart(struct perf_event *event) 2843 { 2844 static_call(x86_pmu_update)(event); 2845 /* 2846 * For a checkpointed counter always reset back to 0. This 2847 * avoids a situation where the counter overflows, aborts the 2848 * transaction and is then set back to shortly before the 2849 * overflow, and overflows and aborts again. 2850 */ 2851 if (unlikely(event_is_checkpointed(event))) { 2852 /* No race with NMIs because the counter should not be armed */ 2853 wrmsrl(event->hw.event_base, 0); 2854 local64_set(&event->hw.prev_count, 0); 2855 } 2856 return static_call(x86_pmu_set_period)(event); 2857 } 2858 2859 static int intel_pmu_set_period(struct perf_event *event) 2860 { 2861 if (unlikely(is_topdown_count(event))) 2862 return static_call(intel_pmu_set_topdown_event_period)(event); 2863 2864 return x86_perf_event_set_period(event); 2865 } 2866 2867 static u64 intel_pmu_update(struct perf_event *event) 2868 { 2869 if (unlikely(is_topdown_count(event))) 2870 return static_call(intel_pmu_update_topdown_event)(event); 2871 2872 return x86_perf_event_update(event); 2873 } 2874 2875 static void intel_pmu_reset(void) 2876 { 2877 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); 2878 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2879 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); 2880 int num_counters = hybrid(cpuc->pmu, num_counters); 2881 unsigned long flags; 2882 int idx; 2883 2884 if (!num_counters) 2885 return; 2886 2887 local_irq_save(flags); 2888 2889 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id()); 2890 2891 for (idx = 0; idx < num_counters; idx++) { 2892 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull); 2893 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull); 2894 } 2895 for (idx = 0; idx < num_counters_fixed; idx++) { 2896 if (fixed_counter_disabled(idx, cpuc->pmu)) 2897 continue; 2898 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); 2899 } 2900 2901 if (ds) 2902 ds->bts_index = ds->bts_buffer_base; 2903 2904 /* Ack all overflows and disable fixed counters */ 2905 if (x86_pmu.version >= 2) { 2906 intel_pmu_ack_status(intel_pmu_get_status()); 2907 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 2908 } 2909 2910 /* Reset LBRs and LBR freezing */ 2911 if (x86_pmu.lbr_nr) { 2912 update_debugctlmsr(get_debugctlmsr() & 2913 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR)); 2914 } 2915 2916 local_irq_restore(flags); 2917 } 2918 2919 /* 2920 * We may be running with guest PEBS events created by KVM, and the 2921 * PEBS records are logged into the guest's DS and invisible to host. 2922 * 2923 * In the case of guest PEBS overflow, we only trigger a fake event 2924 * to emulate the PEBS overflow PMI for guest PEBS counters in KVM. 2925 * The guest will then vm-entry and check the guest DS area to read 2926 * the guest PEBS records. 2927 * 2928 * The contents and other behavior of the guest event do not matter. 
2929 */ 2930 static void x86_pmu_handle_guest_pebs(struct pt_regs *regs, 2931 struct perf_sample_data *data) 2932 { 2933 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2934 u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask; 2935 struct perf_event *event = NULL; 2936 int bit; 2937 2938 if (!unlikely(perf_guest_state())) 2939 return; 2940 2941 if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active || 2942 !guest_pebs_idxs) 2943 return; 2944 2945 for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs, 2946 INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed) { 2947 event = cpuc->events[bit]; 2948 if (!event->attr.precise_ip) 2949 continue; 2950 2951 perf_sample_data_init(data, 0, event->hw.last_period); 2952 if (perf_event_overflow(event, data, regs)) 2953 x86_pmu_stop(event, 0); 2954 2955 /* Inject one fake event is enough. */ 2956 break; 2957 } 2958 } 2959 2960 static int handle_pmi_common(struct pt_regs *regs, u64 status) 2961 { 2962 struct perf_sample_data data; 2963 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2964 int bit; 2965 int handled = 0; 2966 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); 2967 2968 inc_irq_stat(apic_perf_irqs); 2969 2970 /* 2971 * Ignore a range of extra bits in status that do not indicate 2972 * overflow by themselves. 2973 */ 2974 status &= ~(GLOBAL_STATUS_COND_CHG | 2975 GLOBAL_STATUS_ASIF | 2976 GLOBAL_STATUS_LBRS_FROZEN); 2977 if (!status) 2978 return 0; 2979 /* 2980 * In case multiple PEBS events are sampled at the same time, 2981 * it is possible to have GLOBAL_STATUS bit 62 set indicating 2982 * PEBS buffer overflow and also seeing at most 3 PEBS counters 2983 * having their bits set in the status register. This is a sign 2984 * that there was at least one PEBS record pending at the time 2985 * of the PMU interrupt. PEBS counters must only be processed 2986 * via the drain_pebs() calls and not via the regular sample 2987 * processing loop coming after that the function, otherwise 2988 * phony regular samples may be generated in the sampling buffer 2989 * not marked with the EXACT tag. Another possibility is to have 2990 * one PEBS event and at least one non-PEBS event which overflows 2991 * while PEBS has armed. In this case, bit 62 of GLOBAL_STATUS will 2992 * not be set, yet the overflow status bit for the PEBS counter will 2993 * be on Skylake. 2994 * 2995 * To avoid this problem, we systematically ignore the PEBS-enabled 2996 * counters from the GLOBAL_STATUS mask and we always process PEBS 2997 * events via drain_pebs(). 2998 */ 2999 status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable); 3000 3001 /* 3002 * PEBS overflow sets bit 62 in the global status register 3003 */ 3004 if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) { 3005 u64 pebs_enabled = cpuc->pebs_enabled; 3006 3007 handled++; 3008 x86_pmu_handle_guest_pebs(regs, &data); 3009 x86_pmu.drain_pebs(regs, &data); 3010 status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI; 3011 3012 /* 3013 * PMI throttle may be triggered, which stops the PEBS event. 3014 * Although cpuc->pebs_enabled is updated accordingly, the 3015 * MSR_IA32_PEBS_ENABLE is not updated. Because the 3016 * cpuc->enabled has been forced to 0 in PMI. 3017 * Update the MSR if pebs_enabled is changed. 
3018 */ 3019 if (pebs_enabled != cpuc->pebs_enabled) 3020 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 3021 } 3022 3023 /* 3024 * Intel PT 3025 */ 3026 if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) { 3027 handled++; 3028 if (!perf_guest_handle_intel_pt_intr()) 3029 intel_pt_interrupt(); 3030 } 3031 3032 /* 3033 * Intel Perf metrics 3034 */ 3035 if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) { 3036 handled++; 3037 static_call(intel_pmu_update_topdown_event)(NULL); 3038 } 3039 3040 /* 3041 * Checkpointed counters can lead to 'spurious' PMIs because the 3042 * rollback caused by the PMI will have cleared the overflow status 3043 * bit. Therefore always force probe these counters. 3044 */ 3045 status |= cpuc->intel_cp_status; 3046 3047 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { 3048 struct perf_event *event = cpuc->events[bit]; 3049 3050 handled++; 3051 3052 if (!test_bit(bit, cpuc->active_mask)) 3053 continue; 3054 3055 if (!intel_pmu_save_and_restart(event)) 3056 continue; 3057 3058 perf_sample_data_init(&data, 0, event->hw.last_period); 3059 3060 if (has_branch_stack(event)) 3061 perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); 3062 3063 if (perf_event_overflow(event, &data, regs)) 3064 x86_pmu_stop(event, 0); 3065 } 3066 3067 return handled; 3068 } 3069 3070 /* 3071 * This handler is triggered by the local APIC, so the APIC IRQ handling 3072 * rules apply: 3073 */ 3074 static int intel_pmu_handle_irq(struct pt_regs *regs) 3075 { 3076 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 3077 bool late_ack = hybrid_bit(cpuc->pmu, late_ack); 3078 bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack); 3079 int loops; 3080 u64 status; 3081 int handled; 3082 int pmu_enabled; 3083 3084 /* 3085 * Save the PMU state. 3086 * It needs to be restored when leaving the handler. 3087 */ 3088 pmu_enabled = cpuc->enabled; 3089 /* 3090 * In general, the early ACK is only applied for old platforms. 3091 * For the big core starts from Haswell, the late ACK should be 3092 * applied. 3093 * For the small core after Tremont, we have to do the ACK right 3094 * before re-enabling counters, which is in the middle of the 3095 * NMI handler. 3096 */ 3097 if (!late_ack && !mid_ack) 3098 apic_write(APIC_LVTPC, APIC_DM_NMI); 3099 intel_bts_disable_local(); 3100 cpuc->enabled = 0; 3101 __intel_pmu_disable_all(true); 3102 handled = intel_pmu_drain_bts_buffer(); 3103 handled += intel_bts_interrupt(); 3104 status = intel_pmu_get_status(); 3105 if (!status) 3106 goto done; 3107 3108 loops = 0; 3109 again: 3110 intel_pmu_lbr_read(); 3111 intel_pmu_ack_status(status); 3112 if (++loops > 100) { 3113 static bool warned; 3114 3115 if (!warned) { 3116 WARN(1, "perfevents: irq loop stuck!\n"); 3117 perf_event_print_debug(); 3118 warned = true; 3119 } 3120 intel_pmu_reset(); 3121 goto done; 3122 } 3123 3124 handled += handle_pmi_common(regs, status); 3125 3126 /* 3127 * Repeat if there is more work to be done: 3128 */ 3129 status = intel_pmu_get_status(); 3130 if (status) 3131 goto again; 3132 3133 done: 3134 if (mid_ack) 3135 apic_write(APIC_LVTPC, APIC_DM_NMI); 3136 /* Only restore PMU state when it's active. See x86_pmu_disable(). */ 3137 cpuc->enabled = pmu_enabled; 3138 if (pmu_enabled) 3139 __intel_pmu_enable_all(0, true); 3140 intel_bts_enable_local(); 3141 3142 /* 3143 * Only unmask the NMI after the overflow counters 3144 * have been reset. This avoids spurious NMIs on 3145 * Haswell CPUs. 
3146 */ 3147 if (late_ack) 3148 apic_write(APIC_LVTPC, APIC_DM_NMI); 3149 return handled; 3150 } 3151 3152 static struct event_constraint * 3153 intel_bts_constraints(struct perf_event *event) 3154 { 3155 if (unlikely(intel_pmu_has_bts(event))) 3156 return &bts_constraint; 3157 3158 return NULL; 3159 } 3160 3161 /* 3162 * Note: matches a fake event, like Fixed2. 3163 */ 3164 static struct event_constraint * 3165 intel_vlbr_constraints(struct perf_event *event) 3166 { 3167 struct event_constraint *c = &vlbr_constraint; 3168 3169 if (unlikely(constraint_match(c, event->hw.config))) { 3170 event->hw.flags |= c->flags; 3171 return c; 3172 } 3173 3174 return NULL; 3175 } 3176 3177 static int intel_alt_er(struct cpu_hw_events *cpuc, 3178 int idx, u64 config) 3179 { 3180 struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs); 3181 int alt_idx = idx; 3182 3183 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1)) 3184 return idx; 3185 3186 if (idx == EXTRA_REG_RSP_0) 3187 alt_idx = EXTRA_REG_RSP_1; 3188 3189 if (idx == EXTRA_REG_RSP_1) 3190 alt_idx = EXTRA_REG_RSP_0; 3191 3192 if (config & ~extra_regs[alt_idx].valid_mask) 3193 return idx; 3194 3195 return alt_idx; 3196 } 3197 3198 static void intel_fixup_er(struct perf_event *event, int idx) 3199 { 3200 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs); 3201 event->hw.extra_reg.idx = idx; 3202 3203 if (idx == EXTRA_REG_RSP_0) { 3204 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 3205 event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event; 3206 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; 3207 } else if (idx == EXTRA_REG_RSP_1) { 3208 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 3209 event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event; 3210 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; 3211 } 3212 } 3213 3214 /* 3215 * manage allocation of shared extra msr for certain events 3216 * 3217 * sharing can be: 3218 * per-cpu: to be shared between the various events on a single PMU 3219 * per-core: per-cpu + shared by HT threads 3220 */ 3221 static struct event_constraint * 3222 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, 3223 struct perf_event *event, 3224 struct hw_perf_event_extra *reg) 3225 { 3226 struct event_constraint *c = &emptyconstraint; 3227 struct er_account *era; 3228 unsigned long flags; 3229 int idx = reg->idx; 3230 3231 /* 3232 * reg->alloc can be set due to existing state, so for fake cpuc we 3233 * need to ignore this, otherwise we might fail to allocate proper fake 3234 * state for this extra reg constraint. Also see the comment below. 3235 */ 3236 if (reg->alloc && !cpuc->is_fake) 3237 return NULL; /* call x86_get_event_constraint() */ 3238 3239 again: 3240 era = &cpuc->shared_regs->regs[idx]; 3241 /* 3242 * we use spin_lock_irqsave() to avoid lockdep issues when 3243 * passing a fake cpuc 3244 */ 3245 raw_spin_lock_irqsave(&era->lock, flags); 3246 3247 if (!atomic_read(&era->ref) || era->config == reg->config) { 3248 3249 /* 3250 * If its a fake cpuc -- as per validate_{group,event}() we 3251 * shouldn't touch event state and we can avoid doing so 3252 * since both will only call get_event_constraints() once 3253 * on each event, this avoids the need for reg->alloc. 3254 * 3255 * Not doing the ER fixup will only result in era->reg being 3256 * wrong, but since we won't actually try and program hardware 3257 * this isn't a problem either. 
3258 */ 3259 if (!cpuc->is_fake) { 3260 if (idx != reg->idx) 3261 intel_fixup_er(event, idx); 3262 3263 /* 3264 * x86_schedule_events() can call get_event_constraints() 3265 * multiple times on events in the case of incremental 3266 * scheduling(). reg->alloc ensures we only do the ER 3267 * allocation once. 3268 */ 3269 reg->alloc = 1; 3270 } 3271 3272 /* lock in msr value */ 3273 era->config = reg->config; 3274 era->reg = reg->reg; 3275 3276 /* one more user */ 3277 atomic_inc(&era->ref); 3278 3279 /* 3280 * need to call x86_get_event_constraint() 3281 * to check if associated event has constraints 3282 */ 3283 c = NULL; 3284 } else { 3285 idx = intel_alt_er(cpuc, idx, reg->config); 3286 if (idx != reg->idx) { 3287 raw_spin_unlock_irqrestore(&era->lock, flags); 3288 goto again; 3289 } 3290 } 3291 raw_spin_unlock_irqrestore(&era->lock, flags); 3292 3293 return c; 3294 } 3295 3296 static void 3297 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc, 3298 struct hw_perf_event_extra *reg) 3299 { 3300 struct er_account *era; 3301 3302 /* 3303 * Only put constraint if extra reg was actually allocated. Also takes 3304 * care of event which do not use an extra shared reg. 3305 * 3306 * Also, if this is a fake cpuc we shouldn't touch any event state 3307 * (reg->alloc) and we don't care about leaving inconsistent cpuc state 3308 * either since it'll be thrown out. 3309 */ 3310 if (!reg->alloc || cpuc->is_fake) 3311 return; 3312 3313 era = &cpuc->shared_regs->regs[reg->idx]; 3314 3315 /* one fewer user */ 3316 atomic_dec(&era->ref); 3317 3318 /* allocate again next time */ 3319 reg->alloc = 0; 3320 } 3321 3322 static struct event_constraint * 3323 intel_shared_regs_constraints(struct cpu_hw_events *cpuc, 3324 struct perf_event *event) 3325 { 3326 struct event_constraint *c = NULL, *d; 3327 struct hw_perf_event_extra *xreg, *breg; 3328 3329 xreg = &event->hw.extra_reg; 3330 if (xreg->idx != EXTRA_REG_NONE) { 3331 c = __intel_shared_reg_get_constraints(cpuc, event, xreg); 3332 if (c == &emptyconstraint) 3333 return c; 3334 } 3335 breg = &event->hw.branch_reg; 3336 if (breg->idx != EXTRA_REG_NONE) { 3337 d = __intel_shared_reg_get_constraints(cpuc, event, breg); 3338 if (d == &emptyconstraint) { 3339 __intel_shared_reg_put_constraints(cpuc, xreg); 3340 c = d; 3341 } 3342 } 3343 return c; 3344 } 3345 3346 struct event_constraint * 3347 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 3348 struct perf_event *event) 3349 { 3350 struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints); 3351 struct event_constraint *c; 3352 3353 if (event_constraints) { 3354 for_each_event_constraint(c, event_constraints) { 3355 if (constraint_match(c, event->hw.config)) { 3356 event->hw.flags |= c->flags; 3357 return c; 3358 } 3359 } 3360 } 3361 3362 return &hybrid_var(cpuc->pmu, unconstrained); 3363 } 3364 3365 static struct event_constraint * 3366 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 3367 struct perf_event *event) 3368 { 3369 struct event_constraint *c; 3370 3371 c = intel_vlbr_constraints(event); 3372 if (c) 3373 return c; 3374 3375 c = intel_bts_constraints(event); 3376 if (c) 3377 return c; 3378 3379 c = intel_shared_regs_constraints(cpuc, event); 3380 if (c) 3381 return c; 3382 3383 c = intel_pebs_constraints(event); 3384 if (c) 3385 return c; 3386 3387 return x86_get_event_constraints(cpuc, idx, event); 3388 } 3389 3390 static void 3391 intel_start_scheduling(struct cpu_hw_events *cpuc) 3392 { 3393 struct intel_excl_cntrs *excl_cntrs = 
cpuc->excl_cntrs; 3394 struct intel_excl_states *xl; 3395 int tid = cpuc->excl_thread_id; 3396 3397 /* 3398 * nothing needed if in group validation mode 3399 */ 3400 if (cpuc->is_fake || !is_ht_workaround_enabled()) 3401 return; 3402 3403 /* 3404 * no exclusion needed 3405 */ 3406 if (WARN_ON_ONCE(!excl_cntrs)) 3407 return; 3408 3409 xl = &excl_cntrs->states[tid]; 3410 3411 xl->sched_started = true; 3412 /* 3413 * lock shared state until we are done scheduling 3414 * in stop_event_scheduling() 3415 * makes scheduling appear as a transaction 3416 */ 3417 raw_spin_lock(&excl_cntrs->lock); 3418 } 3419 3420 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) 3421 { 3422 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 3423 struct event_constraint *c = cpuc->event_constraint[idx]; 3424 struct intel_excl_states *xl; 3425 int tid = cpuc->excl_thread_id; 3426 3427 if (cpuc->is_fake || !is_ht_workaround_enabled()) 3428 return; 3429 3430 if (WARN_ON_ONCE(!excl_cntrs)) 3431 return; 3432 3433 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) 3434 return; 3435 3436 xl = &excl_cntrs->states[tid]; 3437 3438 lockdep_assert_held(&excl_cntrs->lock); 3439 3440 if (c->flags & PERF_X86_EVENT_EXCL) 3441 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE; 3442 else 3443 xl->state[cntr] = INTEL_EXCL_SHARED; 3444 } 3445 3446 static void 3447 intel_stop_scheduling(struct cpu_hw_events *cpuc) 3448 { 3449 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 3450 struct intel_excl_states *xl; 3451 int tid = cpuc->excl_thread_id; 3452 3453 /* 3454 * nothing needed if in group validation mode 3455 */ 3456 if (cpuc->is_fake || !is_ht_workaround_enabled()) 3457 return; 3458 /* 3459 * no exclusion needed 3460 */ 3461 if (WARN_ON_ONCE(!excl_cntrs)) 3462 return; 3463 3464 xl = &excl_cntrs->states[tid]; 3465 3466 xl->sched_started = false; 3467 /* 3468 * release shared state lock (acquired in intel_start_scheduling()) 3469 */ 3470 raw_spin_unlock(&excl_cntrs->lock); 3471 } 3472 3473 static struct event_constraint * 3474 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx) 3475 { 3476 WARN_ON_ONCE(!cpuc->constraint_list); 3477 3478 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { 3479 struct event_constraint *cx; 3480 3481 /* 3482 * grab pre-allocated constraint entry 3483 */ 3484 cx = &cpuc->constraint_list[idx]; 3485 3486 /* 3487 * initialize dynamic constraint 3488 * with static constraint 3489 */ 3490 *cx = *c; 3491 3492 /* 3493 * mark constraint as dynamic 3494 */ 3495 cx->flags |= PERF_X86_EVENT_DYNAMIC; 3496 c = cx; 3497 } 3498 3499 return c; 3500 } 3501 3502 static struct event_constraint * 3503 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, 3504 int idx, struct event_constraint *c) 3505 { 3506 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 3507 struct intel_excl_states *xlo; 3508 int tid = cpuc->excl_thread_id; 3509 int is_excl, i, w; 3510 3511 /* 3512 * validating a group does not require 3513 * enforcing cross-thread exclusion 3514 */ 3515 if (cpuc->is_fake || !is_ht_workaround_enabled()) 3516 return c; 3517 3518 /* 3519 * no exclusion needed 3520 */ 3521 if (WARN_ON_ONCE(!excl_cntrs)) 3522 return c; 3523 3524 /* 3525 * because we modify the constraint, we need 3526 * to make a copy. Static constraints come 3527 * from static const tables. 
3528 * 3529 * only needed when constraint has not yet 3530 * been cloned (marked dynamic) 3531 */ 3532 c = dyn_constraint(cpuc, c, idx); 3533 3534 /* 3535 * From here on, the constraint is dynamic. 3536 * Either it was just allocated above, or it 3537 * was allocated during a earlier invocation 3538 * of this function 3539 */ 3540 3541 /* 3542 * state of sibling HT 3543 */ 3544 xlo = &excl_cntrs->states[tid ^ 1]; 3545 3546 /* 3547 * event requires exclusive counter access 3548 * across HT threads 3549 */ 3550 is_excl = c->flags & PERF_X86_EVENT_EXCL; 3551 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) { 3552 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT; 3553 if (!cpuc->n_excl++) 3554 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1); 3555 } 3556 3557 /* 3558 * Modify static constraint with current dynamic 3559 * state of thread 3560 * 3561 * EXCLUSIVE: sibling counter measuring exclusive event 3562 * SHARED : sibling counter measuring non-exclusive event 3563 * UNUSED : sibling counter unused 3564 */ 3565 w = c->weight; 3566 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) { 3567 /* 3568 * exclusive event in sibling counter 3569 * our corresponding counter cannot be used 3570 * regardless of our event 3571 */ 3572 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) { 3573 __clear_bit(i, c->idxmsk); 3574 w--; 3575 continue; 3576 } 3577 /* 3578 * if measuring an exclusive event, sibling 3579 * measuring non-exclusive, then counter cannot 3580 * be used 3581 */ 3582 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) { 3583 __clear_bit(i, c->idxmsk); 3584 w--; 3585 continue; 3586 } 3587 } 3588 3589 /* 3590 * if we return an empty mask, then switch 3591 * back to static empty constraint to avoid 3592 * the cost of freeing later on 3593 */ 3594 if (!w) 3595 c = &emptyconstraint; 3596 3597 c->weight = w; 3598 3599 return c; 3600 } 3601 3602 static struct event_constraint * 3603 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 3604 struct perf_event *event) 3605 { 3606 struct event_constraint *c1, *c2; 3607 3608 c1 = cpuc->event_constraint[idx]; 3609 3610 /* 3611 * first time only 3612 * - static constraint: no change across incremental scheduling calls 3613 * - dynamic constraint: handled by intel_get_excl_constraints() 3614 */ 3615 c2 = __intel_get_event_constraints(cpuc, idx, event); 3616 if (c1) { 3617 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC)); 3618 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX); 3619 c1->weight = c2->weight; 3620 c2 = c1; 3621 } 3622 3623 if (cpuc->excl_cntrs) 3624 return intel_get_excl_constraints(cpuc, event, idx, c2); 3625 3626 return c2; 3627 } 3628 3629 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc, 3630 struct perf_event *event) 3631 { 3632 struct hw_perf_event *hwc = &event->hw; 3633 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 3634 int tid = cpuc->excl_thread_id; 3635 struct intel_excl_states *xl; 3636 3637 /* 3638 * nothing needed if in group validation mode 3639 */ 3640 if (cpuc->is_fake) 3641 return; 3642 3643 if (WARN_ON_ONCE(!excl_cntrs)) 3644 return; 3645 3646 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) { 3647 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT; 3648 if (!--cpuc->n_excl) 3649 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0); 3650 } 3651 3652 /* 3653 * If event was actually assigned, then mark the counter state as 3654 * unused now. 
3655 */ 3656 if (hwc->idx >= 0) { 3657 xl = &excl_cntrs->states[tid]; 3658 3659 /* 3660 * put_constraint may be called from x86_schedule_events() 3661 * which already has the lock held so here make locking 3662 * conditional. 3663 */ 3664 if (!xl->sched_started) 3665 raw_spin_lock(&excl_cntrs->lock); 3666 3667 xl->state[hwc->idx] = INTEL_EXCL_UNUSED; 3668 3669 if (!xl->sched_started) 3670 raw_spin_unlock(&excl_cntrs->lock); 3671 } 3672 } 3673 3674 static void 3675 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, 3676 struct perf_event *event) 3677 { 3678 struct hw_perf_event_extra *reg; 3679 3680 reg = &event->hw.extra_reg; 3681 if (reg->idx != EXTRA_REG_NONE) 3682 __intel_shared_reg_put_constraints(cpuc, reg); 3683 3684 reg = &event->hw.branch_reg; 3685 if (reg->idx != EXTRA_REG_NONE) 3686 __intel_shared_reg_put_constraints(cpuc, reg); 3687 } 3688 3689 static void intel_put_event_constraints(struct cpu_hw_events *cpuc, 3690 struct perf_event *event) 3691 { 3692 intel_put_shared_regs_event_constraints(cpuc, event); 3693 3694 /* 3695 * is PMU has exclusive counter restrictions, then 3696 * all events are subject to and must call the 3697 * put_excl_constraints() routine 3698 */ 3699 if (cpuc->excl_cntrs) 3700 intel_put_excl_constraints(cpuc, event); 3701 } 3702 3703 static void intel_pebs_aliases_core2(struct perf_event *event) 3704 { 3705 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 3706 /* 3707 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 3708 * (0x003c) so that we can use it with PEBS. 3709 * 3710 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't 3711 * PEBS capable. However we can use INST_RETIRED.ANY_P 3712 * (0x00c0), which is a PEBS capable event, to get the same 3713 * count. 3714 * 3715 * INST_RETIRED.ANY_P counts the number of cycles that retires 3716 * CNTMASK instructions. By setting CNTMASK to a value (16) 3717 * larger than the maximum number of instructions that can be 3718 * retired per cycle (4) and then inverting the condition, we 3719 * count all cycles that retire 16 or less instructions, which 3720 * is every cycle. 3721 * 3722 * Thereby we gain a PEBS capable cycle counter. 3723 */ 3724 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); 3725 3726 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 3727 event->hw.config = alt_config; 3728 } 3729 } 3730 3731 static void intel_pebs_aliases_snb(struct perf_event *event) 3732 { 3733 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 3734 /* 3735 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 3736 * (0x003c) so that we can use it with PEBS. 3737 * 3738 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't 3739 * PEBS capable. However we can use UOPS_RETIRED.ALL 3740 * (0x01c2), which is a PEBS capable event, to get the same 3741 * count. 3742 * 3743 * UOPS_RETIRED.ALL counts the number of cycles that retires 3744 * CNTMASK micro-ops. By setting CNTMASK to a value (16) 3745 * larger than the maximum number of micro-ops that can be 3746 * retired per cycle (4) and then inverting the condition, we 3747 * count all cycles that retire 16 or less micro-ops, which 3748 * is every cycle. 3749 * 3750 * Thereby we gain a PEBS capable cycle counter. 
3751 */ 3752 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16); 3753 3754 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 3755 event->hw.config = alt_config; 3756 } 3757 } 3758 3759 static void intel_pebs_aliases_precdist(struct perf_event *event) 3760 { 3761 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 3762 /* 3763 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 3764 * (0x003c) so that we can use it with PEBS. 3765 * 3766 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't 3767 * PEBS capable. However we can use INST_RETIRED.PREC_DIST 3768 * (0x01c0), which is a PEBS capable event, to get the same 3769 * count. 3770 * 3771 * The PREC_DIST event has special support to minimize sample 3772 * shadowing effects. One drawback is that it can be 3773 * only programmed on counter 1, but that seems like an 3774 * acceptable trade off. 3775 */ 3776 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16); 3777 3778 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 3779 event->hw.config = alt_config; 3780 } 3781 } 3782 3783 static void intel_pebs_aliases_ivb(struct perf_event *event) 3784 { 3785 if (event->attr.precise_ip < 3) 3786 return intel_pebs_aliases_snb(event); 3787 return intel_pebs_aliases_precdist(event); 3788 } 3789 3790 static void intel_pebs_aliases_skl(struct perf_event *event) 3791 { 3792 if (event->attr.precise_ip < 3) 3793 return intel_pebs_aliases_core2(event); 3794 return intel_pebs_aliases_precdist(event); 3795 } 3796 3797 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) 3798 { 3799 unsigned long flags = x86_pmu.large_pebs_flags; 3800 3801 if (event->attr.use_clockid) 3802 flags &= ~PERF_SAMPLE_TIME; 3803 if (!event->attr.exclude_kernel) 3804 flags &= ~PERF_SAMPLE_REGS_USER; 3805 if (event->attr.sample_regs_user & ~PEBS_GP_REGS) 3806 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); 3807 return flags; 3808 } 3809 3810 static int intel_pmu_bts_config(struct perf_event *event) 3811 { 3812 struct perf_event_attr *attr = &event->attr; 3813 3814 if (unlikely(intel_pmu_has_bts(event))) { 3815 /* BTS is not supported by this architecture. */ 3816 if (!x86_pmu.bts_active) 3817 return -EOPNOTSUPP; 3818 3819 /* BTS is currently only allowed for user-mode. */ 3820 if (!attr->exclude_kernel) 3821 return -EOPNOTSUPP; 3822 3823 /* BTS is not allowed for precise events. 
*/ 3824 if (attr->precise_ip) 3825 return -EOPNOTSUPP; 3826 3827 /* disallow bts if conflicting events are present */ 3828 if (x86_add_exclusive(x86_lbr_exclusive_lbr)) 3829 return -EBUSY; 3830 3831 event->destroy = hw_perf_lbr_event_destroy; 3832 } 3833 3834 return 0; 3835 } 3836 3837 static int core_pmu_hw_config(struct perf_event *event) 3838 { 3839 int ret = x86_pmu_hw_config(event); 3840 3841 if (ret) 3842 return ret; 3843 3844 return intel_pmu_bts_config(event); 3845 } 3846 3847 #define INTEL_TD_METRIC_AVAILABLE_MAX (INTEL_TD_METRIC_RETIRING + \ 3848 ((x86_pmu.num_topdown_events - 1) << 8)) 3849 3850 static bool is_available_metric_event(struct perf_event *event) 3851 { 3852 return is_metric_event(event) && 3853 event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX; 3854 } 3855 3856 static inline bool is_mem_loads_event(struct perf_event *event) 3857 { 3858 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01); 3859 } 3860 3861 static inline bool is_mem_loads_aux_event(struct perf_event *event) 3862 { 3863 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82); 3864 } 3865 3866 static inline bool require_mem_loads_aux_event(struct perf_event *event) 3867 { 3868 if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX)) 3869 return false; 3870 3871 if (is_hybrid()) 3872 return hybrid_pmu(event->pmu)->cpu_type == hybrid_big; 3873 3874 return true; 3875 } 3876 3877 static inline bool intel_pmu_has_cap(struct perf_event *event, int idx) 3878 { 3879 union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap); 3880 3881 return test_bit(idx, (unsigned long *)&intel_cap->capabilities); 3882 } 3883 3884 static int intel_pmu_hw_config(struct perf_event *event) 3885 { 3886 int ret = x86_pmu_hw_config(event); 3887 3888 if (ret) 3889 return ret; 3890 3891 ret = intel_pmu_bts_config(event); 3892 if (ret) 3893 return ret; 3894 3895 if (event->attr.precise_ip) { 3896 if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT) 3897 return -EINVAL; 3898 3899 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) { 3900 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; 3901 if (!(event->attr.sample_type & 3902 ~intel_pmu_large_pebs_flags(event))) { 3903 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS; 3904 event->attach_state |= PERF_ATTACH_SCHED_CB; 3905 } 3906 } 3907 if (x86_pmu.pebs_aliases) 3908 x86_pmu.pebs_aliases(event); 3909 } 3910 3911 if (needs_branch_stack(event)) { 3912 ret = intel_pmu_setup_lbr_filter(event); 3913 if (ret) 3914 return ret; 3915 event->attach_state |= PERF_ATTACH_SCHED_CB; 3916 3917 /* 3918 * BTS is set up earlier in this path, so don't account twice 3919 */ 3920 if (!unlikely(intel_pmu_has_bts(event))) { 3921 /* disallow lbr if conflicting events are present */ 3922 if (x86_add_exclusive(x86_lbr_exclusive_lbr)) 3923 return -EBUSY; 3924 3925 event->destroy = hw_perf_lbr_event_destroy; 3926 } 3927 } 3928 3929 if (event->attr.aux_output) { 3930 if (!event->attr.precise_ip) 3931 return -EINVAL; 3932 3933 event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT; 3934 } 3935 3936 if ((event->attr.type == PERF_TYPE_HARDWARE) || 3937 (event->attr.type == PERF_TYPE_HW_CACHE)) 3938 return 0; 3939 3940 /* 3941 * Config Topdown slots and metric events 3942 * 3943 * The slots event on Fixed Counter 3 can support sampling, 3944 * which will be handled normally in x86_perf_event_update(). 
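 * (The slots event is the fixed counter 3 pseudo event,
 * event=0x00,umask=0x4, exposed as the "slots" event alias in this
 * file.)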
3945 * 3946 * Metric events don't support sampling and require being paired 3947 * with a slots event as group leader. When the slots event 3948 * is used in a metrics group, it too cannot support sampling. 3949 */ 3950 if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) { 3951 if (event->attr.config1 || event->attr.config2) 3952 return -EINVAL; 3953 3954 /* 3955 * The TopDown metrics events and slots event don't 3956 * support any filters. 3957 */ 3958 if (event->attr.config & X86_ALL_EVENT_FLAGS) 3959 return -EINVAL; 3960 3961 if (is_available_metric_event(event)) { 3962 struct perf_event *leader = event->group_leader; 3963 3964 /* The metric events don't support sampling. */ 3965 if (is_sampling_event(event)) 3966 return -EINVAL; 3967 3968 /* The metric events require a slots group leader. */ 3969 if (!is_slots_event(leader)) 3970 return -EINVAL; 3971 3972 /* 3973 * The leader/SLOTS must not be a sampling event for 3974 * metric use; hardware requires that it start at 0 when used 3975 * in conjunction with MSR_PERF_METRICS. 3976 */ 3977 if (is_sampling_event(leader)) 3978 return -EINVAL; 3979 3980 event->event_caps |= PERF_EV_CAP_SIBLING; 3981 /* 3982 * Only once we have a METRICs sibling do we 3983 * need TopDown magic. 3984 */ 3985 leader->hw.flags |= PERF_X86_EVENT_TOPDOWN; 3986 event->hw.flags |= PERF_X86_EVENT_TOPDOWN; 3987 } 3988 } 3989 3990 /* 3991 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR 3992 * doesn't function quite right. As a work-around it needs to always be 3993 * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82). 3994 * The actual count of this second event is irrelevant; it just needs 3995 * to be active to make the first event function correctly. 3996 * 3997 * In a group, the auxiliary event must be in front of the load latency 3998 * event. The rule is there to simplify the implementation of the check; 3999 * perf cannot see the complete group at this point. 4000 */ 4001 if (require_mem_loads_aux_event(event) && 4002 (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) && 4003 is_mem_loads_event(event)) { 4004 struct perf_event *leader = event->group_leader; 4005 struct perf_event *sibling = NULL; 4006 4007 /* 4008 * When this memload event is also the first event (no group 4009 * exists yet), then there is no aux event before it. 4010 */ 4011 if (leader == event) 4012 return -ENODATA; 4013 4014 if (!is_mem_loads_aux_event(leader)) { 4015 for_each_sibling_event(sibling, leader) { 4016 if (is_mem_loads_aux_event(sibling)) 4017 break; 4018 } 4019 if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list)) 4020 return -ENODATA; 4021 } 4022 } 4023 4024 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY)) 4025 return 0; 4026 4027 if (x86_pmu.version < 3) 4028 return -EINVAL; 4029 4030 ret = perf_allow_cpu(&event->attr); 4031 if (ret) 4032 return ret; 4033 4034 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY; 4035 4036 return 0; 4037 } 4038 4039 /* 4040 * Currently, the only caller of this function is atomic_switch_perf_msrs(). 4041 * The host perf context helps to prepare the values of the real hardware for 4042 * a set of msrs that need to be switched atomically in a vmx transaction.
4043 * 4044 * For example, the pseudocode needed to add a new msr should look like: 4045 * 4046 * arr[(*nr)++] = (struct perf_guest_switch_msr){ 4047 * .msr = the hardware msr address, 4048 * .host = the value the hardware has when it doesn't run a guest, 4049 * .guest = the value the hardware has when it runs a guest, 4050 * }; 4051 * 4052 * These values have nothing to do with the emulated values the guest sees 4053 * when it uses {RD,WR}MSR, which should be handled by the KVM context, 4054 * specifically in the intel_pmu_{get,set}_msr(). 4055 */ 4056 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data) 4057 { 4058 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 4059 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; 4060 struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data; 4061 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); 4062 u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable; 4063 int global_ctrl, pebs_enable; 4064 4065 *nr = 0; 4066 global_ctrl = (*nr)++; 4067 arr[global_ctrl] = (struct perf_guest_switch_msr){ 4068 .msr = MSR_CORE_PERF_GLOBAL_CTRL, 4069 .host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask, 4070 .guest = intel_ctrl & (~cpuc->intel_ctrl_host_mask | ~pebs_mask), 4071 }; 4072 4073 if (!x86_pmu.pebs) 4074 return arr; 4075 4076 /* 4077 * If PMU counter has PEBS enabled it is not enough to 4078 * disable counter on a guest entry since PEBS memory 4079 * write can overshoot guest entry and corrupt guest 4080 * memory. Disabling PEBS solves the problem. 4081 * 4082 * Don't do this if the CPU already enforces it. 4083 */ 4084 if (x86_pmu.pebs_no_isolation) { 4085 arr[(*nr)++] = (struct perf_guest_switch_msr){ 4086 .msr = MSR_IA32_PEBS_ENABLE, 4087 .host = cpuc->pebs_enabled, 4088 .guest = 0, 4089 }; 4090 return arr; 4091 } 4092 4093 if (!kvm_pmu || !x86_pmu.pebs_ept) 4094 return arr; 4095 4096 arr[(*nr)++] = (struct perf_guest_switch_msr){ 4097 .msr = MSR_IA32_DS_AREA, 4098 .host = (unsigned long)cpuc->ds, 4099 .guest = kvm_pmu->ds_area, 4100 }; 4101 4102 if (x86_pmu.intel_cap.pebs_baseline) { 4103 arr[(*nr)++] = (struct perf_guest_switch_msr){ 4104 .msr = MSR_PEBS_DATA_CFG, 4105 .host = cpuc->active_pebs_data_cfg, 4106 .guest = kvm_pmu->pebs_data_cfg, 4107 }; 4108 } 4109 4110 pebs_enable = (*nr)++; 4111 arr[pebs_enable] = (struct perf_guest_switch_msr){ 4112 .msr = MSR_IA32_PEBS_ENABLE, 4113 .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask, 4114 .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask, 4115 }; 4116 4117 if (arr[pebs_enable].host) { 4118 /* Disable guest PEBS if host PEBS is enabled. */ 4119 arr[pebs_enable].guest = 0; 4120 } else { 4121 /* Disable guest PEBS thoroughly for cross-mapped PEBS counters. 
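A cross-mapped counter is a guest PEBS counter that KVM had to back with a different hardware counter index, so its PEBS records would be attributed to the wrong counter inside the guest.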
*/ 4122 arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask; 4123 arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask; 4124 /* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */ 4125 arr[global_ctrl].guest |= arr[pebs_enable].guest; 4126 } 4127 4128 return arr; 4129 } 4130 4131 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data) 4132 { 4133 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 4134 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; 4135 int idx; 4136 4137 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 4138 struct perf_event *event = cpuc->events[idx]; 4139 4140 arr[idx].msr = x86_pmu_config_addr(idx); 4141 arr[idx].host = arr[idx].guest = 0; 4142 4143 if (!test_bit(idx, cpuc->active_mask)) 4144 continue; 4145 4146 arr[idx].host = arr[idx].guest = 4147 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE; 4148 4149 if (event->attr.exclude_host) 4150 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE; 4151 else if (event->attr.exclude_guest) 4152 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE; 4153 } 4154 4155 *nr = x86_pmu.num_counters; 4156 return arr; 4157 } 4158 4159 static void core_pmu_enable_event(struct perf_event *event) 4160 { 4161 if (!event->attr.exclude_host) 4162 x86_pmu_enable_event(event); 4163 } 4164 4165 static void core_pmu_enable_all(int added) 4166 { 4167 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 4168 int idx; 4169 4170 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 4171 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; 4172 4173 if (!test_bit(idx, cpuc->active_mask) || 4174 cpuc->events[idx]->attr.exclude_host) 4175 continue; 4176 4177 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); 4178 } 4179 } 4180 4181 static int hsw_hw_config(struct perf_event *event) 4182 { 4183 int ret = intel_pmu_hw_config(event); 4184 4185 if (ret) 4186 return ret; 4187 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE)) 4188 return 0; 4189 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); 4190 4191 /* 4192 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with 4193 * PEBS or in ANY thread mode. Since the results are non-sensical forbid 4194 * this combination. 4195 */ 4196 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) && 4197 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) || 4198 event->attr.precise_ip > 0)) 4199 return -EOPNOTSUPP; 4200 4201 if (event_is_checkpointed(event)) { 4202 /* 4203 * Sampling of checkpointed events can cause situations where 4204 * the CPU constantly aborts because of a overflow, which is 4205 * then checkpointed back and ignored. Forbid checkpointing 4206 * for sampling. 4207 * 4208 * But still allow a long sampling period, so that perf stat 4209 * from KVM works. 
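 * "Long" here means at least 0x7fffffff, which is also the PMU's
 * max_period as set further down in this file.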
4210 */ 4211 if (event->attr.sample_period > 0 && 4212 event->attr.sample_period < 0x7fffffff) 4213 return -EOPNOTSUPP; 4214 } 4215 return 0; 4216 } 4217 4218 static struct event_constraint counter0_constraint = 4219 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1); 4220 4221 static struct event_constraint counter1_constraint = 4222 INTEL_ALL_EVENT_CONSTRAINT(0, 0x2); 4223 4224 static struct event_constraint counter0_1_constraint = 4225 INTEL_ALL_EVENT_CONSTRAINT(0, 0x3); 4226 4227 static struct event_constraint counter2_constraint = 4228 EVENT_CONSTRAINT(0, 0x4, 0); 4229 4230 static struct event_constraint fixed0_constraint = 4231 FIXED_EVENT_CONSTRAINT(0x00c0, 0); 4232 4233 static struct event_constraint fixed0_counter0_constraint = 4234 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL); 4235 4236 static struct event_constraint fixed0_counter0_1_constraint = 4237 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL); 4238 4239 static struct event_constraint counters_1_7_constraint = 4240 INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL); 4241 4242 static struct event_constraint * 4243 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4244 struct perf_event *event) 4245 { 4246 struct event_constraint *c; 4247 4248 c = intel_get_event_constraints(cpuc, idx, event); 4249 4250 /* Handle special quirk on in_tx_checkpointed only in counter 2 */ 4251 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) { 4252 if (c->idxmsk64 & (1U << 2)) 4253 return &counter2_constraint; 4254 return &emptyconstraint; 4255 } 4256 4257 return c; 4258 } 4259 4260 static struct event_constraint * 4261 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4262 struct perf_event *event) 4263 { 4264 /* 4265 * Fixed counter 0 has less skid. 4266 * Force instruction:ppp in Fixed counter 0 4267 */ 4268 if ((event->attr.precise_ip == 3) && 4269 constraint_match(&fixed0_constraint, event->hw.config)) 4270 return &fixed0_constraint; 4271 4272 return hsw_get_event_constraints(cpuc, idx, event); 4273 } 4274 4275 static struct event_constraint * 4276 spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4277 struct perf_event *event) 4278 { 4279 struct event_constraint *c; 4280 4281 c = icl_get_event_constraints(cpuc, idx, event); 4282 4283 /* 4284 * The :ppp indicates the Precise Distribution (PDist) facility, which 4285 * is only supported on the GP counter 0. If a :ppp event which is not 4286 * available on the GP counter 0, error out. 4287 * Exception: Instruction PDIR is only available on the fixed counter 0. 4288 */ 4289 if ((event->attr.precise_ip == 3) && 4290 !constraint_match(&fixed0_constraint, event->hw.config)) { 4291 if (c->idxmsk64 & BIT_ULL(0)) 4292 return &counter0_constraint; 4293 4294 return &emptyconstraint; 4295 } 4296 4297 return c; 4298 } 4299 4300 static struct event_constraint * 4301 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4302 struct perf_event *event) 4303 { 4304 struct event_constraint *c; 4305 4306 /* :ppp means to do reduced skid PEBS which is PMC0 only. */ 4307 if (event->attr.precise_ip == 3) 4308 return &counter0_constraint; 4309 4310 c = intel_get_event_constraints(cpuc, idx, event); 4311 4312 return c; 4313 } 4314 4315 static struct event_constraint * 4316 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4317 struct perf_event *event) 4318 { 4319 struct event_constraint *c; 4320 4321 c = intel_get_event_constraints(cpuc, idx, event); 4322 4323 /* 4324 * :ppp means to do reduced skid PEBS, 4325 * which is available on PMC0 and fixed counter 0. 
4326 */ 4327 if (event->attr.precise_ip == 3) { 4328 /* Force instruction:ppp on PMC0 and Fixed counter 0 */ 4329 if (constraint_match(&fixed0_constraint, event->hw.config)) 4330 return &fixed0_counter0_constraint; 4331 4332 return &counter0_constraint; 4333 } 4334 4335 return c; 4336 } 4337 4338 static bool allow_tsx_force_abort = true; 4339 4340 static struct event_constraint * 4341 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4342 struct perf_event *event) 4343 { 4344 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event); 4345 4346 /* 4347 * Without TFA we must not use PMC3. 4348 */ 4349 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) { 4350 c = dyn_constraint(cpuc, c, idx); 4351 c->idxmsk64 &= ~(1ULL << 3); 4352 c->weight--; 4353 } 4354 4355 return c; 4356 } 4357 4358 static struct event_constraint * 4359 adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4360 struct perf_event *event) 4361 { 4362 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 4363 4364 if (pmu->cpu_type == hybrid_big) 4365 return spr_get_event_constraints(cpuc, idx, event); 4366 else if (pmu->cpu_type == hybrid_small) 4367 return tnt_get_event_constraints(cpuc, idx, event); 4368 4369 WARN_ON(1); 4370 return &emptyconstraint; 4371 } 4372 4373 static struct event_constraint * 4374 cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4375 struct perf_event *event) 4376 { 4377 struct event_constraint *c; 4378 4379 c = intel_get_event_constraints(cpuc, idx, event); 4380 4381 /* 4382 * The :ppp indicates the Precise Distribution (PDist) facility, which 4383 * is only supported on the GP counter 0 & 1 and Fixed counter 0. 4384 * If a :ppp event which is not available on the above eligible counters, 4385 * error out. 4386 */ 4387 if (event->attr.precise_ip == 3) { 4388 /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */ 4389 if (constraint_match(&fixed0_constraint, event->hw.config)) 4390 return &fixed0_counter0_1_constraint; 4391 4392 switch (c->idxmsk64 & 0x3ull) { 4393 case 0x1: 4394 return &counter0_constraint; 4395 case 0x2: 4396 return &counter1_constraint; 4397 case 0x3: 4398 return &counter0_1_constraint; 4399 } 4400 return &emptyconstraint; 4401 } 4402 4403 return c; 4404 } 4405 4406 static struct event_constraint * 4407 rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4408 struct perf_event *event) 4409 { 4410 struct event_constraint *c; 4411 4412 c = spr_get_event_constraints(cpuc, idx, event); 4413 4414 /* The Retire Latency is not supported by the fixed counter 0. */ 4415 if (event->attr.precise_ip && 4416 (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) && 4417 constraint_match(&fixed0_constraint, event->hw.config)) { 4418 /* 4419 * The Instruction PDIR is only available 4420 * on the fixed counter 0. Error out for this case. 
4421 */ 4422 if (event->attr.precise_ip == 3) 4423 return &emptyconstraint; 4424 return &counters_1_7_constraint; 4425 } 4426 4427 return c; 4428 } 4429 4430 static struct event_constraint * 4431 mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4432 struct perf_event *event) 4433 { 4434 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 4435 4436 if (pmu->cpu_type == hybrid_big) 4437 return rwc_get_event_constraints(cpuc, idx, event); 4438 if (pmu->cpu_type == hybrid_small) 4439 return cmt_get_event_constraints(cpuc, idx, event); 4440 4441 WARN_ON(1); 4442 return &emptyconstraint; 4443 } 4444 4445 static int adl_hw_config(struct perf_event *event) 4446 { 4447 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 4448 4449 if (pmu->cpu_type == hybrid_big) 4450 return hsw_hw_config(event); 4451 else if (pmu->cpu_type == hybrid_small) 4452 return intel_pmu_hw_config(event); 4453 4454 WARN_ON(1); 4455 return -EOPNOTSUPP; 4456 } 4457 4458 static u8 adl_get_hybrid_cpu_type(void) 4459 { 4460 return hybrid_big; 4461 } 4462 4463 /* 4464 * Broadwell: 4465 * 4466 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared 4467 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine 4468 * the two to enforce a minimum period of 128 (the smallest value that has bits 4469 * 0-5 cleared and >= 100). 4470 * 4471 * Because of how the code in x86_perf_event_set_period() works, the truncation 4472 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period 4473 * to make up for the 'lost' events due to carrying the 'error' in period_left. 4474 * 4475 * Therefore the effective (average) period matches the requested period, 4476 * despite coarser hardware granularity. 4477 */ 4478 static void bdw_limit_period(struct perf_event *event, s64 *left) 4479 { 4480 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == 4481 X86_CONFIG(.event=0xc0, .umask=0x01)) { 4482 if (*left < 128) 4483 *left = 128; 4484 *left &= ~0x3fULL; 4485 } 4486 } 4487 4488 static void nhm_limit_period(struct perf_event *event, s64 *left) 4489 { 4490 *left = max(*left, 32LL); 4491 } 4492 4493 static void spr_limit_period(struct perf_event *event, s64 *left) 4494 { 4495 if (event->attr.precise_ip == 3) 4496 *left = max(*left, 128LL); 4497 } 4498 4499 PMU_FORMAT_ATTR(event, "config:0-7" ); 4500 PMU_FORMAT_ATTR(umask, "config:8-15" ); 4501 PMU_FORMAT_ATTR(edge, "config:18" ); 4502 PMU_FORMAT_ATTR(pc, "config:19" ); 4503 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */ 4504 PMU_FORMAT_ATTR(inv, "config:23" ); 4505 PMU_FORMAT_ATTR(cmask, "config:24-31" ); 4506 PMU_FORMAT_ATTR(in_tx, "config:32"); 4507 PMU_FORMAT_ATTR(in_tx_cp, "config:33"); 4508 4509 static struct attribute *intel_arch_formats_attr[] = { 4510 &format_attr_event.attr, 4511 &format_attr_umask.attr, 4512 &format_attr_edge.attr, 4513 &format_attr_pc.attr, 4514 &format_attr_inv.attr, 4515 &format_attr_cmask.attr, 4516 NULL, 4517 }; 4518 4519 ssize_t intel_event_sysfs_show(char *page, u64 config) 4520 { 4521 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT); 4522 4523 return x86_event_sysfs_show(page, config, event); 4524 } 4525 4526 static struct intel_shared_regs *allocate_shared_regs(int cpu) 4527 { 4528 struct intel_shared_regs *regs; 4529 int i; 4530 4531 regs = kzalloc_node(sizeof(struct intel_shared_regs), 4532 GFP_KERNEL, cpu_to_node(cpu)); 4533 if (regs) { 4534 /* 4535 * initialize the locks to keep lockdep happy 4536 */ 4537 for (i = 0; i < EXTRA_REG_MAX; i++) 4538 raw_spin_lock_init(®s->regs[i].lock); 4539 4540 
regs->core_id = -1; 4541 } 4542 return regs; 4543 } 4544 4545 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu) 4546 { 4547 struct intel_excl_cntrs *c; 4548 4549 c = kzalloc_node(sizeof(struct intel_excl_cntrs), 4550 GFP_KERNEL, cpu_to_node(cpu)); 4551 if (c) { 4552 raw_spin_lock_init(&c->lock); 4553 c->core_id = -1; 4554 } 4555 return c; 4556 } 4557 4558 4559 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) 4560 { 4561 cpuc->pebs_record_size = x86_pmu.pebs_record_size; 4562 4563 if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { 4564 cpuc->shared_regs = allocate_shared_regs(cpu); 4565 if (!cpuc->shared_regs) 4566 goto err; 4567 } 4568 4569 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) { 4570 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); 4571 4572 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); 4573 if (!cpuc->constraint_list) 4574 goto err_shared_regs; 4575 } 4576 4577 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 4578 cpuc->excl_cntrs = allocate_excl_cntrs(cpu); 4579 if (!cpuc->excl_cntrs) 4580 goto err_constraint_list; 4581 4582 cpuc->excl_thread_id = 0; 4583 } 4584 4585 return 0; 4586 4587 err_constraint_list: 4588 kfree(cpuc->constraint_list); 4589 cpuc->constraint_list = NULL; 4590 4591 err_shared_regs: 4592 kfree(cpuc->shared_regs); 4593 cpuc->shared_regs = NULL; 4594 4595 err: 4596 return -ENOMEM; 4597 } 4598 4599 static int intel_pmu_cpu_prepare(int cpu) 4600 { 4601 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu); 4602 } 4603 4604 static void flip_smm_bit(void *data) 4605 { 4606 unsigned long set = *(unsigned long *)data; 4607 4608 if (set > 0) { 4609 msr_set_bit(MSR_IA32_DEBUGCTLMSR, 4610 DEBUGCTLMSR_FREEZE_IN_SMM_BIT); 4611 } else { 4612 msr_clear_bit(MSR_IA32_DEBUGCTLMSR, 4613 DEBUGCTLMSR_FREEZE_IN_SMM_BIT); 4614 } 4615 } 4616 4617 static void intel_pmu_check_num_counters(int *num_counters, 4618 int *num_counters_fixed, 4619 u64 *intel_ctrl, u64 fixed_mask); 4620 4621 static void update_pmu_cap(struct x86_hybrid_pmu *pmu) 4622 { 4623 unsigned int sub_bitmaps = cpuid_eax(ARCH_PERFMON_EXT_LEAF); 4624 unsigned int eax, ebx, ecx, edx; 4625 4626 if (sub_bitmaps & ARCH_PERFMON_NUM_COUNTER_LEAF_BIT) { 4627 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF, 4628 &eax, &ebx, &ecx, &edx); 4629 pmu->num_counters = fls(eax); 4630 pmu->num_counters_fixed = fls(ebx); 4631 intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed, 4632 &pmu->intel_ctrl, ebx); 4633 } 4634 } 4635 4636 static bool init_hybrid_pmu(int cpu) 4637 { 4638 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 4639 u8 cpu_type = get_this_hybrid_cpu_type(); 4640 struct x86_hybrid_pmu *pmu = NULL; 4641 int i; 4642 4643 if (!cpu_type && x86_pmu.get_hybrid_cpu_type) 4644 cpu_type = x86_pmu.get_hybrid_cpu_type(); 4645 4646 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { 4647 if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) { 4648 pmu = &x86_pmu.hybrid_pmu[i]; 4649 break; 4650 } 4651 } 4652 if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) { 4653 cpuc->pmu = NULL; 4654 return false; 4655 } 4656 4657 /* Only check and dump the PMU information for the first CPU */ 4658 if (!cpumask_empty(&pmu->supported_cpus)) 4659 goto end; 4660 4661 if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) 4662 update_pmu_cap(pmu); 4663 4664 if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed)) 4665 return false; 4666 4667 pr_info("%s PMU driver: ", pmu->name); 4668 4669 if 
(pmu->intel_cap.pebs_output_pt_available) 4670 pr_cont("PEBS-via-PT "); 4671 4672 pr_cont("\n"); 4673 4674 x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed, 4675 pmu->intel_ctrl); 4676 4677 end: 4678 cpumask_set_cpu(cpu, &pmu->supported_cpus); 4679 cpuc->pmu = &pmu->pmu; 4680 4681 return true; 4682 } 4683 4684 static void intel_pmu_cpu_starting(int cpu) 4685 { 4686 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 4687 int core_id = topology_core_id(cpu); 4688 int i; 4689 4690 if (is_hybrid() && !init_hybrid_pmu(cpu)) 4691 return; 4692 4693 init_debug_store_on_cpu(cpu); 4694 /* 4695 * Deal with CPUs that don't clear their LBRs on power-up. 4696 */ 4697 intel_pmu_lbr_reset(); 4698 4699 cpuc->lbr_sel = NULL; 4700 4701 if (x86_pmu.flags & PMU_FL_TFA) { 4702 WARN_ON_ONCE(cpuc->tfa_shadow); 4703 cpuc->tfa_shadow = ~0ULL; 4704 intel_set_tfa(cpuc, false); 4705 } 4706 4707 if (x86_pmu.version > 1) 4708 flip_smm_bit(&x86_pmu.attr_freeze_on_smi); 4709 4710 /* 4711 * Disable perf metrics if any added CPU doesn't support it. 4712 * 4713 * Turn off the check for a hybrid architecture, because the 4714 * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicate 4715 * the architecture features. The perf metrics is a model-specific 4716 * feature for now. The corresponding bit should always be 0 on 4717 * a hybrid platform, e.g., Alder Lake. 4718 */ 4719 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) { 4720 union perf_capabilities perf_cap; 4721 4722 rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities); 4723 if (!perf_cap.perf_metrics) { 4724 x86_pmu.intel_cap.perf_metrics = 0; 4725 x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS); 4726 } 4727 } 4728 4729 if (!cpuc->shared_regs) 4730 return; 4731 4732 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) { 4733 for_each_cpu(i, topology_sibling_cpumask(cpu)) { 4734 struct intel_shared_regs *pc; 4735 4736 pc = per_cpu(cpu_hw_events, i).shared_regs; 4737 if (pc && pc->core_id == core_id) { 4738 cpuc->kfree_on_online[0] = cpuc->shared_regs; 4739 cpuc->shared_regs = pc; 4740 break; 4741 } 4742 } 4743 cpuc->shared_regs->core_id = core_id; 4744 cpuc->shared_regs->refcnt++; 4745 } 4746 4747 if (x86_pmu.lbr_sel_map) 4748 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; 4749 4750 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 4751 for_each_cpu(i, topology_sibling_cpumask(cpu)) { 4752 struct cpu_hw_events *sibling; 4753 struct intel_excl_cntrs *c; 4754 4755 sibling = &per_cpu(cpu_hw_events, i); 4756 c = sibling->excl_cntrs; 4757 if (c && c->core_id == core_id) { 4758 cpuc->kfree_on_online[1] = cpuc->excl_cntrs; 4759 cpuc->excl_cntrs = c; 4760 if (!sibling->excl_thread_id) 4761 cpuc->excl_thread_id = 1; 4762 break; 4763 } 4764 } 4765 cpuc->excl_cntrs->core_id = core_id; 4766 cpuc->excl_cntrs->refcnt++; 4767 } 4768 } 4769 4770 static void free_excl_cntrs(struct cpu_hw_events *cpuc) 4771 { 4772 struct intel_excl_cntrs *c; 4773 4774 c = cpuc->excl_cntrs; 4775 if (c) { 4776 if (c->core_id == -1 || --c->refcnt == 0) 4777 kfree(c); 4778 cpuc->excl_cntrs = NULL; 4779 } 4780 4781 kfree(cpuc->constraint_list); 4782 cpuc->constraint_list = NULL; 4783 } 4784 4785 static void intel_pmu_cpu_dying(int cpu) 4786 { 4787 fini_debug_store_on_cpu(cpu); 4788 } 4789 4790 void intel_cpuc_finish(struct cpu_hw_events *cpuc) 4791 { 4792 struct intel_shared_regs *pc; 4793 4794 pc = cpuc->shared_regs; 4795 if (pc) { 4796 if (pc->core_id == -1 || --pc->refcnt == 0) 4797 kfree(pc); 4798 cpuc->shared_regs = NULL; 4799 } 4800 4801 free_excl_cntrs(cpuc); 
4802 } 4803 4804 static void intel_pmu_cpu_dead(int cpu) 4805 { 4806 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 4807 4808 intel_cpuc_finish(cpuc); 4809 4810 if (is_hybrid() && cpuc->pmu) 4811 cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus); 4812 } 4813 4814 static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, 4815 bool sched_in) 4816 { 4817 intel_pmu_pebs_sched_task(pmu_ctx, sched_in); 4818 intel_pmu_lbr_sched_task(pmu_ctx, sched_in); 4819 } 4820 4821 static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc, 4822 struct perf_event_pmu_context *next_epc) 4823 { 4824 intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc); 4825 } 4826 4827 static int intel_pmu_check_period(struct perf_event *event, u64 value) 4828 { 4829 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0; 4830 } 4831 4832 static void intel_aux_output_init(void) 4833 { 4834 /* Refer also intel_pmu_aux_output_match() */ 4835 if (x86_pmu.intel_cap.pebs_output_pt_available) 4836 x86_pmu.assign = intel_pmu_assign_event; 4837 } 4838 4839 static int intel_pmu_aux_output_match(struct perf_event *event) 4840 { 4841 /* intel_pmu_assign_event() is needed, refer intel_aux_output_init() */ 4842 if (!x86_pmu.intel_cap.pebs_output_pt_available) 4843 return 0; 4844 4845 return is_intel_pt_event(event); 4846 } 4847 4848 static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret) 4849 { 4850 struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu); 4851 4852 *ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus); 4853 } 4854 4855 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); 4856 4857 PMU_FORMAT_ATTR(ldlat, "config1:0-15"); 4858 4859 PMU_FORMAT_ATTR(frontend, "config1:0-23"); 4860 4861 PMU_FORMAT_ATTR(snoop_rsp, "config1:0-63"); 4862 4863 static struct attribute *intel_arch3_formats_attr[] = { 4864 &format_attr_event.attr, 4865 &format_attr_umask.attr, 4866 &format_attr_edge.attr, 4867 &format_attr_pc.attr, 4868 &format_attr_any.attr, 4869 &format_attr_inv.attr, 4870 &format_attr_cmask.attr, 4871 NULL, 4872 }; 4873 4874 static struct attribute *hsw_format_attr[] = { 4875 &format_attr_in_tx.attr, 4876 &format_attr_in_tx_cp.attr, 4877 &format_attr_offcore_rsp.attr, 4878 &format_attr_ldlat.attr, 4879 NULL 4880 }; 4881 4882 static struct attribute *nhm_format_attr[] = { 4883 &format_attr_offcore_rsp.attr, 4884 &format_attr_ldlat.attr, 4885 NULL 4886 }; 4887 4888 static struct attribute *slm_format_attr[] = { 4889 &format_attr_offcore_rsp.attr, 4890 NULL 4891 }; 4892 4893 static struct attribute *cmt_format_attr[] = { 4894 &format_attr_offcore_rsp.attr, 4895 &format_attr_ldlat.attr, 4896 &format_attr_snoop_rsp.attr, 4897 NULL 4898 }; 4899 4900 static struct attribute *skl_format_attr[] = { 4901 &format_attr_frontend.attr, 4902 NULL, 4903 }; 4904 4905 static __initconst const struct x86_pmu core_pmu = { 4906 .name = "core", 4907 .handle_irq = x86_pmu_handle_irq, 4908 .disable_all = x86_pmu_disable_all, 4909 .enable_all = core_pmu_enable_all, 4910 .enable = core_pmu_enable_event, 4911 .disable = x86_pmu_disable_event, 4912 .hw_config = core_pmu_hw_config, 4913 .schedule_events = x86_schedule_events, 4914 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 4915 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 4916 .event_map = intel_pmu_event_map, 4917 .max_events = ARRAY_SIZE(intel_perfmon_event_map), 4918 .apic = 1, 4919 .large_pebs_flags = LARGE_PEBS_FLAGS, 4920 4921 /* 4922 * Intel PMCs cannot be accessed sanely above 32-bit width, 4923 * so we install an artificial 1<<31 period regardless of 4924 * the 
generic event period: 4925 */ 4926 .max_period = (1ULL<<31) - 1, 4927 .get_event_constraints = intel_get_event_constraints, 4928 .put_event_constraints = intel_put_event_constraints, 4929 .event_constraints = intel_core_event_constraints, 4930 .guest_get_msrs = core_guest_get_msrs, 4931 .format_attrs = intel_arch_formats_attr, 4932 .events_sysfs_show = intel_event_sysfs_show, 4933 4934 /* 4935 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs 4936 * together with PMU version 1 and thus be using core_pmu with 4937 * shared_regs. We need following callbacks here to allocate 4938 * it properly. 4939 */ 4940 .cpu_prepare = intel_pmu_cpu_prepare, 4941 .cpu_starting = intel_pmu_cpu_starting, 4942 .cpu_dying = intel_pmu_cpu_dying, 4943 .cpu_dead = intel_pmu_cpu_dead, 4944 4945 .check_period = intel_pmu_check_period, 4946 4947 .lbr_reset = intel_pmu_lbr_reset_64, 4948 .lbr_read = intel_pmu_lbr_read_64, 4949 .lbr_save = intel_pmu_lbr_save, 4950 .lbr_restore = intel_pmu_lbr_restore, 4951 }; 4952 4953 static __initconst const struct x86_pmu intel_pmu = { 4954 .name = "Intel", 4955 .handle_irq = intel_pmu_handle_irq, 4956 .disable_all = intel_pmu_disable_all, 4957 .enable_all = intel_pmu_enable_all, 4958 .enable = intel_pmu_enable_event, 4959 .disable = intel_pmu_disable_event, 4960 .add = intel_pmu_add_event, 4961 .del = intel_pmu_del_event, 4962 .read = intel_pmu_read_event, 4963 .set_period = intel_pmu_set_period, 4964 .update = intel_pmu_update, 4965 .hw_config = intel_pmu_hw_config, 4966 .schedule_events = x86_schedule_events, 4967 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 4968 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 4969 .event_map = intel_pmu_event_map, 4970 .max_events = ARRAY_SIZE(intel_perfmon_event_map), 4971 .apic = 1, 4972 .large_pebs_flags = LARGE_PEBS_FLAGS, 4973 /* 4974 * Intel PMCs cannot be accessed sanely above 32 bit width, 4975 * so we install an artificial 1<<31 period regardless of 4976 * the generic event period: 4977 */ 4978 .max_period = (1ULL << 31) - 1, 4979 .get_event_constraints = intel_get_event_constraints, 4980 .put_event_constraints = intel_put_event_constraints, 4981 .pebs_aliases = intel_pebs_aliases_core2, 4982 4983 .format_attrs = intel_arch3_formats_attr, 4984 .events_sysfs_show = intel_event_sysfs_show, 4985 4986 .cpu_prepare = intel_pmu_cpu_prepare, 4987 .cpu_starting = intel_pmu_cpu_starting, 4988 .cpu_dying = intel_pmu_cpu_dying, 4989 .cpu_dead = intel_pmu_cpu_dead, 4990 4991 .guest_get_msrs = intel_guest_get_msrs, 4992 .sched_task = intel_pmu_sched_task, 4993 .swap_task_ctx = intel_pmu_swap_task_ctx, 4994 4995 .check_period = intel_pmu_check_period, 4996 4997 .aux_output_match = intel_pmu_aux_output_match, 4998 4999 .lbr_reset = intel_pmu_lbr_reset_64, 5000 .lbr_read = intel_pmu_lbr_read_64, 5001 .lbr_save = intel_pmu_lbr_save, 5002 .lbr_restore = intel_pmu_lbr_restore, 5003 5004 /* 5005 * SMM has access to all 4 rings and while traditionally SMM code only 5006 * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM. 5007 * 5008 * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction 5009 * between SMM or not, this results in what should be pure userspace 5010 * counters including SMM data. 5011 * 5012 * This is a clear privilege issue, therefore globally disable 5013 * counting SMM by default. 
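 * Users that really do want SMM execution included can clear this
 * again at run time via the freeze_on_smi sysfs attribute defined
 * further down.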
5014 */ 5015 .attr_freeze_on_smi = 1, 5016 }; 5017 5018 static __init void intel_clovertown_quirk(void) 5019 { 5020 /* 5021 * PEBS is unreliable due to: 5022 * 5023 * AJ67 - PEBS may experience CPL leaks 5024 * AJ68 - PEBS PMI may be delayed by one event 5025 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12] 5026 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS 5027 * 5028 * AJ67 could be worked around by restricting the OS/USR flags. 5029 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI. 5030 * 5031 * AJ106 could possibly be worked around by not allowing LBR 5032 * usage from PEBS, including the fixup. 5033 * AJ68 could possibly be worked around by always programming 5034 * a pebs_event_reset[0] value and coping with the lost events. 5035 * 5036 * But taken together it might just make sense to not enable PEBS on 5037 * these chips. 5038 */ 5039 pr_warn("PEBS disabled due to CPU errata\n"); 5040 x86_pmu.pebs = 0; 5041 x86_pmu.pebs_constraints = NULL; 5042 } 5043 5044 static const struct x86_cpu_desc isolation_ucodes[] = { 5045 INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f), 5046 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L, 1, 0x0000001e), 5047 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G, 1, 0x00000015), 5048 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037), 5049 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a), 5050 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 0x00000023), 5051 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G, 1, 0x00000014), 5052 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 2, 0x00000010), 5053 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009), 5054 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009), 5055 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002), 5056 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014), 5057 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021), 5058 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000), 5059 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000), 5060 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000), 5061 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000), 5062 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000), 5063 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c), 5064 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c), 5065 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e), 5066 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 9, 0x0000004e), 5067 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 10, 0x0000004e), 5068 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 11, 0x0000004e), 5069 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 12, 0x0000004e), 5070 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 10, 0x0000004e), 5071 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 11, 0x0000004e), 5072 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 12, 0x0000004e), 5073 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 13, 0x0000004e), 5074 {} 5075 }; 5076 5077 static void intel_check_pebs_isolation(void) 5078 { 5079 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes); 5080 } 5081 5082 static __init void intel_pebs_isolation_quirk(void) 5083 { 5084 WARN_ON_ONCE(x86_pmu.check_microcode); 5085 x86_pmu.check_microcode = intel_check_pebs_isolation; 5086 intel_check_pebs_isolation(); 5087 } 5088 5089 static const struct x86_cpu_desc pebs_ucodes[] = { 5090 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028), 5091 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618), 5092 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c), 5093 {} 5094 }; 5095 5096 static bool intel_snb_pebs_broken(void) 5097 { 5098 return !x86_cpu_has_min_microcode_rev(pebs_ucodes); 5099 } 5100 5101 
static void intel_snb_check_microcode(void) 5102 { 5103 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken) 5104 return; 5105 5106 /* 5107 * Serialized by the microcode lock.. 5108 */ 5109 if (x86_pmu.pebs_broken) { 5110 pr_info("PEBS enabled due to microcode update\n"); 5111 x86_pmu.pebs_broken = 0; 5112 } else { 5113 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n"); 5114 x86_pmu.pebs_broken = 1; 5115 } 5116 } 5117 5118 static bool is_lbr_from(unsigned long msr) 5119 { 5120 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr; 5121 5122 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr; 5123 } 5124 5125 /* 5126 * Under certain circumstances, access certain MSR may cause #GP. 5127 * The function tests if the input MSR can be safely accessed. 5128 */ 5129 static bool check_msr(unsigned long msr, u64 mask) 5130 { 5131 u64 val_old, val_new, val_tmp; 5132 5133 /* 5134 * Disable the check for real HW, so we don't 5135 * mess with potentially enabled registers: 5136 */ 5137 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) 5138 return true; 5139 5140 /* 5141 * Read the current value, change it and read it back to see if it 5142 * matches, this is needed to detect certain hardware emulators 5143 * (qemu/kvm) that don't trap on the MSR access and always return 0s. 5144 */ 5145 if (rdmsrl_safe(msr, &val_old)) 5146 return false; 5147 5148 /* 5149 * Only change the bits which can be updated by wrmsrl. 5150 */ 5151 val_tmp = val_old ^ mask; 5152 5153 if (is_lbr_from(msr)) 5154 val_tmp = lbr_from_signext_quirk_wr(val_tmp); 5155 5156 if (wrmsrl_safe(msr, val_tmp) || 5157 rdmsrl_safe(msr, &val_new)) 5158 return false; 5159 5160 /* 5161 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value 5162 * should equal rdmsrl()'s even with the quirk. 5163 */ 5164 if (val_new != val_tmp) 5165 return false; 5166 5167 if (is_lbr_from(msr)) 5168 val_old = lbr_from_signext_quirk_wr(val_old); 5169 5170 /* Here it's sure that the MSR can be safely accessed. 5171 * Restore the old value and return. 5172 */ 5173 wrmsrl(msr, val_old); 5174 5175 return true; 5176 } 5177 5178 static __init void intel_sandybridge_quirk(void) 5179 { 5180 x86_pmu.check_microcode = intel_snb_check_microcode; 5181 cpus_read_lock(); 5182 intel_snb_check_microcode(); 5183 cpus_read_unlock(); 5184 } 5185 5186 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = { 5187 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" }, 5188 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" }, 5189 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" }, 5190 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" }, 5191 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" }, 5192 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" }, 5193 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" }, 5194 }; 5195 5196 static __init void intel_arch_events_quirk(void) 5197 { 5198 int bit; 5199 5200 /* disable event that reported as not present by cpuid */ 5201 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) { 5202 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0; 5203 pr_warn("CPUID marked event: \'%s\' unavailable\n", 5204 intel_arch_events_map[bit].name); 5205 } 5206 } 5207 5208 static __init void intel_nehalem_quirk(void) 5209 { 5210 union cpuid10_ebx ebx; 5211 5212 ebx.full = x86_pmu.events_maskl; 5213 if (ebx.split.no_branch_misses_retired) { 5214 /* 5215 * Erratum AAJ80 detected, we work it around by using 5216 * the BR_MISP_EXEC.ANY event. 
This will over-count 5217 * branch-misses, but it's still much better than the 5218 * architectural event which is often completely bogus: 5219 */ 5220 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; 5221 ebx.split.no_branch_misses_retired = 0; 5222 x86_pmu.events_maskl = ebx.full; 5223 pr_info("CPU erratum AAJ80 worked around\n"); 5224 } 5225 } 5226 5227 /* 5228 * enable software workaround for errata: 5229 * SNB: BJ122 5230 * IVB: BV98 5231 * HSW: HSD29 5232 * 5233 * Only needed when HT is enabled. However detecting 5234 * if HT is enabled is difficult (model specific). So instead, 5235 * we enable the workaround in the early boot, and verify if 5236 * it is needed in a later initcall phase once we have valid 5237 * topology information to check if HT is actually enabled 5238 */ 5239 static __init void intel_ht_bug(void) 5240 { 5241 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED; 5242 5243 x86_pmu.start_scheduling = intel_start_scheduling; 5244 x86_pmu.commit_scheduling = intel_commit_scheduling; 5245 x86_pmu.stop_scheduling = intel_stop_scheduling; 5246 } 5247 5248 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3"); 5249 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82") 5250 5251 /* Haswell special events */ 5252 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1"); 5253 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2"); 5254 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4"); 5255 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2"); 5256 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1"); 5257 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1"); 5258 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2"); 5259 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4"); 5260 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2"); 5261 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1"); 5262 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1"); 5263 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1"); 5264 5265 static struct attribute *hsw_events_attrs[] = { 5266 EVENT_PTR(td_slots_issued), 5267 EVENT_PTR(td_slots_retired), 5268 EVENT_PTR(td_fetch_bubbles), 5269 EVENT_PTR(td_total_slots), 5270 EVENT_PTR(td_total_slots_scale), 5271 EVENT_PTR(td_recovery_bubbles), 5272 EVENT_PTR(td_recovery_bubbles_scale), 5273 NULL 5274 }; 5275 5276 static struct attribute *hsw_mem_events_attrs[] = { 5277 EVENT_PTR(mem_ld_hsw), 5278 EVENT_PTR(mem_st_hsw), 5279 NULL, 5280 }; 5281 5282 static struct attribute *hsw_tsx_events_attrs[] = { 5283 EVENT_PTR(tx_start), 5284 EVENT_PTR(tx_commit), 5285 EVENT_PTR(tx_abort), 5286 EVENT_PTR(tx_capacity), 5287 EVENT_PTR(tx_conflict), 5288 EVENT_PTR(el_start), 5289 EVENT_PTR(el_commit), 5290 EVENT_PTR(el_abort), 5291 EVENT_PTR(el_capacity), 5292 EVENT_PTR(el_conflict), 5293 EVENT_PTR(cycles_t), 5294 EVENT_PTR(cycles_ct), 5295 NULL 5296 }; 5297 5298 EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80"); 5299 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2"); 5300 EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80"); 5301 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2"); 5302 5303 static struct attribute *icl_events_attrs[] = { 5304 EVENT_PTR(mem_ld_hsw), 5305 EVENT_PTR(mem_st_hsw), 5306 NULL, 5307 }; 5308 5309 static struct attribute *icl_td_events_attrs[] = { 5310 EVENT_PTR(slots), 5311 
EVENT_PTR(td_retiring), 5312 EVENT_PTR(td_bad_spec), 5313 EVENT_PTR(td_fe_bound), 5314 EVENT_PTR(td_be_bound), 5315 NULL, 5316 }; 5317 5318 static struct attribute *icl_tsx_events_attrs[] = { 5319 EVENT_PTR(tx_start), 5320 EVENT_PTR(tx_abort), 5321 EVENT_PTR(tx_commit), 5322 EVENT_PTR(tx_capacity_read), 5323 EVENT_PTR(tx_capacity_write), 5324 EVENT_PTR(tx_conflict), 5325 EVENT_PTR(el_start), 5326 EVENT_PTR(el_abort), 5327 EVENT_PTR(el_commit), 5328 EVENT_PTR(el_capacity_read), 5329 EVENT_PTR(el_capacity_write), 5330 EVENT_PTR(el_conflict), 5331 EVENT_PTR(cycles_t), 5332 EVENT_PTR(cycles_ct), 5333 NULL, 5334 }; 5335 5336 5337 EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2"); 5338 EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82"); 5339 5340 static struct attribute *spr_events_attrs[] = { 5341 EVENT_PTR(mem_ld_hsw), 5342 EVENT_PTR(mem_st_spr), 5343 EVENT_PTR(mem_ld_aux), 5344 NULL, 5345 }; 5346 5347 static struct attribute *spr_td_events_attrs[] = { 5348 EVENT_PTR(slots), 5349 EVENT_PTR(td_retiring), 5350 EVENT_PTR(td_bad_spec), 5351 EVENT_PTR(td_fe_bound), 5352 EVENT_PTR(td_be_bound), 5353 EVENT_PTR(td_heavy_ops), 5354 EVENT_PTR(td_br_mispredict), 5355 EVENT_PTR(td_fetch_lat), 5356 EVENT_PTR(td_mem_bound), 5357 NULL, 5358 }; 5359 5360 static struct attribute *spr_tsx_events_attrs[] = { 5361 EVENT_PTR(tx_start), 5362 EVENT_PTR(tx_abort), 5363 EVENT_PTR(tx_commit), 5364 EVENT_PTR(tx_capacity_read), 5365 EVENT_PTR(tx_capacity_write), 5366 EVENT_PTR(tx_conflict), 5367 EVENT_PTR(cycles_t), 5368 EVENT_PTR(cycles_ct), 5369 NULL, 5370 }; 5371 5372 static ssize_t freeze_on_smi_show(struct device *cdev, 5373 struct device_attribute *attr, 5374 char *buf) 5375 { 5376 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi); 5377 } 5378 5379 static DEFINE_MUTEX(freeze_on_smi_mutex); 5380 5381 static ssize_t freeze_on_smi_store(struct device *cdev, 5382 struct device_attribute *attr, 5383 const char *buf, size_t count) 5384 { 5385 unsigned long val; 5386 ssize_t ret; 5387 5388 ret = kstrtoul(buf, 0, &val); 5389 if (ret) 5390 return ret; 5391 5392 if (val > 1) 5393 return -EINVAL; 5394 5395 mutex_lock(&freeze_on_smi_mutex); 5396 5397 if (x86_pmu.attr_freeze_on_smi == val) 5398 goto done; 5399 5400 x86_pmu.attr_freeze_on_smi = val; 5401 5402 cpus_read_lock(); 5403 on_each_cpu(flip_smm_bit, &val, 1); 5404 cpus_read_unlock(); 5405 done: 5406 mutex_unlock(&freeze_on_smi_mutex); 5407 5408 return count; 5409 } 5410 5411 static void update_tfa_sched(void *ignored) 5412 { 5413 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 5414 5415 /* 5416 * check if PMC3 is used 5417 * and if so force schedule out for all event types all contexts 5418 */ 5419 if (test_bit(3, cpuc->active_mask)) 5420 perf_pmu_resched(x86_get_pmu(smp_processor_id())); 5421 } 5422 5423 static ssize_t show_sysctl_tfa(struct device *cdev, 5424 struct device_attribute *attr, 5425 char *buf) 5426 { 5427 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort); 5428 } 5429 5430 static ssize_t set_sysctl_tfa(struct device *cdev, 5431 struct device_attribute *attr, 5432 const char *buf, size_t count) 5433 { 5434 bool val; 5435 ssize_t ret; 5436 5437 ret = kstrtobool(buf, &val); 5438 if (ret) 5439 return ret; 5440 5441 /* no change */ 5442 if (val == allow_tsx_force_abort) 5443 return count; 5444 5445 allow_tsx_force_abort = val; 5446 5447 cpus_read_lock(); 5448 on_each_cpu(update_tfa_sched, NULL, 1); 5449 cpus_read_unlock(); 5450 5451 return count; 5452 } 5453 5454 5455 static DEVICE_ATTR_RW(freeze_on_smi); 5456 
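/*
 * The device attributes in this area end up under the PMU's sysfs
 * directory, e.g. (on hybrid parts the PMUs are named cpu_core and
 * cpu_atom instead of "cpu"):
 *
 *   /sys/bus/event_source/devices/cpu/freeze_on_smi
 *   /sys/bus/event_source/devices/cpu/allow_tsx_force_abort
 *   /sys/bus/event_source/devices/cpu/caps/branches
 */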
5457 static ssize_t branches_show(struct device *cdev, 5458 struct device_attribute *attr, 5459 char *buf) 5460 { 5461 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr); 5462 } 5463 5464 static DEVICE_ATTR_RO(branches); 5465 5466 static struct attribute *lbr_attrs[] = { 5467 &dev_attr_branches.attr, 5468 NULL 5469 }; 5470 5471 static char pmu_name_str[30]; 5472 5473 static ssize_t pmu_name_show(struct device *cdev, 5474 struct device_attribute *attr, 5475 char *buf) 5476 { 5477 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str); 5478 } 5479 5480 static DEVICE_ATTR_RO(pmu_name); 5481 5482 static struct attribute *intel_pmu_caps_attrs[] = { 5483 &dev_attr_pmu_name.attr, 5484 NULL 5485 }; 5486 5487 static DEVICE_ATTR(allow_tsx_force_abort, 0644, 5488 show_sysctl_tfa, 5489 set_sysctl_tfa); 5490 5491 static struct attribute *intel_pmu_attrs[] = { 5492 &dev_attr_freeze_on_smi.attr, 5493 &dev_attr_allow_tsx_force_abort.attr, 5494 NULL, 5495 }; 5496 5497 static umode_t 5498 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i) 5499 { 5500 return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0; 5501 } 5502 5503 static umode_t 5504 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i) 5505 { 5506 return x86_pmu.pebs ? attr->mode : 0; 5507 } 5508 5509 static umode_t 5510 mem_is_visible(struct kobject *kobj, struct attribute *attr, int i) 5511 { 5512 if (attr == &event_attr_mem_ld_aux.attr.attr) 5513 return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0; 5514 5515 return pebs_is_visible(kobj, attr, i); 5516 } 5517 5518 static umode_t 5519 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) 5520 { 5521 return x86_pmu.lbr_nr ? attr->mode : 0; 5522 } 5523 5524 static umode_t 5525 exra_is_visible(struct kobject *kobj, struct attribute *attr, int i) 5526 { 5527 return x86_pmu.version >= 2 ? attr->mode : 0; 5528 } 5529 5530 static umode_t 5531 default_is_visible(struct kobject *kobj, struct attribute *attr, int i) 5532 { 5533 if (attr == &dev_attr_allow_tsx_force_abort.attr) 5534 return x86_pmu.flags & PMU_FL_TFA ? 
attr->mode : 0; 5535 5536 return attr->mode; 5537 } 5538 5539 static struct attribute_group group_events_td = { 5540 .name = "events", 5541 }; 5542 5543 static struct attribute_group group_events_mem = { 5544 .name = "events", 5545 .is_visible = mem_is_visible, 5546 }; 5547 5548 static struct attribute_group group_events_tsx = { 5549 .name = "events", 5550 .is_visible = tsx_is_visible, 5551 }; 5552 5553 static struct attribute_group group_caps_gen = { 5554 .name = "caps", 5555 .attrs = intel_pmu_caps_attrs, 5556 }; 5557 5558 static struct attribute_group group_caps_lbr = { 5559 .name = "caps", 5560 .attrs = lbr_attrs, 5561 .is_visible = lbr_is_visible, 5562 }; 5563 5564 static struct attribute_group group_format_extra = { 5565 .name = "format", 5566 .is_visible = exra_is_visible, 5567 }; 5568 5569 static struct attribute_group group_format_extra_skl = { 5570 .name = "format", 5571 .is_visible = exra_is_visible, 5572 }; 5573 5574 static struct attribute_group group_default = { 5575 .attrs = intel_pmu_attrs, 5576 .is_visible = default_is_visible, 5577 }; 5578 5579 static const struct attribute_group *attr_update[] = { 5580 &group_events_td, 5581 &group_events_mem, 5582 &group_events_tsx, 5583 &group_caps_gen, 5584 &group_caps_lbr, 5585 &group_format_extra, 5586 &group_format_extra_skl, 5587 &group_default, 5588 NULL, 5589 }; 5590 5591 EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big); 5592 EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small); 5593 EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small); 5594 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small); 5595 EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small); 5596 EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big); 5597 EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big); 5598 EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big); 5599 EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big); 5600 5601 static struct attribute *adl_hybrid_events_attrs[] = { 5602 EVENT_PTR(slots_adl), 5603 EVENT_PTR(td_retiring_adl), 5604 EVENT_PTR(td_bad_spec_adl), 5605 EVENT_PTR(td_fe_bound_adl), 5606 EVENT_PTR(td_be_bound_adl), 5607 EVENT_PTR(td_heavy_ops_adl), 5608 EVENT_PTR(td_br_mis_adl), 5609 EVENT_PTR(td_fetch_lat_adl), 5610 EVENT_PTR(td_mem_bound_adl), 5611 NULL, 5612 }; 5613 5614 /* Must be in IDX order */ 5615 EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small); 5616 EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small); 5617 EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big); 5618 5619 static struct attribute *adl_hybrid_mem_attrs[] = { 5620 EVENT_PTR(mem_ld_adl), 5621 EVENT_PTR(mem_st_adl), 5622 EVENT_PTR(mem_ld_aux_adl), 5623 NULL, 5624 }; 5625 5626 static struct attribute *mtl_hybrid_mem_attrs[] = { 5627 EVENT_PTR(mem_ld_adl), 5628 EVENT_PTR(mem_st_adl), 5629 NULL 5630 }; 5631 5632 EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big); 5633 EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, 
"event=0xc9,umask=0x2", hybrid_big); 5634 EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big); 5635 EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big); 5636 EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big); 5637 EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big); 5638 EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big); 5639 EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big); 5640 5641 static struct attribute *adl_hybrid_tsx_attrs[] = { 5642 EVENT_PTR(tx_start_adl), 5643 EVENT_PTR(tx_abort_adl), 5644 EVENT_PTR(tx_commit_adl), 5645 EVENT_PTR(tx_capacity_read_adl), 5646 EVENT_PTR(tx_capacity_write_adl), 5647 EVENT_PTR(tx_conflict_adl), 5648 EVENT_PTR(cycles_t_adl), 5649 EVENT_PTR(cycles_ct_adl), 5650 NULL, 5651 }; 5652 5653 FORMAT_ATTR_HYBRID(in_tx, hybrid_big); 5654 FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big); 5655 FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small); 5656 FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small); 5657 FORMAT_ATTR_HYBRID(frontend, hybrid_big); 5658 5659 #define ADL_HYBRID_RTM_FORMAT_ATTR \ 5660 FORMAT_HYBRID_PTR(in_tx), \ 5661 FORMAT_HYBRID_PTR(in_tx_cp) 5662 5663 #define ADL_HYBRID_FORMAT_ATTR \ 5664 FORMAT_HYBRID_PTR(offcore_rsp), \ 5665 FORMAT_HYBRID_PTR(ldlat), \ 5666 FORMAT_HYBRID_PTR(frontend) 5667 5668 static struct attribute *adl_hybrid_extra_attr_rtm[] = { 5669 ADL_HYBRID_RTM_FORMAT_ATTR, 5670 ADL_HYBRID_FORMAT_ATTR, 5671 NULL 5672 }; 5673 5674 static struct attribute *adl_hybrid_extra_attr[] = { 5675 ADL_HYBRID_FORMAT_ATTR, 5676 NULL 5677 }; 5678 5679 FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small); 5680 5681 static struct attribute *mtl_hybrid_extra_attr_rtm[] = { 5682 ADL_HYBRID_RTM_FORMAT_ATTR, 5683 ADL_HYBRID_FORMAT_ATTR, 5684 FORMAT_HYBRID_PTR(snoop_rsp), 5685 NULL 5686 }; 5687 5688 static struct attribute *mtl_hybrid_extra_attr[] = { 5689 ADL_HYBRID_FORMAT_ATTR, 5690 FORMAT_HYBRID_PTR(snoop_rsp), 5691 NULL 5692 }; 5693 5694 static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr) 5695 { 5696 struct device *dev = kobj_to_dev(kobj); 5697 struct x86_hybrid_pmu *pmu = 5698 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); 5699 struct perf_pmu_events_hybrid_attr *pmu_attr = 5700 container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr); 5701 5702 return pmu->cpu_type & pmu_attr->pmu_type; 5703 } 5704 5705 static umode_t hybrid_events_is_visible(struct kobject *kobj, 5706 struct attribute *attr, int i) 5707 { 5708 return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0; 5709 } 5710 5711 static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu) 5712 { 5713 int cpu = cpumask_first(&pmu->supported_cpus); 5714 5715 return (cpu >= nr_cpu_ids) ? -1 : cpu; 5716 } 5717 5718 static umode_t hybrid_tsx_is_visible(struct kobject *kobj, 5719 struct attribute *attr, int i) 5720 { 5721 struct device *dev = kobj_to_dev(kobj); 5722 struct x86_hybrid_pmu *pmu = 5723 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); 5724 int cpu = hybrid_find_supported_cpu(pmu); 5725 5726 return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? 
attr->mode : 0; 5727 } 5728 5729 static umode_t hybrid_format_is_visible(struct kobject *kobj, 5730 struct attribute *attr, int i) 5731 { 5732 struct device *dev = kobj_to_dev(kobj); 5733 struct x86_hybrid_pmu *pmu = 5734 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); 5735 struct perf_pmu_format_hybrid_attr *pmu_attr = 5736 container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr); 5737 int cpu = hybrid_find_supported_cpu(pmu); 5738 5739 return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? attr->mode : 0; 5740 } 5741 5742 static struct attribute_group hybrid_group_events_td = { 5743 .name = "events", 5744 .is_visible = hybrid_events_is_visible, 5745 }; 5746 5747 static struct attribute_group hybrid_group_events_mem = { 5748 .name = "events", 5749 .is_visible = hybrid_events_is_visible, 5750 }; 5751 5752 static struct attribute_group hybrid_group_events_tsx = { 5753 .name = "events", 5754 .is_visible = hybrid_tsx_is_visible, 5755 }; 5756 5757 static struct attribute_group hybrid_group_format_extra = { 5758 .name = "format", 5759 .is_visible = hybrid_format_is_visible, 5760 }; 5761 5762 static ssize_t intel_hybrid_get_attr_cpus(struct device *dev, 5763 struct device_attribute *attr, 5764 char *buf) 5765 { 5766 struct x86_hybrid_pmu *pmu = 5767 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); 5768 5769 return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus); 5770 } 5771 5772 static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL); 5773 static struct attribute *intel_hybrid_cpus_attrs[] = { 5774 &dev_attr_cpus.attr, 5775 NULL, 5776 }; 5777 5778 static struct attribute_group hybrid_group_cpus = { 5779 .attrs = intel_hybrid_cpus_attrs, 5780 }; 5781 5782 static const struct attribute_group *hybrid_attr_update[] = { 5783 &hybrid_group_events_td, 5784 &hybrid_group_events_mem, 5785 &hybrid_group_events_tsx, 5786 &group_caps_gen, 5787 &group_caps_lbr, 5788 &hybrid_group_format_extra, 5789 &group_default, 5790 &hybrid_group_cpus, 5791 NULL, 5792 }; 5793 5794 static struct attribute *empty_attrs; 5795 5796 static void intel_pmu_check_num_counters(int *num_counters, 5797 int *num_counters_fixed, 5798 u64 *intel_ctrl, u64 fixed_mask) 5799 { 5800 if (*num_counters > INTEL_PMC_MAX_GENERIC) { 5801 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", 5802 *num_counters, INTEL_PMC_MAX_GENERIC); 5803 *num_counters = INTEL_PMC_MAX_GENERIC; 5804 } 5805 *intel_ctrl = (1ULL << *num_counters) - 1; 5806 5807 if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) { 5808 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", 5809 *num_counters_fixed, INTEL_PMC_MAX_FIXED); 5810 *num_counters_fixed = INTEL_PMC_MAX_FIXED; 5811 } 5812 5813 *intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED; 5814 } 5815 5816 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints, 5817 int num_counters, 5818 int num_counters_fixed, 5819 u64 intel_ctrl) 5820 { 5821 struct event_constraint *c; 5822 5823 if (!event_constraints) 5824 return; 5825 5826 /* 5827 * event on fixed counter2 (REF_CYCLES) only works on this 5828 * counter, so do not extend mask to generic counters 5829 */ 5830 for_each_event_constraint(c, event_constraints) { 5831 /* 5832 * Don't extend the topdown slots and metrics 5833 * events to the generic counters. 5834 */ 5835 if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) { 5836 /* 5837 * Disable topdown slots and metrics events, 5838 * if slots event is not in CPUID. 
5839 */ 5840 if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl)) 5841 c->idxmsk64 = 0; 5842 c->weight = hweight64(c->idxmsk64); 5843 continue; 5844 } 5845 5846 if (c->cmask == FIXED_EVENT_FLAGS) { 5847 /* Disabled fixed counters which are not in CPUID */ 5848 c->idxmsk64 &= intel_ctrl; 5849 5850 /* 5851 * Don't extend the pseudo-encoding to the 5852 * generic counters 5853 */ 5854 if (!use_fixed_pseudo_encoding(c->code)) 5855 c->idxmsk64 |= (1ULL << num_counters) - 1; 5856 } 5857 c->idxmsk64 &= 5858 ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed)); 5859 c->weight = hweight64(c->idxmsk64); 5860 } 5861 } 5862 5863 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs) 5864 { 5865 struct extra_reg *er; 5866 5867 /* 5868 * Access extra MSR may cause #GP under certain circumstances. 5869 * E.g. KVM doesn't support offcore event 5870 * Check all extra_regs here. 5871 */ 5872 if (!extra_regs) 5873 return; 5874 5875 for (er = extra_regs; er->msr; er++) { 5876 er->extra_msr_access = check_msr(er->msr, 0x11UL); 5877 /* Disable LBR select mapping */ 5878 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access) 5879 x86_pmu.lbr_sel_map = NULL; 5880 } 5881 } 5882 5883 static void intel_pmu_check_hybrid_pmus(u64 fixed_mask) 5884 { 5885 struct x86_hybrid_pmu *pmu; 5886 int i; 5887 5888 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { 5889 pmu = &x86_pmu.hybrid_pmu[i]; 5890 5891 intel_pmu_check_num_counters(&pmu->num_counters, 5892 &pmu->num_counters_fixed, 5893 &pmu->intel_ctrl, 5894 fixed_mask); 5895 5896 if (pmu->intel_cap.perf_metrics) { 5897 pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; 5898 pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS; 5899 } 5900 5901 if (pmu->intel_cap.pebs_output_pt_available) 5902 pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT; 5903 5904 intel_pmu_check_event_constraints(pmu->event_constraints, 5905 pmu->num_counters, 5906 pmu->num_counters_fixed, 5907 pmu->intel_ctrl); 5908 5909 intel_pmu_check_extra_regs(pmu->extra_regs); 5910 } 5911 } 5912 5913 static __always_inline bool is_mtl(u8 x86_model) 5914 { 5915 return (x86_model == INTEL_FAM6_METEORLAKE) || 5916 (x86_model == INTEL_FAM6_METEORLAKE_L); 5917 } 5918 5919 __init int intel_pmu_init(void) 5920 { 5921 struct attribute **extra_skl_attr = &empty_attrs; 5922 struct attribute **extra_attr = &empty_attrs; 5923 struct attribute **td_attr = &empty_attrs; 5924 struct attribute **mem_attr = &empty_attrs; 5925 struct attribute **tsx_attr = &empty_attrs; 5926 union cpuid10_edx edx; 5927 union cpuid10_eax eax; 5928 union cpuid10_ebx ebx; 5929 unsigned int fixed_mask; 5930 bool pmem = false; 5931 int version, i; 5932 char *name; 5933 struct x86_hybrid_pmu *pmu; 5934 5935 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { 5936 switch (boot_cpu_data.x86) { 5937 case 0x6: 5938 return p6_pmu_init(); 5939 case 0xb: 5940 return knc_pmu_init(); 5941 case 0xf: 5942 return p4_pmu_init(); 5943 } 5944 return -ENODEV; 5945 } 5946 5947 /* 5948 * Check whether the Architectural PerfMon supports 5949 * Branch Misses Retired hw_event or not. 
5950 */ 5951 cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full); 5952 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT) 5953 return -ENODEV; 5954 5955 version = eax.split.version_id; 5956 if (version < 2) 5957 x86_pmu = core_pmu; 5958 else 5959 x86_pmu = intel_pmu; 5960 5961 x86_pmu.version = version; 5962 x86_pmu.num_counters = eax.split.num_counters; 5963 x86_pmu.cntval_bits = eax.split.bit_width; 5964 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1; 5965 5966 x86_pmu.events_maskl = ebx.full; 5967 x86_pmu.events_mask_len = eax.split.mask_length; 5968 5969 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters); 5970 x86_pmu.pebs_capable = PEBS_COUNTER_MASK; 5971 5972 /* 5973 * Quirk: v2 perfmon does not report fixed-purpose events, so 5974 * assume at least 3 events, when not running in a hypervisor: 5975 */ 5976 if (version > 1 && version < 5) { 5977 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR); 5978 5979 x86_pmu.num_counters_fixed = 5980 max((int)edx.split.num_counters_fixed, assume); 5981 5982 fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1; 5983 } else if (version >= 5) 5984 x86_pmu.num_counters_fixed = fls(fixed_mask); 5985 5986 if (boot_cpu_has(X86_FEATURE_PDCM)) { 5987 u64 capabilities; 5988 5989 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities); 5990 x86_pmu.intel_cap.capabilities = capabilities; 5991 } 5992 5993 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) { 5994 x86_pmu.lbr_reset = intel_pmu_lbr_reset_32; 5995 x86_pmu.lbr_read = intel_pmu_lbr_read_32; 5996 } 5997 5998 if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) 5999 intel_pmu_arch_lbr_init(); 6000 6001 intel_ds_init(); 6002 6003 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */ 6004 6005 if (version >= 5) { 6006 x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated; 6007 if (x86_pmu.intel_cap.anythread_deprecated) 6008 pr_cont(" AnyThread deprecated, "); 6009 } 6010 6011 /* 6012 * Install the hw-cache-events table: 6013 */ 6014 switch (boot_cpu_data.x86_model) { 6015 case INTEL_FAM6_CORE_YONAH: 6016 pr_cont("Core events, "); 6017 name = "core"; 6018 break; 6019 6020 case INTEL_FAM6_CORE2_MEROM: 6021 x86_add_quirk(intel_clovertown_quirk); 6022 fallthrough; 6023 6024 case INTEL_FAM6_CORE2_MEROM_L: 6025 case INTEL_FAM6_CORE2_PENRYN: 6026 case INTEL_FAM6_CORE2_DUNNINGTON: 6027 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, 6028 sizeof(hw_cache_event_ids)); 6029 6030 intel_pmu_lbr_init_core(); 6031 6032 x86_pmu.event_constraints = intel_core2_event_constraints; 6033 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints; 6034 pr_cont("Core2 events, "); 6035 name = "core2"; 6036 break; 6037 6038 case INTEL_FAM6_NEHALEM: 6039 case INTEL_FAM6_NEHALEM_EP: 6040 case INTEL_FAM6_NEHALEM_EX: 6041 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, 6042 sizeof(hw_cache_event_ids)); 6043 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, 6044 sizeof(hw_cache_extra_regs)); 6045 6046 intel_pmu_lbr_init_nhm(); 6047 6048 x86_pmu.event_constraints = intel_nehalem_event_constraints; 6049 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; 6050 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 6051 x86_pmu.extra_regs = intel_nehalem_extra_regs; 6052 x86_pmu.limit_period = nhm_limit_period; 6053 6054 mem_attr = nhm_mem_events_attrs; 6055 6056 /* UOPS_ISSUED.STALLED_CYCLES */ 6057 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 6058 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 6059 /* 
UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ 6060 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 6061 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); 6062 6063 intel_pmu_pebs_data_source_nhm(); 6064 x86_add_quirk(intel_nehalem_quirk); 6065 x86_pmu.pebs_no_tlb = 1; 6066 extra_attr = nhm_format_attr; 6067 6068 pr_cont("Nehalem events, "); 6069 name = "nehalem"; 6070 break; 6071 6072 case INTEL_FAM6_ATOM_BONNELL: 6073 case INTEL_FAM6_ATOM_BONNELL_MID: 6074 case INTEL_FAM6_ATOM_SALTWELL: 6075 case INTEL_FAM6_ATOM_SALTWELL_MID: 6076 case INTEL_FAM6_ATOM_SALTWELL_TABLET: 6077 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, 6078 sizeof(hw_cache_event_ids)); 6079 6080 intel_pmu_lbr_init_atom(); 6081 6082 x86_pmu.event_constraints = intel_gen_event_constraints; 6083 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints; 6084 x86_pmu.pebs_aliases = intel_pebs_aliases_core2; 6085 pr_cont("Atom events, "); 6086 name = "bonnell"; 6087 break; 6088 6089 case INTEL_FAM6_ATOM_SILVERMONT: 6090 case INTEL_FAM6_ATOM_SILVERMONT_D: 6091 case INTEL_FAM6_ATOM_SILVERMONT_MID: 6092 case INTEL_FAM6_ATOM_AIRMONT: 6093 case INTEL_FAM6_ATOM_AIRMONT_MID: 6094 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, 6095 sizeof(hw_cache_event_ids)); 6096 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, 6097 sizeof(hw_cache_extra_regs)); 6098 6099 intel_pmu_lbr_init_slm(); 6100 6101 x86_pmu.event_constraints = intel_slm_event_constraints; 6102 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints; 6103 x86_pmu.extra_regs = intel_slm_extra_regs; 6104 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6105 td_attr = slm_events_attrs; 6106 extra_attr = slm_format_attr; 6107 pr_cont("Silvermont events, "); 6108 name = "silvermont"; 6109 break; 6110 6111 case INTEL_FAM6_ATOM_GOLDMONT: 6112 case INTEL_FAM6_ATOM_GOLDMONT_D: 6113 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids, 6114 sizeof(hw_cache_event_ids)); 6115 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs, 6116 sizeof(hw_cache_extra_regs)); 6117 6118 intel_pmu_lbr_init_skl(); 6119 6120 x86_pmu.event_constraints = intel_slm_event_constraints; 6121 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints; 6122 x86_pmu.extra_regs = intel_glm_extra_regs; 6123 /* 6124 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS 6125 * for precise cycles. 6126 * :pp is identical to :ppp 6127 */ 6128 x86_pmu.pebs_aliases = NULL; 6129 x86_pmu.pebs_prec_dist = true; 6130 x86_pmu.lbr_pt_coexist = true; 6131 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6132 td_attr = glm_events_attrs; 6133 extra_attr = slm_format_attr; 6134 pr_cont("Goldmont events, "); 6135 name = "goldmont"; 6136 break; 6137 6138 case INTEL_FAM6_ATOM_GOLDMONT_PLUS: 6139 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, 6140 sizeof(hw_cache_event_ids)); 6141 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs, 6142 sizeof(hw_cache_extra_regs)); 6143 6144 intel_pmu_lbr_init_skl(); 6145 6146 x86_pmu.event_constraints = intel_slm_event_constraints; 6147 x86_pmu.extra_regs = intel_glm_extra_regs; 6148 /* 6149 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS 6150 * for precise cycles. 
6151 */ 6152 x86_pmu.pebs_aliases = NULL; 6153 x86_pmu.pebs_prec_dist = true; 6154 x86_pmu.lbr_pt_coexist = true; 6155 x86_pmu.pebs_capable = ~0ULL; 6156 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6157 x86_pmu.flags |= PMU_FL_PEBS_ALL; 6158 x86_pmu.get_event_constraints = glp_get_event_constraints; 6159 td_attr = glm_events_attrs; 6160 /* Goldmont Plus has 4-wide pipeline */ 6161 event_attr_td_total_slots_scale_glm.event_str = "4"; 6162 extra_attr = slm_format_attr; 6163 pr_cont("Goldmont plus events, "); 6164 name = "goldmont_plus"; 6165 break; 6166 6167 case INTEL_FAM6_ATOM_TREMONT_D: 6168 case INTEL_FAM6_ATOM_TREMONT: 6169 case INTEL_FAM6_ATOM_TREMONT_L: 6170 x86_pmu.late_ack = true; 6171 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, 6172 sizeof(hw_cache_event_ids)); 6173 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, 6174 sizeof(hw_cache_extra_regs)); 6175 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; 6176 6177 intel_pmu_lbr_init_skl(); 6178 6179 x86_pmu.event_constraints = intel_slm_event_constraints; 6180 x86_pmu.extra_regs = intel_tnt_extra_regs; 6181 /* 6182 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS 6183 * for precise cycles. 6184 */ 6185 x86_pmu.pebs_aliases = NULL; 6186 x86_pmu.pebs_prec_dist = true; 6187 x86_pmu.lbr_pt_coexist = true; 6188 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6189 x86_pmu.get_event_constraints = tnt_get_event_constraints; 6190 td_attr = tnt_events_attrs; 6191 extra_attr = slm_format_attr; 6192 pr_cont("Tremont events, "); 6193 name = "Tremont"; 6194 break; 6195 6196 case INTEL_FAM6_ATOM_GRACEMONT: 6197 x86_pmu.mid_ack = true; 6198 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, 6199 sizeof(hw_cache_event_ids)); 6200 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, 6201 sizeof(hw_cache_extra_regs)); 6202 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; 6203 6204 x86_pmu.event_constraints = intel_slm_event_constraints; 6205 x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints; 6206 x86_pmu.extra_regs = intel_grt_extra_regs; 6207 6208 x86_pmu.pebs_aliases = NULL; 6209 x86_pmu.pebs_prec_dist = true; 6210 x86_pmu.pebs_block = true; 6211 x86_pmu.lbr_pt_coexist = true; 6212 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6213 x86_pmu.flags |= PMU_FL_INSTR_LATENCY; 6214 6215 intel_pmu_pebs_data_source_grt(); 6216 x86_pmu.pebs_latency_data = adl_latency_data_small; 6217 x86_pmu.get_event_constraints = tnt_get_event_constraints; 6218 x86_pmu.limit_period = spr_limit_period; 6219 td_attr = tnt_events_attrs; 6220 mem_attr = grt_mem_attrs; 6221 extra_attr = nhm_format_attr; 6222 pr_cont("Gracemont events, "); 6223 name = "gracemont"; 6224 break; 6225 6226 case INTEL_FAM6_ATOM_CRESTMONT: 6227 case INTEL_FAM6_ATOM_CRESTMONT_X: 6228 x86_pmu.mid_ack = true; 6229 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, 6230 sizeof(hw_cache_event_ids)); 6231 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, 6232 sizeof(hw_cache_extra_regs)); 6233 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; 6234 6235 x86_pmu.event_constraints = intel_slm_event_constraints; 6236 x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints; 6237 x86_pmu.extra_regs = intel_cmt_extra_regs; 6238 6239 x86_pmu.pebs_aliases = NULL; 6240 x86_pmu.pebs_prec_dist = true; 6241 x86_pmu.lbr_pt_coexist = true; 6242 x86_pmu.pebs_block = true; 6243 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6244 x86_pmu.flags |= PMU_FL_INSTR_LATENCY; 6245 6246 intel_pmu_pebs_data_source_cmt(); 6247 x86_pmu.pebs_latency_data = mtl_latency_data_small; 6248 
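		/*
		 * Crestmont largely mirrors the Gracemont setup above: the
		 * same event and PEBS constraint tables, but with the
		 * Crestmont-specific extra regs, event/format attributes and
		 * constraint hook (intel_cmt_extra_regs, cmt_*), and the
		 * Meteor Lake flavour of the PEBS latency/data-source
		 * decoding.
		 */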
x86_pmu.get_event_constraints = cmt_get_event_constraints; 6249 x86_pmu.limit_period = spr_limit_period; 6250 td_attr = cmt_events_attrs; 6251 mem_attr = grt_mem_attrs; 6252 extra_attr = cmt_format_attr; 6253 pr_cont("Crestmont events, "); 6254 name = "crestmont"; 6255 break; 6256 6257 case INTEL_FAM6_WESTMERE: 6258 case INTEL_FAM6_WESTMERE_EP: 6259 case INTEL_FAM6_WESTMERE_EX: 6260 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, 6261 sizeof(hw_cache_event_ids)); 6262 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, 6263 sizeof(hw_cache_extra_regs)); 6264 6265 intel_pmu_lbr_init_nhm(); 6266 6267 x86_pmu.event_constraints = intel_westmere_event_constraints; 6268 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 6269 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; 6270 x86_pmu.extra_regs = intel_westmere_extra_regs; 6271 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6272 6273 mem_attr = nhm_mem_events_attrs; 6274 6275 /* UOPS_ISSUED.STALLED_CYCLES */ 6276 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 6277 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 6278 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ 6279 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 6280 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); 6281 6282 intel_pmu_pebs_data_source_nhm(); 6283 extra_attr = nhm_format_attr; 6284 pr_cont("Westmere events, "); 6285 name = "westmere"; 6286 break; 6287 6288 case INTEL_FAM6_SANDYBRIDGE: 6289 case INTEL_FAM6_SANDYBRIDGE_X: 6290 x86_add_quirk(intel_sandybridge_quirk); 6291 x86_add_quirk(intel_ht_bug); 6292 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 6293 sizeof(hw_cache_event_ids)); 6294 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 6295 sizeof(hw_cache_extra_regs)); 6296 6297 intel_pmu_lbr_init_snb(); 6298 6299 x86_pmu.event_constraints = intel_snb_event_constraints; 6300 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; 6301 x86_pmu.pebs_aliases = intel_pebs_aliases_snb; 6302 if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X) 6303 x86_pmu.extra_regs = intel_snbep_extra_regs; 6304 else 6305 x86_pmu.extra_regs = intel_snb_extra_regs; 6306 6307 6308 /* all extra regs are per-cpu when HT is on */ 6309 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6310 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6311 6312 td_attr = snb_events_attrs; 6313 mem_attr = snb_mem_events_attrs; 6314 6315 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 6316 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 6317 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 6318 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ 6319 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 6320 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1); 6321 6322 extra_attr = nhm_format_attr; 6323 6324 pr_cont("SandyBridge events, "); 6325 name = "sandybridge"; 6326 break; 6327 6328 case INTEL_FAM6_IVYBRIDGE: 6329 case INTEL_FAM6_IVYBRIDGE_X: 6330 x86_add_quirk(intel_ht_bug); 6331 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 6332 sizeof(hw_cache_event_ids)); 6333 /* dTLB-load-misses on IVB is different than SNB */ 6334 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */ 6335 6336 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 6337 sizeof(hw_cache_extra_regs)); 6338 6339 intel_pmu_lbr_init_snb(); 6340 6341 x86_pmu.event_constraints = intel_ivb_event_constraints; 6342 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; 6343 
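		/*
		 * As with SandyBridge above, the only model-specific choice
		 * made in this case is the set of offcore response (extra)
		 * registers: the -X server parts get the SNB-EP table,
		 * selected a few lines below from boot_cpu_data.x86_model.
		 */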
x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 6344 x86_pmu.pebs_prec_dist = true; 6345 if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X) 6346 x86_pmu.extra_regs = intel_snbep_extra_regs; 6347 else 6348 x86_pmu.extra_regs = intel_snb_extra_regs; 6349 /* all extra regs are per-cpu when HT is on */ 6350 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6351 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6352 6353 td_attr = snb_events_attrs; 6354 mem_attr = snb_mem_events_attrs; 6355 6356 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 6357 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 6358 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 6359 6360 extra_attr = nhm_format_attr; 6361 6362 pr_cont("IvyBridge events, "); 6363 name = "ivybridge"; 6364 break; 6365 6366 6367 case INTEL_FAM6_HASWELL: 6368 case INTEL_FAM6_HASWELL_X: 6369 case INTEL_FAM6_HASWELL_L: 6370 case INTEL_FAM6_HASWELL_G: 6371 x86_add_quirk(intel_ht_bug); 6372 x86_add_quirk(intel_pebs_isolation_quirk); 6373 x86_pmu.late_ack = true; 6374 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6375 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6376 6377 intel_pmu_lbr_init_hsw(); 6378 6379 x86_pmu.event_constraints = intel_hsw_event_constraints; 6380 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; 6381 x86_pmu.extra_regs = intel_snbep_extra_regs; 6382 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 6383 x86_pmu.pebs_prec_dist = true; 6384 /* all extra regs are per-cpu when HT is on */ 6385 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6386 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6387 6388 x86_pmu.hw_config = hsw_hw_config; 6389 x86_pmu.get_event_constraints = hsw_get_event_constraints; 6390 x86_pmu.lbr_double_abort = true; 6391 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
6392 hsw_format_attr : nhm_format_attr; 6393 td_attr = hsw_events_attrs; 6394 mem_attr = hsw_mem_events_attrs; 6395 tsx_attr = hsw_tsx_events_attrs; 6396 pr_cont("Haswell events, "); 6397 name = "haswell"; 6398 break; 6399 6400 case INTEL_FAM6_BROADWELL: 6401 case INTEL_FAM6_BROADWELL_D: 6402 case INTEL_FAM6_BROADWELL_G: 6403 case INTEL_FAM6_BROADWELL_X: 6404 x86_add_quirk(intel_pebs_isolation_quirk); 6405 x86_pmu.late_ack = true; 6406 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6407 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6408 6409 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */ 6410 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ | 6411 BDW_L3_MISS|HSW_SNOOP_DRAM; 6412 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS| 6413 HSW_SNOOP_DRAM; 6414 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ| 6415 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM; 6416 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE| 6417 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM; 6418 6419 intel_pmu_lbr_init_hsw(); 6420 6421 x86_pmu.event_constraints = intel_bdw_event_constraints; 6422 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints; 6423 x86_pmu.extra_regs = intel_snbep_extra_regs; 6424 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 6425 x86_pmu.pebs_prec_dist = true; 6426 /* all extra regs are per-cpu when HT is on */ 6427 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6428 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6429 6430 x86_pmu.hw_config = hsw_hw_config; 6431 x86_pmu.get_event_constraints = hsw_get_event_constraints; 6432 x86_pmu.limit_period = bdw_limit_period; 6433 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
6434 hsw_format_attr : nhm_format_attr; 6435 td_attr = hsw_events_attrs; 6436 mem_attr = hsw_mem_events_attrs; 6437 tsx_attr = hsw_tsx_events_attrs; 6438 pr_cont("Broadwell events, "); 6439 name = "broadwell"; 6440 break; 6441 6442 case INTEL_FAM6_XEON_PHI_KNL: 6443 case INTEL_FAM6_XEON_PHI_KNM: 6444 memcpy(hw_cache_event_ids, 6445 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6446 memcpy(hw_cache_extra_regs, 6447 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6448 intel_pmu_lbr_init_knl(); 6449 6450 x86_pmu.event_constraints = intel_slm_event_constraints; 6451 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints; 6452 x86_pmu.extra_regs = intel_knl_extra_regs; 6453 6454 /* all extra regs are per-cpu when HT is on */ 6455 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6456 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6457 extra_attr = slm_format_attr; 6458 pr_cont("Knights Landing/Mill events, "); 6459 name = "knights-landing"; 6460 break; 6461 6462 case INTEL_FAM6_SKYLAKE_X: 6463 pmem = true; 6464 fallthrough; 6465 case INTEL_FAM6_SKYLAKE_L: 6466 case INTEL_FAM6_SKYLAKE: 6467 case INTEL_FAM6_KABYLAKE_L: 6468 case INTEL_FAM6_KABYLAKE: 6469 case INTEL_FAM6_COMETLAKE_L: 6470 case INTEL_FAM6_COMETLAKE: 6471 x86_add_quirk(intel_pebs_isolation_quirk); 6472 x86_pmu.late_ack = true; 6473 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6474 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6475 intel_pmu_lbr_init_skl(); 6476 6477 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */ 6478 event_attr_td_recovery_bubbles.event_str_noht = 6479 "event=0xd,umask=0x1,cmask=1"; 6480 event_attr_td_recovery_bubbles.event_str_ht = 6481 "event=0xd,umask=0x1,cmask=1,any=1"; 6482 6483 x86_pmu.event_constraints = intel_skl_event_constraints; 6484 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints; 6485 x86_pmu.extra_regs = intel_skl_extra_regs; 6486 x86_pmu.pebs_aliases = intel_pebs_aliases_skl; 6487 x86_pmu.pebs_prec_dist = true; 6488 /* all extra regs are per-cpu when HT is on */ 6489 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6490 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6491 6492 x86_pmu.hw_config = hsw_hw_config; 6493 x86_pmu.get_event_constraints = hsw_get_event_constraints; 6494 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 6495 hsw_format_attr : nhm_format_attr; 6496 extra_skl_attr = skl_format_attr; 6497 td_attr = hsw_events_attrs; 6498 mem_attr = hsw_mem_events_attrs; 6499 tsx_attr = hsw_tsx_events_attrs; 6500 intel_pmu_pebs_data_source_skl(pmem); 6501 6502 /* 6503 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default. 6504 * TSX force abort hooks are not required on these systems. Only deploy 6505 * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT. 
6506 */ 6507 if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) && 6508 !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) { 6509 x86_pmu.flags |= PMU_FL_TFA; 6510 x86_pmu.get_event_constraints = tfa_get_event_constraints; 6511 x86_pmu.enable_all = intel_tfa_pmu_enable_all; 6512 x86_pmu.commit_scheduling = intel_tfa_commit_scheduling; 6513 } 6514 6515 pr_cont("Skylake events, "); 6516 name = "skylake"; 6517 break; 6518 6519 case INTEL_FAM6_ICELAKE_X: 6520 case INTEL_FAM6_ICELAKE_D: 6521 x86_pmu.pebs_ept = 1; 6522 pmem = true; 6523 fallthrough; 6524 case INTEL_FAM6_ICELAKE_L: 6525 case INTEL_FAM6_ICELAKE: 6526 case INTEL_FAM6_TIGERLAKE_L: 6527 case INTEL_FAM6_TIGERLAKE: 6528 case INTEL_FAM6_ROCKETLAKE: 6529 x86_pmu.late_ack = true; 6530 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6531 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6532 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; 6533 intel_pmu_lbr_init_skl(); 6534 6535 x86_pmu.event_constraints = intel_icl_event_constraints; 6536 x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints; 6537 x86_pmu.extra_regs = intel_icl_extra_regs; 6538 x86_pmu.pebs_aliases = NULL; 6539 x86_pmu.pebs_prec_dist = true; 6540 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6541 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6542 6543 x86_pmu.hw_config = hsw_hw_config; 6544 x86_pmu.get_event_constraints = icl_get_event_constraints; 6545 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 6546 hsw_format_attr : nhm_format_attr; 6547 extra_skl_attr = skl_format_attr; 6548 mem_attr = icl_events_attrs; 6549 td_attr = icl_td_events_attrs; 6550 tsx_attr = icl_tsx_events_attrs; 6551 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); 6552 x86_pmu.lbr_pt_coexist = true; 6553 intel_pmu_pebs_data_source_skl(pmem); 6554 x86_pmu.num_topdown_events = 4; 6555 static_call_update(intel_pmu_update_topdown_event, 6556 &icl_update_topdown_event); 6557 static_call_update(intel_pmu_set_topdown_event_period, 6558 &icl_set_topdown_event_period); 6559 pr_cont("Icelake events, "); 6560 name = "icelake"; 6561 break; 6562 6563 case INTEL_FAM6_SAPPHIRERAPIDS_X: 6564 case INTEL_FAM6_EMERALDRAPIDS_X: 6565 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; 6566 x86_pmu.extra_regs = intel_spr_extra_regs; 6567 fallthrough; 6568 case INTEL_FAM6_GRANITERAPIDS_X: 6569 case INTEL_FAM6_GRANITERAPIDS_D: 6570 pmem = true; 6571 x86_pmu.late_ack = true; 6572 memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6573 memcpy(hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6574 6575 x86_pmu.event_constraints = intel_spr_event_constraints; 6576 x86_pmu.pebs_constraints = intel_spr_pebs_event_constraints; 6577 if (!x86_pmu.extra_regs) 6578 x86_pmu.extra_regs = intel_gnr_extra_regs; 6579 x86_pmu.limit_period = spr_limit_period; 6580 x86_pmu.pebs_ept = 1; 6581 x86_pmu.pebs_aliases = NULL; 6582 x86_pmu.pebs_prec_dist = true; 6583 x86_pmu.pebs_block = true; 6584 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6585 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6586 x86_pmu.flags |= PMU_FL_INSTR_LATENCY; 6587 6588 x86_pmu.hw_config = hsw_hw_config; 6589 x86_pmu.get_event_constraints = spr_get_event_constraints; 6590 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		mem_attr = spr_events_attrs;
		td_attr = spr_td_events_attrs;
		tsx_attr = spr_tsx_events_attrs;
		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
		x86_pmu.lbr_pt_coexist = true;
		intel_pmu_pebs_data_source_skl(pmem);
		x86_pmu.num_topdown_events = 8;
		static_call_update(intel_pmu_update_topdown_event,
				   &icl_update_topdown_event);
		static_call_update(intel_pmu_set_topdown_event_period,
				   &icl_set_topdown_event_period);
		pr_cont("Sapphire Rapids events, ");
		name = "sapphire_rapids";
		break;

	case INTEL_FAM6_ALDERLAKE:
	case INTEL_FAM6_ALDERLAKE_L:
	case INTEL_FAM6_RAPTORLAKE:
	case INTEL_FAM6_RAPTORLAKE_P:
	case INTEL_FAM6_RAPTORLAKE_S:
	case INTEL_FAM6_METEORLAKE:
	case INTEL_FAM6_METEORLAKE_L:
		/*
		 * Alder Lake has 2 types of CPU, core and atom.
		 *
		 * Initialize the common PerfMon capabilities here.
		 */
		x86_pmu.hybrid_pmu = kcalloc(X86_HYBRID_NUM_PMUS,
					     sizeof(struct x86_hybrid_pmu),
					     GFP_KERNEL);
		if (!x86_pmu.hybrid_pmu)
			return -ENOMEM;
		static_branch_enable(&perf_is_hybrid);
		x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;

		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.pebs_block = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.pebs_latency_data = adl_latency_data_small;
		x86_pmu.num_topdown_events = 8;
		static_call_update(intel_pmu_update_topdown_event,
				   &adl_update_topdown_event);
		static_call_update(intel_pmu_set_topdown_event_period,
				   &adl_set_topdown_event_period);

		x86_pmu.filter = intel_pmu_filter;
		x86_pmu.get_event_constraints = adl_get_event_constraints;
		x86_pmu.hw_config = adl_hw_config;
		x86_pmu.limit_period = spr_limit_period;
		x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
		/*
		 * The rtm_abort_event is used to check whether to enable GPRs
		 * for the RTM abort event. Atom doesn't have the RTM abort
		 * event. There is no harm in setting it in the common
		 * x86_pmu.rtm_abort_event.
		 */
		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);

		td_attr = adl_hybrid_events_attrs;
		mem_attr = adl_hybrid_mem_attrs;
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;

		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		pmu->name = "cpu_core";
		pmu->cpu_type = hybrid_big;
		pmu->late_ack = true;
		if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
			pmu->num_counters = x86_pmu.num_counters + 2;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
		} else {
			pmu->num_counters = x86_pmu.num_counters;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
		}

		/*
		 * Quirk: on some Alder Lake machines, when all E-cores are
		 * disabled in the BIOS, CPUID leaf 0xA enumerates all counters
		 * of the P-cores. However, X86_FEATURE_HYBRID_CPU is still
		 * set. The code above would then mistakenly add extra counters
		 * for the P-cores. Correct the number of counters here.
6680 */ 6681 if ((pmu->num_counters > 8) || (pmu->num_counters_fixed > 4)) { 6682 pmu->num_counters = x86_pmu.num_counters; 6683 pmu->num_counters_fixed = x86_pmu.num_counters_fixed; 6684 } 6685 6686 pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); 6687 pmu->unconstrained = (struct event_constraint) 6688 __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, 6689 0, pmu->num_counters, 0, 0); 6690 pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; 6691 pmu->intel_cap.perf_metrics = 1; 6692 pmu->intel_cap.pebs_output_pt_available = 0; 6693 6694 memcpy(pmu->hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids)); 6695 memcpy(pmu->hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs)); 6696 pmu->event_constraints = intel_spr_event_constraints; 6697 pmu->pebs_constraints = intel_spr_pebs_event_constraints; 6698 pmu->extra_regs = intel_spr_extra_regs; 6699 6700 /* Initialize Atom core specific PerfMon capabilities.*/ 6701 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; 6702 pmu->name = "cpu_atom"; 6703 pmu->cpu_type = hybrid_small; 6704 pmu->mid_ack = true; 6705 pmu->num_counters = x86_pmu.num_counters; 6706 pmu->num_counters_fixed = x86_pmu.num_counters_fixed; 6707 pmu->max_pebs_events = x86_pmu.max_pebs_events; 6708 pmu->unconstrained = (struct event_constraint) 6709 __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, 6710 0, pmu->num_counters, 0, 0); 6711 pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; 6712 pmu->intel_cap.perf_metrics = 0; 6713 pmu->intel_cap.pebs_output_pt_available = 1; 6714 6715 memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids)); 6716 memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs)); 6717 pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; 6718 pmu->event_constraints = intel_slm_event_constraints; 6719 pmu->pebs_constraints = intel_grt_pebs_event_constraints; 6720 pmu->extra_regs = intel_grt_extra_regs; 6721 if (is_mtl(boot_cpu_data.x86_model)) { 6722 x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].extra_regs = intel_gnr_extra_regs; 6723 x86_pmu.pebs_latency_data = mtl_latency_data_small; 6724 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 6725 mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; 6726 mem_attr = mtl_hybrid_mem_attrs; 6727 intel_pmu_pebs_data_source_mtl(); 6728 x86_pmu.get_event_constraints = mtl_get_event_constraints; 6729 pmu->extra_regs = intel_cmt_extra_regs; 6730 pr_cont("Meteorlake Hybrid events, "); 6731 name = "meteorlake_hybrid"; 6732 } else { 6733 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; 6734 intel_pmu_pebs_data_source_adl(); 6735 pr_cont("Alderlake Hybrid events, "); 6736 name = "alderlake_hybrid"; 6737 } 6738 break; 6739 6740 default: 6741 switch (x86_pmu.version) { 6742 case 1: 6743 x86_pmu.event_constraints = intel_v1_event_constraints; 6744 pr_cont("generic architected perfmon v1, "); 6745 name = "generic_arch_v1"; 6746 break; 6747 case 2: 6748 case 3: 6749 case 4: 6750 /* 6751 * default constraints for v2 and up 6752 */ 6753 x86_pmu.event_constraints = intel_gen_event_constraints; 6754 pr_cont("generic architected perfmon, "); 6755 name = "generic_arch_v2+"; 6756 break; 6757 default: 6758 /* 6759 * The default constraints for v5 and up can support up to 6760 * 16 fixed counters. For the fixed counters 4 and later, 6761 * the pseudo-encoding is applied. 
6762 * The constraints may be cut according to the CPUID enumeration 6763 * by inserting the EVENT_CONSTRAINT_END. 6764 */ 6765 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) 6766 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED; 6767 intel_v5_gen_event_constraints[x86_pmu.num_counters_fixed].weight = -1; 6768 x86_pmu.event_constraints = intel_v5_gen_event_constraints; 6769 pr_cont("generic architected perfmon, "); 6770 name = "generic_arch_v5+"; 6771 break; 6772 } 6773 } 6774 6775 snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name); 6776 6777 if (!is_hybrid()) { 6778 group_events_td.attrs = td_attr; 6779 group_events_mem.attrs = mem_attr; 6780 group_events_tsx.attrs = tsx_attr; 6781 group_format_extra.attrs = extra_attr; 6782 group_format_extra_skl.attrs = extra_skl_attr; 6783 6784 x86_pmu.attr_update = attr_update; 6785 } else { 6786 hybrid_group_events_td.attrs = td_attr; 6787 hybrid_group_events_mem.attrs = mem_attr; 6788 hybrid_group_events_tsx.attrs = tsx_attr; 6789 hybrid_group_format_extra.attrs = extra_attr; 6790 6791 x86_pmu.attr_update = hybrid_attr_update; 6792 } 6793 6794 intel_pmu_check_num_counters(&x86_pmu.num_counters, 6795 &x86_pmu.num_counters_fixed, 6796 &x86_pmu.intel_ctrl, 6797 (u64)fixed_mask); 6798 6799 /* AnyThread may be deprecated on arch perfmon v5 or later */ 6800 if (x86_pmu.intel_cap.anythread_deprecated) 6801 x86_pmu.format_attrs = intel_arch_formats_attr; 6802 6803 intel_pmu_check_event_constraints(x86_pmu.event_constraints, 6804 x86_pmu.num_counters, 6805 x86_pmu.num_counters_fixed, 6806 x86_pmu.intel_ctrl); 6807 /* 6808 * Access LBR MSR may cause #GP under certain circumstances. 6809 * Check all LBR MSR here. 6810 * Disable LBR access if any LBR MSRs can not be accessed. 6811 */ 6812 if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL)) 6813 x86_pmu.lbr_nr = 0; 6814 for (i = 0; i < x86_pmu.lbr_nr; i++) { 6815 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) && 6816 check_msr(x86_pmu.lbr_to + i, 0xffffUL))) 6817 x86_pmu.lbr_nr = 0; 6818 } 6819 6820 if (x86_pmu.lbr_nr) { 6821 intel_pmu_lbr_init(); 6822 6823 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr); 6824 6825 /* only support branch_stack snapshot for perfmon >= v2 */ 6826 if (x86_pmu.disable_all == intel_pmu_disable_all) { 6827 if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) { 6828 static_call_update(perf_snapshot_branch_stack, 6829 intel_pmu_snapshot_arch_branch_stack); 6830 } else { 6831 static_call_update(perf_snapshot_branch_stack, 6832 intel_pmu_snapshot_branch_stack); 6833 } 6834 } 6835 } 6836 6837 intel_pmu_check_extra_regs(x86_pmu.extra_regs); 6838 6839 /* Support full width counters using alternative MSR range */ 6840 if (x86_pmu.intel_cap.full_width_write) { 6841 x86_pmu.max_period = x86_pmu.cntval_mask >> 1; 6842 x86_pmu.perfctr = MSR_IA32_PMC0; 6843 pr_cont("full-width counters, "); 6844 } 6845 6846 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) 6847 x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; 6848 6849 if (is_hybrid()) 6850 intel_pmu_check_hybrid_pmus((u64)fixed_mask); 6851 6852 if (x86_pmu.intel_cap.pebs_timing_info) 6853 x86_pmu.flags |= PMU_FL_RETIRE_LATENCY; 6854 6855 intel_aux_output_init(); 6856 6857 return 0; 6858 } 6859 6860 /* 6861 * HT bug: phase 2 init 6862 * Called once we have valid topology information to check 6863 * whether or not HT is enabled 6864 * If HT is off, then we disable the workaround 6865 */ 6866 static __init int fixup_ht_bug(void) 6867 { 6868 int c; 6869 /* 6870 * problem not present on this CPU model, nothing to do 6871 */ 6872 if 
	    (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;

	if (topology_max_smt_threads() > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}

	cpus_read_lock();

	hardlockup_detector_perf_stop();

	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

	x86_pmu.start_scheduling = NULL;
	x86_pmu.commit_scheduling = NULL;
	x86_pmu.stop_scheduling = NULL;

	hardlockup_detector_perf_restart();

	for_each_online_cpu(c)
		free_excl_cntrs(&per_cpu(cpu_hw_events, c));

	cpus_read_unlock();
	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
	return 0;
}
subsys_initcall(fixup_ht_bug)
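
/*
 * Rough sketch of the resulting sysfs layout (names and paths vary with the
 * CPU model, kernel configuration and hybrid vs. non-hybrid setup):
 *
 *   /sys/bus/event_source/devices/cpu/caps/pmu_name       e.g. "skylake"
 *   /sys/bus/event_source/devices/cpu/caps/branches       LBR depth, if any
 *   /sys/bus/event_source/devices/cpu/events/...          model events above
 *   /sys/bus/event_source/devices/cpu_core/cpus           hybrid systems
 *   /sys/bus/event_source/devices/cpu_atom/cpus           hybrid systems
 *
 * This is roughly what lets tools such as "perf stat -e topdown-retiring"
 * resolve the symbolic event names defined in this file.
 */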