1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Per core/cpu state 4 * 5 * Used to coordinate shared registers between HT threads or 6 * among events on a single PMU. 7 */ 8 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/stddef.h> 12 #include <linux/types.h> 13 #include <linux/init.h> 14 #include <linux/slab.h> 15 #include <linux/export.h> 16 #include <linux/nmi.h> 17 #include <linux/kvm_host.h> 18 19 #include <asm/cpufeature.h> 20 #include <asm/hardirq.h> 21 #include <asm/intel-family.h> 22 #include <asm/intel_pt.h> 23 #include <asm/apic.h> 24 #include <asm/cpu_device_id.h> 25 26 #include "../perf_event.h" 27 28 /* 29 * Intel PerfMon, used on Core and later. 30 */ 31 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = 32 { 33 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c, 34 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, 35 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e, 36 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e, 37 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, 38 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, 39 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, 40 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */ 41 }; 42 43 static struct event_constraint intel_core_event_constraints[] __read_mostly = 44 { 45 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ 46 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ 47 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ 48 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ 49 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ 50 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */ 51 EVENT_CONSTRAINT_END 52 }; 53 54 static struct event_constraint intel_core2_event_constraints[] __read_mostly = 55 { 56 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 57 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 58 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 59 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ 60 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ 61 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ 62 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ 63 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ 64 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */ 65 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ 66 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */ 67 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */ 68 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */ 69 EVENT_CONSTRAINT_END 70 }; 71 72 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = 73 { 74 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 75 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 76 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 77 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ 78 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ 79 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ 80 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */ 81 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */ 82 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */ 83 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ 84 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ 85 EVENT_CONSTRAINT_END 86 }; 87 88 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = 89 { 90 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 91 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), 92 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), 93 EVENT_EXTRA_END 94 }; 95 96 static struct event_constraint intel_westmere_event_constraints[] __read_mostly = 97 { 98 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 99 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 100 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 101 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ 102 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */ 103 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ 104 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */ 105 EVENT_CONSTRAINT_END 106 }; 107 108 static struct event_constraint intel_snb_event_constraints[] __read_mostly = 109 { 110 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 111 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 112 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 113 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */ 114 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */ 115 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ 116 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ 117 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ 118 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 119 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 120 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */ 121 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ 122 123 /* 124 * When HT is off these events can only run on the bottom 4 counters 125 * When HT is on, they are impacted by the HT bug and require EXCL access 126 */ 127 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */ 128 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 129 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 130 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ 131 132 EVENT_CONSTRAINT_END 133 }; 134 135 static struct event_constraint intel_ivb_event_constraints[] __read_mostly = 136 { 137 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 138 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 139 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 140 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */ 141 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */ 142 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */ 143 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */ 144 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ 145 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */ 146 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */ 147 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ 148 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ 149 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 150 151 /* 152 * When HT is off these events can only run on the bottom 4 counters 153 * When HT is on, they are impacted by the HT bug and require EXCL access 154 */ 155 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */ 156 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 157 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 158 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ 159 160 EVENT_CONSTRAINT_END 161 }; 162 163 static struct extra_reg intel_westmere_extra_regs[] __read_mostly = 164 { 165 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 166 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), 167 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1), 168 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), 169 EVENT_EXTRA_END 170 }; 171 172 static struct event_constraint intel_v1_event_constraints[] __read_mostly = 173 { 174 EVENT_CONSTRAINT_END 175 }; 176 177 static struct event_constraint intel_gen_event_constraints[] __read_mostly = 178 { 179 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 180 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 181 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 182 EVENT_CONSTRAINT_END 183 }; 184 185 static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly = 186 { 187 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 188 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 189 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 190 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */ 191 FIXED_EVENT_CONSTRAINT(0x0500, 4), 192 FIXED_EVENT_CONSTRAINT(0x0600, 5), 193 FIXED_EVENT_CONSTRAINT(0x0700, 6), 194 FIXED_EVENT_CONSTRAINT(0x0800, 7), 195 FIXED_EVENT_CONSTRAINT(0x0900, 8), 196 FIXED_EVENT_CONSTRAINT(0x0a00, 9), 197 FIXED_EVENT_CONSTRAINT(0x0b00, 10), 198 FIXED_EVENT_CONSTRAINT(0x0c00, 11), 199 FIXED_EVENT_CONSTRAINT(0x0d00, 12), 200 FIXED_EVENT_CONSTRAINT(0x0e00, 13), 201 FIXED_EVENT_CONSTRAINT(0x0f00, 14), 202 FIXED_EVENT_CONSTRAINT(0x1000, 15), 203 EVENT_CONSTRAINT_END 204 }; 205 206 static struct event_constraint intel_slm_event_constraints[] __read_mostly = 207 { 208 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 209 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 210 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */ 211 EVENT_CONSTRAINT_END 212 }; 213 214 static struct event_constraint intel_skl_event_constraints[] = { 215 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 216 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 217 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 218 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */ 219 220 /* 221 * when HT is off, these can only run on the bottom 4 counters 222 */ 223 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */ 224 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */ 225 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */ 226 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */ 227 INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */ 228 229 EVENT_CONSTRAINT_END 230 }; 231 232 static struct extra_reg intel_knl_extra_regs[] __read_mostly = { 233 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0), 234 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1), 235 EVENT_EXTRA_END 236 }; 237 238 static struct extra_reg intel_snb_extra_regs[] __read_mostly = { 239 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 240 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), 241 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), 242 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 243 EVENT_EXTRA_END 244 }; 245 246 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { 247 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 248 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), 249 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), 250 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 251 EVENT_EXTRA_END 252 }; 253 254 static struct extra_reg intel_skl_extra_regs[] __read_mostly = { 255 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), 256 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), 257 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 258 /* 259 * Note the low 8 bits eventsel code is not a continuous field, containing 260 * some #GPing bits. These are masked out. 261 */ 262 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), 263 EVENT_EXTRA_END 264 }; 265 266 static struct event_constraint intel_icl_event_constraints[] = { 267 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 268 FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* old INST_RETIRED.PREC_DIST */ 269 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */ 270 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 271 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 272 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */ 273 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0), 274 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1), 275 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2), 276 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3), 277 INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf), 278 INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf), 279 INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */ 280 INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf), 281 INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf), 282 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */ 283 INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */ 284 INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */ 285 INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */ 286 INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf), 287 INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf), 288 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf), 289 INTEL_EVENT_CONSTRAINT(0xef, 0xf), 290 INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf), 291 EVENT_CONSTRAINT_END 292 }; 293 294 static struct extra_reg intel_icl_extra_regs[] __read_mostly = { 295 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0), 296 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1), 297 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 298 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), 299 EVENT_EXTRA_END 300 }; 301 302 static struct extra_reg intel_spr_extra_regs[] __read_mostly = { 303 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), 304 INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), 305 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 306 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE), 307 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE), 308 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE), 309 EVENT_EXTRA_END 310 }; 311 312 static struct event_constraint intel_spr_event_constraints[] = { 313 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 314 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */ 315 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 316 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 317 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */ 318 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0), 319 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1), 320 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2), 321 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3), 322 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4), 323 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5), 324 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6), 325 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7), 326 327 INTEL_EVENT_CONSTRAINT(0x2e, 0xff), 328 INTEL_EVENT_CONSTRAINT(0x3c, 0xff), 329 /* 330 * Generally event codes < 0x90 are restricted to counters 0-3. 331 * The 0x2E and 0x3C are exception, which has no restriction. 332 */ 333 INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf), 334 335 INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf), 336 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), 337 INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf), 338 INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1), 339 INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1), 340 INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1), 341 INTEL_EVENT_CONSTRAINT(0xce, 0x1), 342 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf), 343 /* 344 * Generally event codes >= 0x90 are likely to have no restrictions. 345 * The exception are defined as above. 346 */ 347 INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff), 348 349 EVENT_CONSTRAINT_END 350 }; 351 352 static struct extra_reg intel_gnr_extra_regs[] __read_mostly = { 353 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), 354 INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), 355 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 356 INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE), 357 INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE), 358 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE), 359 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE), 360 EVENT_EXTRA_END 361 }; 362 363 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3"); 364 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3"); 365 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2"); 366 367 static struct attribute *nhm_mem_events_attrs[] = { 368 EVENT_PTR(mem_ld_nhm), 369 NULL, 370 }; 371 372 /* 373 * topdown events for Intel Core CPUs. 374 * 375 * The events are all in slots, which is a free slot in a 4 wide 376 * pipeline. Some events are already reported in slots, for cycle 377 * events we multiply by the pipeline width (4). 378 * 379 * With Hyper Threading on, topdown metrics are either summed or averaged 380 * between the threads of a core: (count_t0 + count_t1). 381 * 382 * For the average case the metric is always scaled to pipeline width, 383 * so we use factor 2 ((count_t0 + count_t1) / 2 * 4) 384 */ 385 386 EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots, 387 "event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */ 388 "event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */ 389 EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2"); 390 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued, 391 "event=0xe,umask=0x1"); /* uops_issued.any */ 392 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired, 393 "event=0xc2,umask=0x2"); /* uops_retired.retire_slots */ 394 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles, 395 "event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */ 396 EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles, 397 "event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */ 398 "event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */ 399 EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale, 400 "4", "2"); 401 402 EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4"); 403 EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80"); 404 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81"); 405 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82"); 406 EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83"); 407 EVENT_ATTR_STR(topdown-heavy-ops, td_heavy_ops, "event=0x00,umask=0x84"); 408 EVENT_ATTR_STR(topdown-br-mispredict, td_br_mispredict, "event=0x00,umask=0x85"); 409 EVENT_ATTR_STR(topdown-fetch-lat, td_fetch_lat, "event=0x00,umask=0x86"); 410 EVENT_ATTR_STR(topdown-mem-bound, td_mem_bound, "event=0x00,umask=0x87"); 411 412 static struct attribute *snb_events_attrs[] = { 413 EVENT_PTR(td_slots_issued), 414 EVENT_PTR(td_slots_retired), 415 EVENT_PTR(td_fetch_bubbles), 416 EVENT_PTR(td_total_slots), 417 EVENT_PTR(td_total_slots_scale), 418 EVENT_PTR(td_recovery_bubbles), 419 EVENT_PTR(td_recovery_bubbles_scale), 420 NULL, 421 }; 422 423 static struct attribute *snb_mem_events_attrs[] = { 424 EVENT_PTR(mem_ld_snb), 425 EVENT_PTR(mem_st_snb), 426 NULL, 427 }; 428 429 static struct event_constraint intel_hsw_event_constraints[] = { 430 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 431 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 432 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 433 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ 434 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 435 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 436 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ 437 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), 438 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ 439 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), 440 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ 441 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), 442 443 /* 444 * When HT is off these events can only run on the bottom 4 counters 445 * When HT is on, they are impacted by the HT bug and require EXCL access 446 */ 447 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */ 448 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 449 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 450 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ 451 452 EVENT_CONSTRAINT_END 453 }; 454 455 static struct event_constraint intel_bdw_event_constraints[] = { 456 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 457 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 458 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 459 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ 460 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */ 461 /* 462 * when HT is off, these can only run on the bottom 4 counters 463 */ 464 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */ 465 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */ 466 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */ 467 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */ 468 EVENT_CONSTRAINT_END 469 }; 470 471 static u64 intel_pmu_event_map(int hw_event) 472 { 473 return intel_perfmon_event_map[hw_event]; 474 } 475 476 static __initconst const u64 spr_hw_cache_event_ids 477 [PERF_COUNT_HW_CACHE_MAX] 478 [PERF_COUNT_HW_CACHE_OP_MAX] 479 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 480 { 481 [ C(L1D ) ] = { 482 [ C(OP_READ) ] = { 483 [ C(RESULT_ACCESS) ] = 0x81d0, 484 [ C(RESULT_MISS) ] = 0xe124, 485 }, 486 [ C(OP_WRITE) ] = { 487 [ C(RESULT_ACCESS) ] = 0x82d0, 488 }, 489 }, 490 [ C(L1I ) ] = { 491 [ C(OP_READ) ] = { 492 [ C(RESULT_MISS) ] = 0xe424, 493 }, 494 [ C(OP_WRITE) ] = { 495 [ C(RESULT_ACCESS) ] = -1, 496 [ C(RESULT_MISS) ] = -1, 497 }, 498 }, 499 [ C(LL ) ] = { 500 [ C(OP_READ) ] = { 501 [ C(RESULT_ACCESS) ] = 0x12a, 502 [ C(RESULT_MISS) ] = 0x12a, 503 }, 504 [ C(OP_WRITE) ] = { 505 [ C(RESULT_ACCESS) ] = 0x12a, 506 [ C(RESULT_MISS) ] = 0x12a, 507 }, 508 }, 509 [ C(DTLB) ] = { 510 [ C(OP_READ) ] = { 511 [ C(RESULT_ACCESS) ] = 0x81d0, 512 [ C(RESULT_MISS) ] = 0xe12, 513 }, 514 [ C(OP_WRITE) ] = { 515 [ C(RESULT_ACCESS) ] = 0x82d0, 516 [ C(RESULT_MISS) ] = 0xe13, 517 }, 518 }, 519 [ C(ITLB) ] = { 520 [ C(OP_READ) ] = { 521 [ C(RESULT_ACCESS) ] = -1, 522 [ C(RESULT_MISS) ] = 0xe11, 523 }, 524 [ C(OP_WRITE) ] = { 525 [ C(RESULT_ACCESS) ] = -1, 526 [ C(RESULT_MISS) ] = -1, 527 }, 528 [ C(OP_PREFETCH) ] = { 529 [ C(RESULT_ACCESS) ] = -1, 530 [ C(RESULT_MISS) ] = -1, 531 }, 532 }, 533 [ C(BPU ) ] = { 534 [ C(OP_READ) ] = { 535 [ C(RESULT_ACCESS) ] = 0x4c4, 536 [ C(RESULT_MISS) ] = 0x4c5, 537 }, 538 [ C(OP_WRITE) ] = { 539 [ C(RESULT_ACCESS) ] = -1, 540 [ C(RESULT_MISS) ] = -1, 541 }, 542 [ C(OP_PREFETCH) ] = { 543 [ C(RESULT_ACCESS) ] = -1, 544 [ C(RESULT_MISS) ] = -1, 545 }, 546 }, 547 [ C(NODE) ] = { 548 [ C(OP_READ) ] = { 549 [ C(RESULT_ACCESS) ] = 0x12a, 550 [ C(RESULT_MISS) ] = 0x12a, 551 }, 552 }, 553 }; 554 555 static __initconst const u64 spr_hw_cache_extra_regs 556 [PERF_COUNT_HW_CACHE_MAX] 557 [PERF_COUNT_HW_CACHE_OP_MAX] 558 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 559 { 560 [ C(LL ) ] = { 561 [ C(OP_READ) ] = { 562 [ C(RESULT_ACCESS) ] = 0x10001, 563 [ C(RESULT_MISS) ] = 0x3fbfc00001, 564 }, 565 [ C(OP_WRITE) ] = { 566 [ C(RESULT_ACCESS) ] = 0x3f3ffc0002, 567 [ C(RESULT_MISS) ] = 0x3f3fc00002, 568 }, 569 }, 570 [ C(NODE) ] = { 571 [ C(OP_READ) ] = { 572 [ C(RESULT_ACCESS) ] = 0x10c000001, 573 [ C(RESULT_MISS) ] = 0x3fb3000001, 574 }, 575 }, 576 }; 577 578 /* 579 * Notes on the events: 580 * - data reads do not include code reads (comparable to earlier tables) 581 * - data counts include speculative execution (except L1 write, dtlb, bpu) 582 * - remote node access includes remote memory, remote cache, remote mmio. 583 * - prefetches are not included in the counts. 584 * - icache miss does not include decoded icache 585 */ 586 587 #define SKL_DEMAND_DATA_RD BIT_ULL(0) 588 #define SKL_DEMAND_RFO BIT_ULL(1) 589 #define SKL_ANY_RESPONSE BIT_ULL(16) 590 #define SKL_SUPPLIER_NONE BIT_ULL(17) 591 #define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26) 592 #define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27) 593 #define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28) 594 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29) 595 #define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \ 596 SKL_L3_MISS_REMOTE_HOP0_DRAM| \ 597 SKL_L3_MISS_REMOTE_HOP1_DRAM| \ 598 SKL_L3_MISS_REMOTE_HOP2P_DRAM) 599 #define SKL_SPL_HIT BIT_ULL(30) 600 #define SKL_SNOOP_NONE BIT_ULL(31) 601 #define SKL_SNOOP_NOT_NEEDED BIT_ULL(32) 602 #define SKL_SNOOP_MISS BIT_ULL(33) 603 #define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34) 604 #define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35) 605 #define SKL_SNOOP_HITM BIT_ULL(36) 606 #define SKL_SNOOP_NON_DRAM BIT_ULL(37) 607 #define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \ 608 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \ 609 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \ 610 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM) 611 #define SKL_DEMAND_READ SKL_DEMAND_DATA_RD 612 #define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \ 613 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \ 614 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \ 615 SKL_SNOOP_HITM|SKL_SPL_HIT) 616 #define SKL_DEMAND_WRITE SKL_DEMAND_RFO 617 #define SKL_LLC_ACCESS SKL_ANY_RESPONSE 618 #define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \ 619 SKL_L3_MISS_REMOTE_HOP1_DRAM| \ 620 SKL_L3_MISS_REMOTE_HOP2P_DRAM) 621 622 static __initconst const u64 skl_hw_cache_event_ids 623 [PERF_COUNT_HW_CACHE_MAX] 624 [PERF_COUNT_HW_CACHE_OP_MAX] 625 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 626 { 627 [ C(L1D ) ] = { 628 [ C(OP_READ) ] = { 629 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ 630 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */ 631 }, 632 [ C(OP_WRITE) ] = { 633 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ 634 [ C(RESULT_MISS) ] = 0x0, 635 }, 636 [ C(OP_PREFETCH) ] = { 637 [ C(RESULT_ACCESS) ] = 0x0, 638 [ C(RESULT_MISS) ] = 0x0, 639 }, 640 }, 641 [ C(L1I ) ] = { 642 [ C(OP_READ) ] = { 643 [ C(RESULT_ACCESS) ] = 0x0, 644 [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */ 645 }, 646 [ C(OP_WRITE) ] = { 647 [ C(RESULT_ACCESS) ] = -1, 648 [ C(RESULT_MISS) ] = -1, 649 }, 650 [ C(OP_PREFETCH) ] = { 651 [ C(RESULT_ACCESS) ] = 0x0, 652 [ C(RESULT_MISS) ] = 0x0, 653 }, 654 }, 655 [ C(LL ) ] = { 656 [ C(OP_READ) ] = { 657 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 658 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 659 }, 660 [ C(OP_WRITE) ] = { 661 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 662 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 663 }, 664 [ C(OP_PREFETCH) ] = { 665 [ C(RESULT_ACCESS) ] = 0x0, 666 [ C(RESULT_MISS) ] = 0x0, 667 }, 668 }, 669 [ C(DTLB) ] = { 670 [ C(OP_READ) ] = { 671 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ 672 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ 673 }, 674 [ C(OP_WRITE) ] = { 675 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ 676 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */ 677 }, 678 [ C(OP_PREFETCH) ] = { 679 [ C(RESULT_ACCESS) ] = 0x0, 680 [ C(RESULT_MISS) ] = 0x0, 681 }, 682 }, 683 [ C(ITLB) ] = { 684 [ C(OP_READ) ] = { 685 [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */ 686 [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */ 687 }, 688 [ C(OP_WRITE) ] = { 689 [ C(RESULT_ACCESS) ] = -1, 690 [ C(RESULT_MISS) ] = -1, 691 }, 692 [ C(OP_PREFETCH) ] = { 693 [ C(RESULT_ACCESS) ] = -1, 694 [ C(RESULT_MISS) ] = -1, 695 }, 696 }, 697 [ C(BPU ) ] = { 698 [ C(OP_READ) ] = { 699 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */ 700 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 701 }, 702 [ C(OP_WRITE) ] = { 703 [ C(RESULT_ACCESS) ] = -1, 704 [ C(RESULT_MISS) ] = -1, 705 }, 706 [ C(OP_PREFETCH) ] = { 707 [ C(RESULT_ACCESS) ] = -1, 708 [ C(RESULT_MISS) ] = -1, 709 }, 710 }, 711 [ C(NODE) ] = { 712 [ C(OP_READ) ] = { 713 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 714 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 715 }, 716 [ C(OP_WRITE) ] = { 717 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 718 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 719 }, 720 [ C(OP_PREFETCH) ] = { 721 [ C(RESULT_ACCESS) ] = 0x0, 722 [ C(RESULT_MISS) ] = 0x0, 723 }, 724 }, 725 }; 726 727 static __initconst const u64 skl_hw_cache_extra_regs 728 [PERF_COUNT_HW_CACHE_MAX] 729 [PERF_COUNT_HW_CACHE_OP_MAX] 730 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 731 { 732 [ C(LL ) ] = { 733 [ C(OP_READ) ] = { 734 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ| 735 SKL_LLC_ACCESS|SKL_ANY_SNOOP, 736 [ C(RESULT_MISS) ] = SKL_DEMAND_READ| 737 SKL_L3_MISS|SKL_ANY_SNOOP| 738 SKL_SUPPLIER_NONE, 739 }, 740 [ C(OP_WRITE) ] = { 741 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE| 742 SKL_LLC_ACCESS|SKL_ANY_SNOOP, 743 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE| 744 SKL_L3_MISS|SKL_ANY_SNOOP| 745 SKL_SUPPLIER_NONE, 746 }, 747 [ C(OP_PREFETCH) ] = { 748 [ C(RESULT_ACCESS) ] = 0x0, 749 [ C(RESULT_MISS) ] = 0x0, 750 }, 751 }, 752 [ C(NODE) ] = { 753 [ C(OP_READ) ] = { 754 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ| 755 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM, 756 [ C(RESULT_MISS) ] = SKL_DEMAND_READ| 757 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM, 758 }, 759 [ C(OP_WRITE) ] = { 760 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE| 761 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM, 762 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE| 763 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM, 764 }, 765 [ C(OP_PREFETCH) ] = { 766 [ C(RESULT_ACCESS) ] = 0x0, 767 [ C(RESULT_MISS) ] = 0x0, 768 }, 769 }, 770 }; 771 772 #define SNB_DMND_DATA_RD (1ULL << 0) 773 #define SNB_DMND_RFO (1ULL << 1) 774 #define SNB_DMND_IFETCH (1ULL << 2) 775 #define SNB_DMND_WB (1ULL << 3) 776 #define SNB_PF_DATA_RD (1ULL << 4) 777 #define SNB_PF_RFO (1ULL << 5) 778 #define SNB_PF_IFETCH (1ULL << 6) 779 #define SNB_LLC_DATA_RD (1ULL << 7) 780 #define SNB_LLC_RFO (1ULL << 8) 781 #define SNB_LLC_IFETCH (1ULL << 9) 782 #define SNB_BUS_LOCKS (1ULL << 10) 783 #define SNB_STRM_ST (1ULL << 11) 784 #define SNB_OTHER (1ULL << 15) 785 #define SNB_RESP_ANY (1ULL << 16) 786 #define SNB_NO_SUPP (1ULL << 17) 787 #define SNB_LLC_HITM (1ULL << 18) 788 #define SNB_LLC_HITE (1ULL << 19) 789 #define SNB_LLC_HITS (1ULL << 20) 790 #define SNB_LLC_HITF (1ULL << 21) 791 #define SNB_LOCAL (1ULL << 22) 792 #define SNB_REMOTE (0xffULL << 23) 793 #define SNB_SNP_NONE (1ULL << 31) 794 #define SNB_SNP_NOT_NEEDED (1ULL << 32) 795 #define SNB_SNP_MISS (1ULL << 33) 796 #define SNB_NO_FWD (1ULL << 34) 797 #define SNB_SNP_FWD (1ULL << 35) 798 #define SNB_HITM (1ULL << 36) 799 #define SNB_NON_DRAM (1ULL << 37) 800 801 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD) 802 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO) 803 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO) 804 805 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \ 806 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \ 807 SNB_HITM) 808 809 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY) 810 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY) 811 812 #define SNB_L3_ACCESS SNB_RESP_ANY 813 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM) 814 815 static __initconst const u64 snb_hw_cache_extra_regs 816 [PERF_COUNT_HW_CACHE_MAX] 817 [PERF_COUNT_HW_CACHE_OP_MAX] 818 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 819 { 820 [ C(LL ) ] = { 821 [ C(OP_READ) ] = { 822 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS, 823 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS, 824 }, 825 [ C(OP_WRITE) ] = { 826 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS, 827 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS, 828 }, 829 [ C(OP_PREFETCH) ] = { 830 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS, 831 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS, 832 }, 833 }, 834 [ C(NODE) ] = { 835 [ C(OP_READ) ] = { 836 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY, 837 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE, 838 }, 839 [ C(OP_WRITE) ] = { 840 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY, 841 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE, 842 }, 843 [ C(OP_PREFETCH) ] = { 844 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY, 845 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE, 846 }, 847 }, 848 }; 849 850 static __initconst const u64 snb_hw_cache_event_ids 851 [PERF_COUNT_HW_CACHE_MAX] 852 [PERF_COUNT_HW_CACHE_OP_MAX] 853 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 854 { 855 [ C(L1D) ] = { 856 [ C(OP_READ) ] = { 857 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */ 858 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */ 859 }, 860 [ C(OP_WRITE) ] = { 861 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */ 862 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */ 863 }, 864 [ C(OP_PREFETCH) ] = { 865 [ C(RESULT_ACCESS) ] = 0x0, 866 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */ 867 }, 868 }, 869 [ C(L1I ) ] = { 870 [ C(OP_READ) ] = { 871 [ C(RESULT_ACCESS) ] = 0x0, 872 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */ 873 }, 874 [ C(OP_WRITE) ] = { 875 [ C(RESULT_ACCESS) ] = -1, 876 [ C(RESULT_MISS) ] = -1, 877 }, 878 [ C(OP_PREFETCH) ] = { 879 [ C(RESULT_ACCESS) ] = 0x0, 880 [ C(RESULT_MISS) ] = 0x0, 881 }, 882 }, 883 [ C(LL ) ] = { 884 [ C(OP_READ) ] = { 885 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ 886 [ C(RESULT_ACCESS) ] = 0x01b7, 887 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ 888 [ C(RESULT_MISS) ] = 0x01b7, 889 }, 890 [ C(OP_WRITE) ] = { 891 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ 892 [ C(RESULT_ACCESS) ] = 0x01b7, 893 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ 894 [ C(RESULT_MISS) ] = 0x01b7, 895 }, 896 [ C(OP_PREFETCH) ] = { 897 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ 898 [ C(RESULT_ACCESS) ] = 0x01b7, 899 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ 900 [ C(RESULT_MISS) ] = 0x01b7, 901 }, 902 }, 903 [ C(DTLB) ] = { 904 [ C(OP_READ) ] = { 905 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */ 906 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */ 907 }, 908 [ C(OP_WRITE) ] = { 909 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */ 910 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */ 911 }, 912 [ C(OP_PREFETCH) ] = { 913 [ C(RESULT_ACCESS) ] = 0x0, 914 [ C(RESULT_MISS) ] = 0x0, 915 }, 916 }, 917 [ C(ITLB) ] = { 918 [ C(OP_READ) ] = { 919 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */ 920 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */ 921 }, 922 [ C(OP_WRITE) ] = { 923 [ C(RESULT_ACCESS) ] = -1, 924 [ C(RESULT_MISS) ] = -1, 925 }, 926 [ C(OP_PREFETCH) ] = { 927 [ C(RESULT_ACCESS) ] = -1, 928 [ C(RESULT_MISS) ] = -1, 929 }, 930 }, 931 [ C(BPU ) ] = { 932 [ C(OP_READ) ] = { 933 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 934 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 935 }, 936 [ C(OP_WRITE) ] = { 937 [ C(RESULT_ACCESS) ] = -1, 938 [ C(RESULT_MISS) ] = -1, 939 }, 940 [ C(OP_PREFETCH) ] = { 941 [ C(RESULT_ACCESS) ] = -1, 942 [ C(RESULT_MISS) ] = -1, 943 }, 944 }, 945 [ C(NODE) ] = { 946 [ C(OP_READ) ] = { 947 [ C(RESULT_ACCESS) ] = 0x01b7, 948 [ C(RESULT_MISS) ] = 0x01b7, 949 }, 950 [ C(OP_WRITE) ] = { 951 [ C(RESULT_ACCESS) ] = 0x01b7, 952 [ C(RESULT_MISS) ] = 0x01b7, 953 }, 954 [ C(OP_PREFETCH) ] = { 955 [ C(RESULT_ACCESS) ] = 0x01b7, 956 [ C(RESULT_MISS) ] = 0x01b7, 957 }, 958 }, 959 960 }; 961 962 /* 963 * Notes on the events: 964 * - data reads do not include code reads (comparable to earlier tables) 965 * - data counts include speculative execution (except L1 write, dtlb, bpu) 966 * - remote node access includes remote memory, remote cache, remote mmio. 967 * - prefetches are not included in the counts because they are not 968 * reliably counted. 969 */ 970 971 #define HSW_DEMAND_DATA_RD BIT_ULL(0) 972 #define HSW_DEMAND_RFO BIT_ULL(1) 973 #define HSW_ANY_RESPONSE BIT_ULL(16) 974 #define HSW_SUPPLIER_NONE BIT_ULL(17) 975 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22) 976 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27) 977 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28) 978 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29) 979 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \ 980 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \ 981 HSW_L3_MISS_REMOTE_HOP2P) 982 #define HSW_SNOOP_NONE BIT_ULL(31) 983 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32) 984 #define HSW_SNOOP_MISS BIT_ULL(33) 985 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34) 986 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35) 987 #define HSW_SNOOP_HITM BIT_ULL(36) 988 #define HSW_SNOOP_NON_DRAM BIT_ULL(37) 989 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \ 990 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \ 991 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \ 992 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM) 993 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM) 994 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD 995 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO 996 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\ 997 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P) 998 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE 999 1000 #define BDW_L3_MISS_LOCAL BIT(26) 1001 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \ 1002 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \ 1003 HSW_L3_MISS_REMOTE_HOP2P) 1004 1005 1006 static __initconst const u64 hsw_hw_cache_event_ids 1007 [PERF_COUNT_HW_CACHE_MAX] 1008 [PERF_COUNT_HW_CACHE_OP_MAX] 1009 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1010 { 1011 [ C(L1D ) ] = { 1012 [ C(OP_READ) ] = { 1013 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1014 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */ 1015 }, 1016 [ C(OP_WRITE) ] = { 1017 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 1018 [ C(RESULT_MISS) ] = 0x0, 1019 }, 1020 [ C(OP_PREFETCH) ] = { 1021 [ C(RESULT_ACCESS) ] = 0x0, 1022 [ C(RESULT_MISS) ] = 0x0, 1023 }, 1024 }, 1025 [ C(L1I ) ] = { 1026 [ C(OP_READ) ] = { 1027 [ C(RESULT_ACCESS) ] = 0x0, 1028 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */ 1029 }, 1030 [ C(OP_WRITE) ] = { 1031 [ C(RESULT_ACCESS) ] = -1, 1032 [ C(RESULT_MISS) ] = -1, 1033 }, 1034 [ C(OP_PREFETCH) ] = { 1035 [ C(RESULT_ACCESS) ] = 0x0, 1036 [ C(RESULT_MISS) ] = 0x0, 1037 }, 1038 }, 1039 [ C(LL ) ] = { 1040 [ C(OP_READ) ] = { 1041 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1042 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1043 }, 1044 [ C(OP_WRITE) ] = { 1045 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1046 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1047 }, 1048 [ C(OP_PREFETCH) ] = { 1049 [ C(RESULT_ACCESS) ] = 0x0, 1050 [ C(RESULT_MISS) ] = 0x0, 1051 }, 1052 }, 1053 [ C(DTLB) ] = { 1054 [ C(OP_READ) ] = { 1055 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1056 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */ 1057 }, 1058 [ C(OP_WRITE) ] = { 1059 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 1060 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */ 1061 }, 1062 [ C(OP_PREFETCH) ] = { 1063 [ C(RESULT_ACCESS) ] = 0x0, 1064 [ C(RESULT_MISS) ] = 0x0, 1065 }, 1066 }, 1067 [ C(ITLB) ] = { 1068 [ C(OP_READ) ] = { 1069 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */ 1070 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */ 1071 }, 1072 [ C(OP_WRITE) ] = { 1073 [ C(RESULT_ACCESS) ] = -1, 1074 [ C(RESULT_MISS) ] = -1, 1075 }, 1076 [ C(OP_PREFETCH) ] = { 1077 [ C(RESULT_ACCESS) ] = -1, 1078 [ C(RESULT_MISS) ] = -1, 1079 }, 1080 }, 1081 [ C(BPU ) ] = { 1082 [ C(OP_READ) ] = { 1083 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */ 1084 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 1085 }, 1086 [ C(OP_WRITE) ] = { 1087 [ C(RESULT_ACCESS) ] = -1, 1088 [ C(RESULT_MISS) ] = -1, 1089 }, 1090 [ C(OP_PREFETCH) ] = { 1091 [ C(RESULT_ACCESS) ] = -1, 1092 [ C(RESULT_MISS) ] = -1, 1093 }, 1094 }, 1095 [ C(NODE) ] = { 1096 [ C(OP_READ) ] = { 1097 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1098 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1099 }, 1100 [ C(OP_WRITE) ] = { 1101 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1102 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ 1103 }, 1104 [ C(OP_PREFETCH) ] = { 1105 [ C(RESULT_ACCESS) ] = 0x0, 1106 [ C(RESULT_MISS) ] = 0x0, 1107 }, 1108 }, 1109 }; 1110 1111 static __initconst const u64 hsw_hw_cache_extra_regs 1112 [PERF_COUNT_HW_CACHE_MAX] 1113 [PERF_COUNT_HW_CACHE_OP_MAX] 1114 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1115 { 1116 [ C(LL ) ] = { 1117 [ C(OP_READ) ] = { 1118 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ| 1119 HSW_LLC_ACCESS, 1120 [ C(RESULT_MISS) ] = HSW_DEMAND_READ| 1121 HSW_L3_MISS|HSW_ANY_SNOOP, 1122 }, 1123 [ C(OP_WRITE) ] = { 1124 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE| 1125 HSW_LLC_ACCESS, 1126 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE| 1127 HSW_L3_MISS|HSW_ANY_SNOOP, 1128 }, 1129 [ C(OP_PREFETCH) ] = { 1130 [ C(RESULT_ACCESS) ] = 0x0, 1131 [ C(RESULT_MISS) ] = 0x0, 1132 }, 1133 }, 1134 [ C(NODE) ] = { 1135 [ C(OP_READ) ] = { 1136 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ| 1137 HSW_L3_MISS_LOCAL_DRAM| 1138 HSW_SNOOP_DRAM, 1139 [ C(RESULT_MISS) ] = HSW_DEMAND_READ| 1140 HSW_L3_MISS_REMOTE| 1141 HSW_SNOOP_DRAM, 1142 }, 1143 [ C(OP_WRITE) ] = { 1144 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE| 1145 HSW_L3_MISS_LOCAL_DRAM| 1146 HSW_SNOOP_DRAM, 1147 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE| 1148 HSW_L3_MISS_REMOTE| 1149 HSW_SNOOP_DRAM, 1150 }, 1151 [ C(OP_PREFETCH) ] = { 1152 [ C(RESULT_ACCESS) ] = 0x0, 1153 [ C(RESULT_MISS) ] = 0x0, 1154 }, 1155 }, 1156 }; 1157 1158 static __initconst const u64 westmere_hw_cache_event_ids 1159 [PERF_COUNT_HW_CACHE_MAX] 1160 [PERF_COUNT_HW_CACHE_OP_MAX] 1161 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1162 { 1163 [ C(L1D) ] = { 1164 [ C(OP_READ) ] = { 1165 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */ 1166 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */ 1167 }, 1168 [ C(OP_WRITE) ] = { 1169 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */ 1170 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */ 1171 }, 1172 [ C(OP_PREFETCH) ] = { 1173 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */ 1174 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */ 1175 }, 1176 }, 1177 [ C(L1I ) ] = { 1178 [ C(OP_READ) ] = { 1179 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ 1180 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ 1181 }, 1182 [ C(OP_WRITE) ] = { 1183 [ C(RESULT_ACCESS) ] = -1, 1184 [ C(RESULT_MISS) ] = -1, 1185 }, 1186 [ C(OP_PREFETCH) ] = { 1187 [ C(RESULT_ACCESS) ] = 0x0, 1188 [ C(RESULT_MISS) ] = 0x0, 1189 }, 1190 }, 1191 [ C(LL ) ] = { 1192 [ C(OP_READ) ] = { 1193 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ 1194 [ C(RESULT_ACCESS) ] = 0x01b7, 1195 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ 1196 [ C(RESULT_MISS) ] = 0x01b7, 1197 }, 1198 /* 1199 * Use RFO, not WRITEBACK, because a write miss would typically occur 1200 * on RFO. 1201 */ 1202 [ C(OP_WRITE) ] = { 1203 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ 1204 [ C(RESULT_ACCESS) ] = 0x01b7, 1205 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ 1206 [ C(RESULT_MISS) ] = 0x01b7, 1207 }, 1208 [ C(OP_PREFETCH) ] = { 1209 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ 1210 [ C(RESULT_ACCESS) ] = 0x01b7, 1211 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ 1212 [ C(RESULT_MISS) ] = 0x01b7, 1213 }, 1214 }, 1215 [ C(DTLB) ] = { 1216 [ C(OP_READ) ] = { 1217 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */ 1218 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */ 1219 }, 1220 [ C(OP_WRITE) ] = { 1221 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */ 1222 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */ 1223 }, 1224 [ C(OP_PREFETCH) ] = { 1225 [ C(RESULT_ACCESS) ] = 0x0, 1226 [ C(RESULT_MISS) ] = 0x0, 1227 }, 1228 }, 1229 [ C(ITLB) ] = { 1230 [ C(OP_READ) ] = { 1231 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */ 1232 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */ 1233 }, 1234 [ C(OP_WRITE) ] = { 1235 [ C(RESULT_ACCESS) ] = -1, 1236 [ C(RESULT_MISS) ] = -1, 1237 }, 1238 [ C(OP_PREFETCH) ] = { 1239 [ C(RESULT_ACCESS) ] = -1, 1240 [ C(RESULT_MISS) ] = -1, 1241 }, 1242 }, 1243 [ C(BPU ) ] = { 1244 [ C(OP_READ) ] = { 1245 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 1246 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */ 1247 }, 1248 [ C(OP_WRITE) ] = { 1249 [ C(RESULT_ACCESS) ] = -1, 1250 [ C(RESULT_MISS) ] = -1, 1251 }, 1252 [ C(OP_PREFETCH) ] = { 1253 [ C(RESULT_ACCESS) ] = -1, 1254 [ C(RESULT_MISS) ] = -1, 1255 }, 1256 }, 1257 [ C(NODE) ] = { 1258 [ C(OP_READ) ] = { 1259 [ C(RESULT_ACCESS) ] = 0x01b7, 1260 [ C(RESULT_MISS) ] = 0x01b7, 1261 }, 1262 [ C(OP_WRITE) ] = { 1263 [ C(RESULT_ACCESS) ] = 0x01b7, 1264 [ C(RESULT_MISS) ] = 0x01b7, 1265 }, 1266 [ C(OP_PREFETCH) ] = { 1267 [ C(RESULT_ACCESS) ] = 0x01b7, 1268 [ C(RESULT_MISS) ] = 0x01b7, 1269 }, 1270 }, 1271 }; 1272 1273 /* 1274 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits; 1275 * See IA32 SDM Vol 3B 30.6.1.3 1276 */ 1277 1278 #define NHM_DMND_DATA_RD (1 << 0) 1279 #define NHM_DMND_RFO (1 << 1) 1280 #define NHM_DMND_IFETCH (1 << 2) 1281 #define NHM_DMND_WB (1 << 3) 1282 #define NHM_PF_DATA_RD (1 << 4) 1283 #define NHM_PF_DATA_RFO (1 << 5) 1284 #define NHM_PF_IFETCH (1 << 6) 1285 #define NHM_OFFCORE_OTHER (1 << 7) 1286 #define NHM_UNCORE_HIT (1 << 8) 1287 #define NHM_OTHER_CORE_HIT_SNP (1 << 9) 1288 #define NHM_OTHER_CORE_HITM (1 << 10) 1289 /* reserved */ 1290 #define NHM_REMOTE_CACHE_FWD (1 << 12) 1291 #define NHM_REMOTE_DRAM (1 << 13) 1292 #define NHM_LOCAL_DRAM (1 << 14) 1293 #define NHM_NON_DRAM (1 << 15) 1294 1295 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD) 1296 #define NHM_REMOTE (NHM_REMOTE_DRAM) 1297 1298 #define NHM_DMND_READ (NHM_DMND_DATA_RD) 1299 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB) 1300 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO) 1301 1302 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM) 1303 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD) 1304 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS) 1305 1306 static __initconst const u64 nehalem_hw_cache_extra_regs 1307 [PERF_COUNT_HW_CACHE_MAX] 1308 [PERF_COUNT_HW_CACHE_OP_MAX] 1309 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1310 { 1311 [ C(LL ) ] = { 1312 [ C(OP_READ) ] = { 1313 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS, 1314 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS, 1315 }, 1316 [ C(OP_WRITE) ] = { 1317 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS, 1318 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS, 1319 }, 1320 [ C(OP_PREFETCH) ] = { 1321 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS, 1322 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS, 1323 }, 1324 }, 1325 [ C(NODE) ] = { 1326 [ C(OP_READ) ] = { 1327 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE, 1328 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE, 1329 }, 1330 [ C(OP_WRITE) ] = { 1331 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE, 1332 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE, 1333 }, 1334 [ C(OP_PREFETCH) ] = { 1335 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE, 1336 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE, 1337 }, 1338 }, 1339 }; 1340 1341 static __initconst const u64 nehalem_hw_cache_event_ids 1342 [PERF_COUNT_HW_CACHE_MAX] 1343 [PERF_COUNT_HW_CACHE_OP_MAX] 1344 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1345 { 1346 [ C(L1D) ] = { 1347 [ C(OP_READ) ] = { 1348 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */ 1349 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */ 1350 }, 1351 [ C(OP_WRITE) ] = { 1352 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */ 1353 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */ 1354 }, 1355 [ C(OP_PREFETCH) ] = { 1356 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */ 1357 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */ 1358 }, 1359 }, 1360 [ C(L1I ) ] = { 1361 [ C(OP_READ) ] = { 1362 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ 1363 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ 1364 }, 1365 [ C(OP_WRITE) ] = { 1366 [ C(RESULT_ACCESS) ] = -1, 1367 [ C(RESULT_MISS) ] = -1, 1368 }, 1369 [ C(OP_PREFETCH) ] = { 1370 [ C(RESULT_ACCESS) ] = 0x0, 1371 [ C(RESULT_MISS) ] = 0x0, 1372 }, 1373 }, 1374 [ C(LL ) ] = { 1375 [ C(OP_READ) ] = { 1376 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ 1377 [ C(RESULT_ACCESS) ] = 0x01b7, 1378 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ 1379 [ C(RESULT_MISS) ] = 0x01b7, 1380 }, 1381 /* 1382 * Use RFO, not WRITEBACK, because a write miss would typically occur 1383 * on RFO. 1384 */ 1385 [ C(OP_WRITE) ] = { 1386 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ 1387 [ C(RESULT_ACCESS) ] = 0x01b7, 1388 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ 1389 [ C(RESULT_MISS) ] = 0x01b7, 1390 }, 1391 [ C(OP_PREFETCH) ] = { 1392 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ 1393 [ C(RESULT_ACCESS) ] = 0x01b7, 1394 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ 1395 [ C(RESULT_MISS) ] = 0x01b7, 1396 }, 1397 }, 1398 [ C(DTLB) ] = { 1399 [ C(OP_READ) ] = { 1400 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ 1401 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */ 1402 }, 1403 [ C(OP_WRITE) ] = { 1404 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ 1405 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */ 1406 }, 1407 [ C(OP_PREFETCH) ] = { 1408 [ C(RESULT_ACCESS) ] = 0x0, 1409 [ C(RESULT_MISS) ] = 0x0, 1410 }, 1411 }, 1412 [ C(ITLB) ] = { 1413 [ C(OP_READ) ] = { 1414 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */ 1415 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */ 1416 }, 1417 [ C(OP_WRITE) ] = { 1418 [ C(RESULT_ACCESS) ] = -1, 1419 [ C(RESULT_MISS) ] = -1, 1420 }, 1421 [ C(OP_PREFETCH) ] = { 1422 [ C(RESULT_ACCESS) ] = -1, 1423 [ C(RESULT_MISS) ] = -1, 1424 }, 1425 }, 1426 [ C(BPU ) ] = { 1427 [ C(OP_READ) ] = { 1428 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 1429 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */ 1430 }, 1431 [ C(OP_WRITE) ] = { 1432 [ C(RESULT_ACCESS) ] = -1, 1433 [ C(RESULT_MISS) ] = -1, 1434 }, 1435 [ C(OP_PREFETCH) ] = { 1436 [ C(RESULT_ACCESS) ] = -1, 1437 [ C(RESULT_MISS) ] = -1, 1438 }, 1439 }, 1440 [ C(NODE) ] = { 1441 [ C(OP_READ) ] = { 1442 [ C(RESULT_ACCESS) ] = 0x01b7, 1443 [ C(RESULT_MISS) ] = 0x01b7, 1444 }, 1445 [ C(OP_WRITE) ] = { 1446 [ C(RESULT_ACCESS) ] = 0x01b7, 1447 [ C(RESULT_MISS) ] = 0x01b7, 1448 }, 1449 [ C(OP_PREFETCH) ] = { 1450 [ C(RESULT_ACCESS) ] = 0x01b7, 1451 [ C(RESULT_MISS) ] = 0x01b7, 1452 }, 1453 }, 1454 }; 1455 1456 static __initconst const u64 core2_hw_cache_event_ids 1457 [PERF_COUNT_HW_CACHE_MAX] 1458 [PERF_COUNT_HW_CACHE_OP_MAX] 1459 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1460 { 1461 [ C(L1D) ] = { 1462 [ C(OP_READ) ] = { 1463 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ 1464 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ 1465 }, 1466 [ C(OP_WRITE) ] = { 1467 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ 1468 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ 1469 }, 1470 [ C(OP_PREFETCH) ] = { 1471 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */ 1472 [ C(RESULT_MISS) ] = 0, 1473 }, 1474 }, 1475 [ C(L1I ) ] = { 1476 [ C(OP_READ) ] = { 1477 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */ 1478 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */ 1479 }, 1480 [ C(OP_WRITE) ] = { 1481 [ C(RESULT_ACCESS) ] = -1, 1482 [ C(RESULT_MISS) ] = -1, 1483 }, 1484 [ C(OP_PREFETCH) ] = { 1485 [ C(RESULT_ACCESS) ] = 0, 1486 [ C(RESULT_MISS) ] = 0, 1487 }, 1488 }, 1489 [ C(LL ) ] = { 1490 [ C(OP_READ) ] = { 1491 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ 1492 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ 1493 }, 1494 [ C(OP_WRITE) ] = { 1495 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ 1496 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ 1497 }, 1498 [ C(OP_PREFETCH) ] = { 1499 [ C(RESULT_ACCESS) ] = 0, 1500 [ C(RESULT_MISS) ] = 0, 1501 }, 1502 }, 1503 [ C(DTLB) ] = { 1504 [ C(OP_READ) ] = { 1505 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ 1506 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */ 1507 }, 1508 [ C(OP_WRITE) ] = { 1509 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ 1510 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */ 1511 }, 1512 [ C(OP_PREFETCH) ] = { 1513 [ C(RESULT_ACCESS) ] = 0, 1514 [ C(RESULT_MISS) ] = 0, 1515 }, 1516 }, 1517 [ C(ITLB) ] = { 1518 [ C(OP_READ) ] = { 1519 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ 1520 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */ 1521 }, 1522 [ C(OP_WRITE) ] = { 1523 [ C(RESULT_ACCESS) ] = -1, 1524 [ C(RESULT_MISS) ] = -1, 1525 }, 1526 [ C(OP_PREFETCH) ] = { 1527 [ C(RESULT_ACCESS) ] = -1, 1528 [ C(RESULT_MISS) ] = -1, 1529 }, 1530 }, 1531 [ C(BPU ) ] = { 1532 [ C(OP_READ) ] = { 1533 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ 1534 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ 1535 }, 1536 [ C(OP_WRITE) ] = { 1537 [ C(RESULT_ACCESS) ] = -1, 1538 [ C(RESULT_MISS) ] = -1, 1539 }, 1540 [ C(OP_PREFETCH) ] = { 1541 [ C(RESULT_ACCESS) ] = -1, 1542 [ C(RESULT_MISS) ] = -1, 1543 }, 1544 }, 1545 }; 1546 1547 static __initconst const u64 atom_hw_cache_event_ids 1548 [PERF_COUNT_HW_CACHE_MAX] 1549 [PERF_COUNT_HW_CACHE_OP_MAX] 1550 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1551 { 1552 [ C(L1D) ] = { 1553 [ C(OP_READ) ] = { 1554 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */ 1555 [ C(RESULT_MISS) ] = 0, 1556 }, 1557 [ C(OP_WRITE) ] = { 1558 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */ 1559 [ C(RESULT_MISS) ] = 0, 1560 }, 1561 [ C(OP_PREFETCH) ] = { 1562 [ C(RESULT_ACCESS) ] = 0x0, 1563 [ C(RESULT_MISS) ] = 0, 1564 }, 1565 }, 1566 [ C(L1I ) ] = { 1567 [ C(OP_READ) ] = { 1568 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ 1569 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ 1570 }, 1571 [ C(OP_WRITE) ] = { 1572 [ C(RESULT_ACCESS) ] = -1, 1573 [ C(RESULT_MISS) ] = -1, 1574 }, 1575 [ C(OP_PREFETCH) ] = { 1576 [ C(RESULT_ACCESS) ] = 0, 1577 [ C(RESULT_MISS) ] = 0, 1578 }, 1579 }, 1580 [ C(LL ) ] = { 1581 [ C(OP_READ) ] = { 1582 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ 1583 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ 1584 }, 1585 [ C(OP_WRITE) ] = { 1586 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ 1587 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ 1588 }, 1589 [ C(OP_PREFETCH) ] = { 1590 [ C(RESULT_ACCESS) ] = 0, 1591 [ C(RESULT_MISS) ] = 0, 1592 }, 1593 }, 1594 [ C(DTLB) ] = { 1595 [ C(OP_READ) ] = { 1596 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */ 1597 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */ 1598 }, 1599 [ C(OP_WRITE) ] = { 1600 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */ 1601 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */ 1602 }, 1603 [ C(OP_PREFETCH) ] = { 1604 [ C(RESULT_ACCESS) ] = 0, 1605 [ C(RESULT_MISS) ] = 0, 1606 }, 1607 }, 1608 [ C(ITLB) ] = { 1609 [ C(OP_READ) ] = { 1610 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ 1611 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */ 1612 }, 1613 [ C(OP_WRITE) ] = { 1614 [ C(RESULT_ACCESS) ] = -1, 1615 [ C(RESULT_MISS) ] = -1, 1616 }, 1617 [ C(OP_PREFETCH) ] = { 1618 [ C(RESULT_ACCESS) ] = -1, 1619 [ C(RESULT_MISS) ] = -1, 1620 }, 1621 }, 1622 [ C(BPU ) ] = { 1623 [ C(OP_READ) ] = { 1624 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ 1625 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ 1626 }, 1627 [ C(OP_WRITE) ] = { 1628 [ C(RESULT_ACCESS) ] = -1, 1629 [ C(RESULT_MISS) ] = -1, 1630 }, 1631 [ C(OP_PREFETCH) ] = { 1632 [ C(RESULT_ACCESS) ] = -1, 1633 [ C(RESULT_MISS) ] = -1, 1634 }, 1635 }, 1636 }; 1637 1638 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c"); 1639 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2"); 1640 /* no_alloc_cycles.not_delivered */ 1641 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm, 1642 "event=0xca,umask=0x50"); 1643 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2"); 1644 /* uops_retired.all */ 1645 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm, 1646 "event=0xc2,umask=0x10"); 1647 /* uops_retired.all */ 1648 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm, 1649 "event=0xc2,umask=0x10"); 1650 1651 static struct attribute *slm_events_attrs[] = { 1652 EVENT_PTR(td_total_slots_slm), 1653 EVENT_PTR(td_total_slots_scale_slm), 1654 EVENT_PTR(td_fetch_bubbles_slm), 1655 EVENT_PTR(td_fetch_bubbles_scale_slm), 1656 EVENT_PTR(td_slots_issued_slm), 1657 EVENT_PTR(td_slots_retired_slm), 1658 NULL 1659 }; 1660 1661 static struct extra_reg intel_slm_extra_regs[] __read_mostly = 1662 { 1663 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 1664 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0), 1665 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1), 1666 EVENT_EXTRA_END 1667 }; 1668 1669 #define SLM_DMND_READ SNB_DMND_DATA_RD 1670 #define SLM_DMND_WRITE SNB_DMND_RFO 1671 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO) 1672 1673 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM) 1674 #define SLM_LLC_ACCESS SNB_RESP_ANY 1675 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM) 1676 1677 static __initconst const u64 slm_hw_cache_extra_regs 1678 [PERF_COUNT_HW_CACHE_MAX] 1679 [PERF_COUNT_HW_CACHE_OP_MAX] 1680 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1681 { 1682 [ C(LL ) ] = { 1683 [ C(OP_READ) ] = { 1684 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS, 1685 [ C(RESULT_MISS) ] = 0, 1686 }, 1687 [ C(OP_WRITE) ] = { 1688 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS, 1689 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS, 1690 }, 1691 [ C(OP_PREFETCH) ] = { 1692 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS, 1693 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS, 1694 }, 1695 }, 1696 }; 1697 1698 static __initconst const u64 slm_hw_cache_event_ids 1699 [PERF_COUNT_HW_CACHE_MAX] 1700 [PERF_COUNT_HW_CACHE_OP_MAX] 1701 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 1702 { 1703 [ C(L1D) ] = { 1704 [ C(OP_READ) ] = { 1705 [ C(RESULT_ACCESS) ] = 0, 1706 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */ 1707 }, 1708 [ C(OP_WRITE) ] = { 1709 [ C(RESULT_ACCESS) ] = 0, 1710 [ C(RESULT_MISS) ] = 0, 1711 }, 1712 [ C(OP_PREFETCH) ] = { 1713 [ C(RESULT_ACCESS) ] = 0, 1714 [ C(RESULT_MISS) ] = 0, 1715 }, 1716 }, 1717 [ C(L1I ) ] = { 1718 [ C(OP_READ) ] = { 1719 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */ 1720 [ C(RESULT_MISS) ] = 0x0280, /* ICACGE.MISSES */ 1721 }, 1722 [ C(OP_WRITE) ] = { 1723 [ C(RESULT_ACCESS) ] = -1, 1724 [ C(RESULT_MISS) ] = -1, 1725 }, 1726 [ C(OP_PREFETCH) ] = { 1727 [ C(RESULT_ACCESS) ] = 0, 1728 [ C(RESULT_MISS) ] = 0, 1729 }, 1730 }, 1731 [ C(LL ) ] = { 1732 [ C(OP_READ) ] = { 1733 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ 1734 [ C(RESULT_ACCESS) ] = 0x01b7, 1735 [ C(RESULT_MISS) ] = 0, 1736 }, 1737 [ C(OP_WRITE) ] = { 1738 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ 1739 [ C(RESULT_ACCESS) ] = 0x01b7, 1740 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ 1741 [ C(RESULT_MISS) ] = 0x01b7, 1742 }, 1743 [ C(OP_PREFETCH) ] = { 1744 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ 1745 [ C(RESULT_ACCESS) ] = 0x01b7, 1746 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ 1747 [ C(RESULT_MISS) ] = 0x01b7, 1748 }, 1749 }, 1750 [ C(DTLB) ] = { 1751 [ C(OP_READ) ] = { 1752 [ C(RESULT_ACCESS) ] = 0, 1753 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */ 1754 }, 1755 [ C(OP_WRITE) ] = { 1756 [ C(RESULT_ACCESS) ] = 0, 1757 [ C(RESULT_MISS) ] = 0, 1758 }, 1759 [ C(OP_PREFETCH) ] = { 1760 [ C(RESULT_ACCESS) ] = 0, 1761 [ C(RESULT_MISS) ] = 0, 1762 }, 1763 }, 1764 [ C(ITLB) ] = { 1765 [ C(OP_READ) ] = { 1766 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ 1767 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */ 1768 }, 1769 [ C(OP_WRITE) ] = { 1770 [ C(RESULT_ACCESS) ] = -1, 1771 [ C(RESULT_MISS) ] = -1, 1772 }, 1773 [ C(OP_PREFETCH) ] = { 1774 [ C(RESULT_ACCESS) ] = -1, 1775 [ C(RESULT_MISS) ] = -1, 1776 }, 1777 }, 1778 [ C(BPU ) ] = { 1779 [ C(OP_READ) ] = { 1780 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ 1781 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ 1782 }, 1783 [ C(OP_WRITE) ] = { 1784 [ C(RESULT_ACCESS) ] = -1, 1785 [ C(RESULT_MISS) ] = -1, 1786 }, 1787 [ C(OP_PREFETCH) ] = { 1788 [ C(RESULT_ACCESS) ] = -1, 1789 [ C(RESULT_MISS) ] = -1, 1790 }, 1791 }, 1792 }; 1793 1794 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c"); 1795 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3"); 1796 /* UOPS_NOT_DELIVERED.ANY */ 1797 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c"); 1798 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */ 1799 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02"); 1800 /* UOPS_RETIRED.ANY */ 1801 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2"); 1802 /* UOPS_ISSUED.ANY */ 1803 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e"); 1804 1805 static struct attribute *glm_events_attrs[] = { 1806 EVENT_PTR(td_total_slots_glm), 1807 EVENT_PTR(td_total_slots_scale_glm), 1808 EVENT_PTR(td_fetch_bubbles_glm), 1809 EVENT_PTR(td_recovery_bubbles_glm), 1810 EVENT_PTR(td_slots_issued_glm), 1811 EVENT_PTR(td_slots_retired_glm), 1812 NULL 1813 }; 1814 1815 static struct extra_reg intel_glm_extra_regs[] __read_mostly = { 1816 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 1817 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0), 1818 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1), 1819 EVENT_EXTRA_END 1820 }; 1821 1822 #define GLM_DEMAND_DATA_RD BIT_ULL(0) 1823 #define GLM_DEMAND_RFO BIT_ULL(1) 1824 #define GLM_ANY_RESPONSE BIT_ULL(16) 1825 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33) 1826 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD 1827 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO 1828 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO) 1829 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE 1830 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM) 1831 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM) 1832 1833 static __initconst const u64 glm_hw_cache_event_ids 1834 [PERF_COUNT_HW_CACHE_MAX] 1835 [PERF_COUNT_HW_CACHE_OP_MAX] 1836 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 1837 [C(L1D)] = { 1838 [C(OP_READ)] = { 1839 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1840 [C(RESULT_MISS)] = 0x0, 1841 }, 1842 [C(OP_WRITE)] = { 1843 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 1844 [C(RESULT_MISS)] = 0x0, 1845 }, 1846 [C(OP_PREFETCH)] = { 1847 [C(RESULT_ACCESS)] = 0x0, 1848 [C(RESULT_MISS)] = 0x0, 1849 }, 1850 }, 1851 [C(L1I)] = { 1852 [C(OP_READ)] = { 1853 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */ 1854 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */ 1855 }, 1856 [C(OP_WRITE)] = { 1857 [C(RESULT_ACCESS)] = -1, 1858 [C(RESULT_MISS)] = -1, 1859 }, 1860 [C(OP_PREFETCH)] = { 1861 [C(RESULT_ACCESS)] = 0x0, 1862 [C(RESULT_MISS)] = 0x0, 1863 }, 1864 }, 1865 [C(LL)] = { 1866 [C(OP_READ)] = { 1867 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1868 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1869 }, 1870 [C(OP_WRITE)] = { 1871 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1872 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1873 }, 1874 [C(OP_PREFETCH)] = { 1875 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1876 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1877 }, 1878 }, 1879 [C(DTLB)] = { 1880 [C(OP_READ)] = { 1881 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1882 [C(RESULT_MISS)] = 0x0, 1883 }, 1884 [C(OP_WRITE)] = { 1885 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 1886 [C(RESULT_MISS)] = 0x0, 1887 }, 1888 [C(OP_PREFETCH)] = { 1889 [C(RESULT_ACCESS)] = 0x0, 1890 [C(RESULT_MISS)] = 0x0, 1891 }, 1892 }, 1893 [C(ITLB)] = { 1894 [C(OP_READ)] = { 1895 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */ 1896 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */ 1897 }, 1898 [C(OP_WRITE)] = { 1899 [C(RESULT_ACCESS)] = -1, 1900 [C(RESULT_MISS)] = -1, 1901 }, 1902 [C(OP_PREFETCH)] = { 1903 [C(RESULT_ACCESS)] = -1, 1904 [C(RESULT_MISS)] = -1, 1905 }, 1906 }, 1907 [C(BPU)] = { 1908 [C(OP_READ)] = { 1909 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 1910 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 1911 }, 1912 [C(OP_WRITE)] = { 1913 [C(RESULT_ACCESS)] = -1, 1914 [C(RESULT_MISS)] = -1, 1915 }, 1916 [C(OP_PREFETCH)] = { 1917 [C(RESULT_ACCESS)] = -1, 1918 [C(RESULT_MISS)] = -1, 1919 }, 1920 }, 1921 }; 1922 1923 static __initconst const u64 glm_hw_cache_extra_regs 1924 [PERF_COUNT_HW_CACHE_MAX] 1925 [PERF_COUNT_HW_CACHE_OP_MAX] 1926 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 1927 [C(LL)] = { 1928 [C(OP_READ)] = { 1929 [C(RESULT_ACCESS)] = GLM_DEMAND_READ| 1930 GLM_LLC_ACCESS, 1931 [C(RESULT_MISS)] = GLM_DEMAND_READ| 1932 GLM_LLC_MISS, 1933 }, 1934 [C(OP_WRITE)] = { 1935 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE| 1936 GLM_LLC_ACCESS, 1937 [C(RESULT_MISS)] = GLM_DEMAND_WRITE| 1938 GLM_LLC_MISS, 1939 }, 1940 [C(OP_PREFETCH)] = { 1941 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH| 1942 GLM_LLC_ACCESS, 1943 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH| 1944 GLM_LLC_MISS, 1945 }, 1946 }, 1947 }; 1948 1949 static __initconst const u64 glp_hw_cache_event_ids 1950 [PERF_COUNT_HW_CACHE_MAX] 1951 [PERF_COUNT_HW_CACHE_OP_MAX] 1952 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 1953 [C(L1D)] = { 1954 [C(OP_READ)] = { 1955 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1956 [C(RESULT_MISS)] = 0x0, 1957 }, 1958 [C(OP_WRITE)] = { 1959 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 1960 [C(RESULT_MISS)] = 0x0, 1961 }, 1962 [C(OP_PREFETCH)] = { 1963 [C(RESULT_ACCESS)] = 0x0, 1964 [C(RESULT_MISS)] = 0x0, 1965 }, 1966 }, 1967 [C(L1I)] = { 1968 [C(OP_READ)] = { 1969 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */ 1970 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */ 1971 }, 1972 [C(OP_WRITE)] = { 1973 [C(RESULT_ACCESS)] = -1, 1974 [C(RESULT_MISS)] = -1, 1975 }, 1976 [C(OP_PREFETCH)] = { 1977 [C(RESULT_ACCESS)] = 0x0, 1978 [C(RESULT_MISS)] = 0x0, 1979 }, 1980 }, 1981 [C(LL)] = { 1982 [C(OP_READ)] = { 1983 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1984 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1985 }, 1986 [C(OP_WRITE)] = { 1987 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1988 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1989 }, 1990 [C(OP_PREFETCH)] = { 1991 [C(RESULT_ACCESS)] = 0x0, 1992 [C(RESULT_MISS)] = 0x0, 1993 }, 1994 }, 1995 [C(DTLB)] = { 1996 [C(OP_READ)] = { 1997 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1998 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ 1999 }, 2000 [C(OP_WRITE)] = { 2001 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 2002 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */ 2003 }, 2004 [C(OP_PREFETCH)] = { 2005 [C(RESULT_ACCESS)] = 0x0, 2006 [C(RESULT_MISS)] = 0x0, 2007 }, 2008 }, 2009 [C(ITLB)] = { 2010 [C(OP_READ)] = { 2011 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */ 2012 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */ 2013 }, 2014 [C(OP_WRITE)] = { 2015 [C(RESULT_ACCESS)] = -1, 2016 [C(RESULT_MISS)] = -1, 2017 }, 2018 [C(OP_PREFETCH)] = { 2019 [C(RESULT_ACCESS)] = -1, 2020 [C(RESULT_MISS)] = -1, 2021 }, 2022 }, 2023 [C(BPU)] = { 2024 [C(OP_READ)] = { 2025 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 2026 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 2027 }, 2028 [C(OP_WRITE)] = { 2029 [C(RESULT_ACCESS)] = -1, 2030 [C(RESULT_MISS)] = -1, 2031 }, 2032 [C(OP_PREFETCH)] = { 2033 [C(RESULT_ACCESS)] = -1, 2034 [C(RESULT_MISS)] = -1, 2035 }, 2036 }, 2037 }; 2038 2039 static __initconst const u64 glp_hw_cache_extra_regs 2040 [PERF_COUNT_HW_CACHE_MAX] 2041 [PERF_COUNT_HW_CACHE_OP_MAX] 2042 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2043 [C(LL)] = { 2044 [C(OP_READ)] = { 2045 [C(RESULT_ACCESS)] = GLM_DEMAND_READ| 2046 GLM_LLC_ACCESS, 2047 [C(RESULT_MISS)] = GLM_DEMAND_READ| 2048 GLM_LLC_MISS, 2049 }, 2050 [C(OP_WRITE)] = { 2051 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE| 2052 GLM_LLC_ACCESS, 2053 [C(RESULT_MISS)] = GLM_DEMAND_WRITE| 2054 GLM_LLC_MISS, 2055 }, 2056 [C(OP_PREFETCH)] = { 2057 [C(RESULT_ACCESS)] = 0x0, 2058 [C(RESULT_MISS)] = 0x0, 2059 }, 2060 }, 2061 }; 2062 2063 #define TNT_LOCAL_DRAM BIT_ULL(26) 2064 #define TNT_DEMAND_READ GLM_DEMAND_DATA_RD 2065 #define TNT_DEMAND_WRITE GLM_DEMAND_RFO 2066 #define TNT_LLC_ACCESS GLM_ANY_RESPONSE 2067 #define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \ 2068 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM) 2069 #define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM) 2070 2071 static __initconst const u64 tnt_hw_cache_extra_regs 2072 [PERF_COUNT_HW_CACHE_MAX] 2073 [PERF_COUNT_HW_CACHE_OP_MAX] 2074 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2075 [C(LL)] = { 2076 [C(OP_READ)] = { 2077 [C(RESULT_ACCESS)] = TNT_DEMAND_READ| 2078 TNT_LLC_ACCESS, 2079 [C(RESULT_MISS)] = TNT_DEMAND_READ| 2080 TNT_LLC_MISS, 2081 }, 2082 [C(OP_WRITE)] = { 2083 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE| 2084 TNT_LLC_ACCESS, 2085 [C(RESULT_MISS)] = TNT_DEMAND_WRITE| 2086 TNT_LLC_MISS, 2087 }, 2088 [C(OP_PREFETCH)] = { 2089 [C(RESULT_ACCESS)] = 0x0, 2090 [C(RESULT_MISS)] = 0x0, 2091 }, 2092 }, 2093 }; 2094 2095 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0"); 2096 EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0"); 2097 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6"); 2098 EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0"); 2099 2100 static struct attribute *tnt_events_attrs[] = { 2101 EVENT_PTR(td_fe_bound_tnt), 2102 EVENT_PTR(td_retiring_tnt), 2103 EVENT_PTR(td_bad_spec_tnt), 2104 EVENT_PTR(td_be_bound_tnt), 2105 NULL, 2106 }; 2107 2108 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = { 2109 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 2110 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0), 2111 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1), 2112 EVENT_EXTRA_END 2113 }; 2114 2115 EVENT_ATTR_STR(mem-loads, mem_ld_grt, "event=0xd0,umask=0x5,ldlat=3"); 2116 EVENT_ATTR_STR(mem-stores, mem_st_grt, "event=0xd0,umask=0x6"); 2117 2118 static struct attribute *grt_mem_attrs[] = { 2119 EVENT_PTR(mem_ld_grt), 2120 EVENT_PTR(mem_st_grt), 2121 NULL 2122 }; 2123 2124 static struct extra_reg intel_grt_extra_regs[] __read_mostly = { 2125 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 2126 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), 2127 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), 2128 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0), 2129 EVENT_EXTRA_END 2130 }; 2131 2132 EVENT_ATTR_STR(topdown-retiring, td_retiring_cmt, "event=0x72,umask=0x0"); 2133 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_cmt, "event=0x73,umask=0x0"); 2134 2135 static struct attribute *cmt_events_attrs[] = { 2136 EVENT_PTR(td_fe_bound_tnt), 2137 EVENT_PTR(td_retiring_cmt), 2138 EVENT_PTR(td_bad_spec_cmt), 2139 EVENT_PTR(td_be_bound_tnt), 2140 NULL 2141 }; 2142 2143 static struct extra_reg intel_cmt_extra_regs[] __read_mostly = { 2144 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 2145 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0), 2146 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff3ffffffffffull, RSP_1), 2147 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0), 2148 INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0), 2149 INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1), 2150 EVENT_EXTRA_END 2151 }; 2152 2153 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */ 2154 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */ 2155 #define KNL_MCDRAM_LOCAL BIT_ULL(21) 2156 #define KNL_MCDRAM_FAR BIT_ULL(22) 2157 #define KNL_DDR_LOCAL BIT_ULL(23) 2158 #define KNL_DDR_FAR BIT_ULL(24) 2159 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \ 2160 KNL_DDR_LOCAL | KNL_DDR_FAR) 2161 #define KNL_L2_READ SLM_DMND_READ 2162 #define KNL_L2_WRITE SLM_DMND_WRITE 2163 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH 2164 #define KNL_L2_ACCESS SLM_LLC_ACCESS 2165 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \ 2166 KNL_DRAM_ANY | SNB_SNP_ANY | \ 2167 SNB_NON_DRAM) 2168 2169 static __initconst const u64 knl_hw_cache_extra_regs 2170 [PERF_COUNT_HW_CACHE_MAX] 2171 [PERF_COUNT_HW_CACHE_OP_MAX] 2172 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 2173 [C(LL)] = { 2174 [C(OP_READ)] = { 2175 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS, 2176 [C(RESULT_MISS)] = 0, 2177 }, 2178 [C(OP_WRITE)] = { 2179 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS, 2180 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS, 2181 }, 2182 [C(OP_PREFETCH)] = { 2183 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS, 2184 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS, 2185 }, 2186 }, 2187 }; 2188 2189 /* 2190 * Used from PMIs where the LBRs are already disabled. 2191 * 2192 * This function could be called consecutively. It is required to remain in 2193 * disabled state if called consecutively. 2194 * 2195 * During consecutive calls, the same disable value will be written to related 2196 * registers, so the PMU state remains unchanged. 2197 * 2198 * intel_bts events don't coexist with intel PMU's BTS events because of 2199 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them 2200 * disabled around intel PMU's event batching etc, only inside the PMI handler. 2201 * 2202 * Avoid PEBS_ENABLE MSR access in PMIs. 2203 * The GLOBAL_CTRL has been disabled. All the counters do not count anymore. 2204 * It doesn't matter if the PEBS is enabled or not. 2205 * Usually, the PEBS status are not changed in PMIs. It's unnecessary to 2206 * access PEBS_ENABLE MSR in disable_all()/enable_all(). 2207 * However, there are some cases which may change PEBS status, e.g. PMI 2208 * throttle. The PEBS_ENABLE should be updated where the status changes. 2209 */ 2210 static __always_inline void __intel_pmu_disable_all(bool bts) 2211 { 2212 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2213 2214 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 2215 2216 if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) 2217 intel_pmu_disable_bts(); 2218 } 2219 2220 static __always_inline void intel_pmu_disable_all(void) 2221 { 2222 __intel_pmu_disable_all(true); 2223 intel_pmu_pebs_disable_all(); 2224 intel_pmu_lbr_disable_all(); 2225 } 2226 2227 static void __intel_pmu_enable_all(int added, bool pmi) 2228 { 2229 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2230 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); 2231 2232 intel_pmu_lbr_enable_all(pmi); 2233 2234 if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) { 2235 wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val); 2236 cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val; 2237 } 2238 2239 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 2240 intel_ctrl & ~cpuc->intel_ctrl_guest_mask); 2241 2242 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { 2243 struct perf_event *event = 2244 cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; 2245 2246 if (WARN_ON_ONCE(!event)) 2247 return; 2248 2249 intel_pmu_enable_bts(event->hw.config); 2250 } 2251 } 2252 2253 static void intel_pmu_enable_all(int added) 2254 { 2255 intel_pmu_pebs_enable_all(); 2256 __intel_pmu_enable_all(added, false); 2257 } 2258 2259 static noinline int 2260 __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, 2261 unsigned int cnt, unsigned long flags) 2262 { 2263 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2264 2265 intel_pmu_lbr_read(); 2266 cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr); 2267 2268 memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt); 2269 intel_pmu_enable_all(0); 2270 local_irq_restore(flags); 2271 return cnt; 2272 } 2273 2274 static int 2275 intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt) 2276 { 2277 unsigned long flags; 2278 2279 /* must not have branches... */ 2280 local_irq_save(flags); 2281 __intel_pmu_disable_all(false); /* we don't care about BTS */ 2282 __intel_pmu_lbr_disable(); 2283 /* ... until here */ 2284 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); 2285 } 2286 2287 static int 2288 intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt) 2289 { 2290 unsigned long flags; 2291 2292 /* must not have branches... */ 2293 local_irq_save(flags); 2294 __intel_pmu_disable_all(false); /* we don't care about BTS */ 2295 __intel_pmu_arch_lbr_disable(); 2296 /* ... until here */ 2297 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); 2298 } 2299 2300 /* 2301 * Workaround for: 2302 * Intel Errata AAK100 (model 26) 2303 * Intel Errata AAP53 (model 30) 2304 * Intel Errata BD53 (model 44) 2305 * 2306 * The official story: 2307 * These chips need to be 'reset' when adding counters by programming the 2308 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either 2309 * in sequence on the same PMC or on different PMCs. 2310 * 2311 * In practice it appears some of these events do in fact count, and 2312 * we need to program all 4 events. 2313 */ 2314 static void intel_pmu_nhm_workaround(void) 2315 { 2316 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2317 static const unsigned long nhm_magic[4] = { 2318 0x4300B5, 2319 0x4300D2, 2320 0x4300B1, 2321 0x4300B1 2322 }; 2323 struct perf_event *event; 2324 int i; 2325 2326 /* 2327 * The Errata requires below steps: 2328 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL; 2329 * 2) Configure 4 PERFEVTSELx with the magic events and clear 2330 * the corresponding PMCx; 2331 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL; 2332 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL; 2333 * 5) Clear 4 pairs of ERFEVTSELx and PMCx; 2334 */ 2335 2336 /* 2337 * The real steps we choose are a little different from above. 2338 * A) To reduce MSR operations, we don't run step 1) as they 2339 * are already cleared before this function is called; 2340 * B) Call x86_perf_event_update to save PMCx before configuring 2341 * PERFEVTSELx with magic number; 2342 * C) With step 5), we do clear only when the PERFEVTSELx is 2343 * not used currently. 2344 * D) Call x86_perf_event_set_period to restore PMCx; 2345 */ 2346 2347 /* We always operate 4 pairs of PERF Counters */ 2348 for (i = 0; i < 4; i++) { 2349 event = cpuc->events[i]; 2350 if (event) 2351 static_call(x86_pmu_update)(event); 2352 } 2353 2354 for (i = 0; i < 4; i++) { 2355 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]); 2356 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0); 2357 } 2358 2359 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf); 2360 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); 2361 2362 for (i = 0; i < 4; i++) { 2363 event = cpuc->events[i]; 2364 2365 if (event) { 2366 static_call(x86_pmu_set_period)(event); 2367 __x86_pmu_enable_event(&event->hw, 2368 ARCH_PERFMON_EVENTSEL_ENABLE); 2369 } else 2370 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0); 2371 } 2372 } 2373 2374 static void intel_pmu_nhm_enable_all(int added) 2375 { 2376 if (added) 2377 intel_pmu_nhm_workaround(); 2378 intel_pmu_enable_all(added); 2379 } 2380 2381 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on) 2382 { 2383 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0; 2384 2385 if (cpuc->tfa_shadow != val) { 2386 cpuc->tfa_shadow = val; 2387 wrmsrl(MSR_TSX_FORCE_ABORT, val); 2388 } 2389 } 2390 2391 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) 2392 { 2393 /* 2394 * We're going to use PMC3, make sure TFA is set before we touch it. 2395 */ 2396 if (cntr == 3) 2397 intel_set_tfa(cpuc, true); 2398 } 2399 2400 static void intel_tfa_pmu_enable_all(int added) 2401 { 2402 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2403 2404 /* 2405 * If we find PMC3 is no longer used when we enable the PMU, we can 2406 * clear TFA. 2407 */ 2408 if (!test_bit(3, cpuc->active_mask)) 2409 intel_set_tfa(cpuc, false); 2410 2411 intel_pmu_enable_all(added); 2412 } 2413 2414 static inline u64 intel_pmu_get_status(void) 2415 { 2416 u64 status; 2417 2418 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); 2419 2420 return status; 2421 } 2422 2423 static inline void intel_pmu_ack_status(u64 ack) 2424 { 2425 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); 2426 } 2427 2428 static inline bool event_is_checkpointed(struct perf_event *event) 2429 { 2430 return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0; 2431 } 2432 2433 static inline void intel_set_masks(struct perf_event *event, int idx) 2434 { 2435 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2436 2437 if (event->attr.exclude_host) 2438 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); 2439 if (event->attr.exclude_guest) 2440 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); 2441 if (event_is_checkpointed(event)) 2442 __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status); 2443 } 2444 2445 static inline void intel_clear_masks(struct perf_event *event, int idx) 2446 { 2447 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2448 2449 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); 2450 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); 2451 __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status); 2452 } 2453 2454 static void intel_pmu_disable_fixed(struct perf_event *event) 2455 { 2456 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2457 struct hw_perf_event *hwc = &event->hw; 2458 int idx = hwc->idx; 2459 u64 mask; 2460 2461 if (is_topdown_idx(idx)) { 2462 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2463 2464 /* 2465 * When there are other active TopDown events, 2466 * don't disable the fixed counter 3. 2467 */ 2468 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) 2469 return; 2470 idx = INTEL_PMC_IDX_FIXED_SLOTS; 2471 } 2472 2473 intel_clear_masks(event, idx); 2474 2475 mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK); 2476 cpuc->fixed_ctrl_val &= ~mask; 2477 } 2478 2479 static void intel_pmu_disable_event(struct perf_event *event) 2480 { 2481 struct hw_perf_event *hwc = &event->hw; 2482 int idx = hwc->idx; 2483 2484 switch (idx) { 2485 case 0 ... INTEL_PMC_IDX_FIXED - 1: 2486 intel_clear_masks(event, idx); 2487 x86_pmu_disable_event(event); 2488 break; 2489 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: 2490 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: 2491 intel_pmu_disable_fixed(event); 2492 break; 2493 case INTEL_PMC_IDX_FIXED_BTS: 2494 intel_pmu_disable_bts(); 2495 intel_pmu_drain_bts_buffer(); 2496 return; 2497 case INTEL_PMC_IDX_FIXED_VLBR: 2498 intel_clear_masks(event, idx); 2499 break; 2500 default: 2501 intel_clear_masks(event, idx); 2502 pr_warn("Failed to disable the event with invalid index %d\n", 2503 idx); 2504 return; 2505 } 2506 2507 /* 2508 * Needs to be called after x86_pmu_disable_event, 2509 * so we don't trigger the event without PEBS bit set. 2510 */ 2511 if (unlikely(event->attr.precise_ip)) 2512 intel_pmu_pebs_disable(event); 2513 } 2514 2515 static void intel_pmu_assign_event(struct perf_event *event, int idx) 2516 { 2517 if (is_pebs_pt(event)) 2518 perf_report_aux_output_id(event, idx); 2519 } 2520 2521 static void intel_pmu_del_event(struct perf_event *event) 2522 { 2523 if (needs_branch_stack(event)) 2524 intel_pmu_lbr_del(event); 2525 if (event->attr.precise_ip) 2526 intel_pmu_pebs_del(event); 2527 } 2528 2529 static int icl_set_topdown_event_period(struct perf_event *event) 2530 { 2531 struct hw_perf_event *hwc = &event->hw; 2532 s64 left = local64_read(&hwc->period_left); 2533 2534 /* 2535 * The values in PERF_METRICS MSR are derived from fixed counter 3. 2536 * Software should start both registers, PERF_METRICS and fixed 2537 * counter 3, from zero. 2538 * Clear PERF_METRICS and Fixed counter 3 in initialization. 2539 * After that, both MSRs will be cleared for each read. 2540 * Don't need to clear them again. 2541 */ 2542 if (left == x86_pmu.max_period) { 2543 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); 2544 wrmsrl(MSR_PERF_METRICS, 0); 2545 hwc->saved_slots = 0; 2546 hwc->saved_metric = 0; 2547 } 2548 2549 if ((hwc->saved_slots) && is_slots_event(event)) { 2550 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots); 2551 wrmsrl(MSR_PERF_METRICS, hwc->saved_metric); 2552 } 2553 2554 perf_event_update_userpage(event); 2555 2556 return 0; 2557 } 2558 2559 static int adl_set_topdown_event_period(struct perf_event *event) 2560 { 2561 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 2562 2563 if (pmu->cpu_type != hybrid_big) 2564 return 0; 2565 2566 return icl_set_topdown_event_period(event); 2567 } 2568 2569 DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period); 2570 2571 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx) 2572 { 2573 u32 val; 2574 2575 /* 2576 * The metric is reported as an 8bit integer fraction 2577 * summing up to 0xff. 2578 * slots-in-metric = (Metric / 0xff) * slots 2579 */ 2580 val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff; 2581 return mul_u64_u32_div(slots, val, 0xff); 2582 } 2583 2584 static u64 icl_get_topdown_value(struct perf_event *event, 2585 u64 slots, u64 metrics) 2586 { 2587 int idx = event->hw.idx; 2588 u64 delta; 2589 2590 if (is_metric_idx(idx)) 2591 delta = icl_get_metrics_event_value(metrics, slots, idx); 2592 else 2593 delta = slots; 2594 2595 return delta; 2596 } 2597 2598 static void __icl_update_topdown_event(struct perf_event *event, 2599 u64 slots, u64 metrics, 2600 u64 last_slots, u64 last_metrics) 2601 { 2602 u64 delta, last = 0; 2603 2604 delta = icl_get_topdown_value(event, slots, metrics); 2605 if (last_slots) 2606 last = icl_get_topdown_value(event, last_slots, last_metrics); 2607 2608 /* 2609 * The 8bit integer fraction of metric may be not accurate, 2610 * especially when the changes is very small. 2611 * For example, if only a few bad_spec happens, the fraction 2612 * may be reduced from 1 to 0. If so, the bad_spec event value 2613 * will be 0 which is definitely less than the last value. 2614 * Avoid update event->count for this case. 2615 */ 2616 if (delta > last) { 2617 delta -= last; 2618 local64_add(delta, &event->count); 2619 } 2620 } 2621 2622 static void update_saved_topdown_regs(struct perf_event *event, u64 slots, 2623 u64 metrics, int metric_end) 2624 { 2625 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2626 struct perf_event *other; 2627 int idx; 2628 2629 event->hw.saved_slots = slots; 2630 event->hw.saved_metric = metrics; 2631 2632 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) { 2633 if (!is_topdown_idx(idx)) 2634 continue; 2635 other = cpuc->events[idx]; 2636 other->hw.saved_slots = slots; 2637 other->hw.saved_metric = metrics; 2638 } 2639 } 2640 2641 /* 2642 * Update all active Topdown events. 2643 * 2644 * The PERF_METRICS and Fixed counter 3 are read separately. The values may be 2645 * modify by a NMI. PMU has to be disabled before calling this function. 2646 */ 2647 2648 static u64 intel_update_topdown_event(struct perf_event *event, int metric_end) 2649 { 2650 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2651 struct perf_event *other; 2652 u64 slots, metrics; 2653 bool reset = true; 2654 int idx; 2655 2656 /* read Fixed counter 3 */ 2657 rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots); 2658 if (!slots) 2659 return 0; 2660 2661 /* read PERF_METRICS */ 2662 rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics); 2663 2664 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) { 2665 if (!is_topdown_idx(idx)) 2666 continue; 2667 other = cpuc->events[idx]; 2668 __icl_update_topdown_event(other, slots, metrics, 2669 event ? event->hw.saved_slots : 0, 2670 event ? event->hw.saved_metric : 0); 2671 } 2672 2673 /* 2674 * Check and update this event, which may have been cleared 2675 * in active_mask e.g. x86_pmu_stop() 2676 */ 2677 if (event && !test_bit(event->hw.idx, cpuc->active_mask)) { 2678 __icl_update_topdown_event(event, slots, metrics, 2679 event->hw.saved_slots, 2680 event->hw.saved_metric); 2681 2682 /* 2683 * In x86_pmu_stop(), the event is cleared in active_mask first, 2684 * then drain the delta, which indicates context switch for 2685 * counting. 2686 * Save metric and slots for context switch. 2687 * Don't need to reset the PERF_METRICS and Fixed counter 3. 2688 * Because the values will be restored in next schedule in. 2689 */ 2690 update_saved_topdown_regs(event, slots, metrics, metric_end); 2691 reset = false; 2692 } 2693 2694 if (reset) { 2695 /* The fixed counter 3 has to be written before the PERF_METRICS. */ 2696 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0); 2697 wrmsrl(MSR_PERF_METRICS, 0); 2698 if (event) 2699 update_saved_topdown_regs(event, 0, 0, metric_end); 2700 } 2701 2702 return slots; 2703 } 2704 2705 static u64 icl_update_topdown_event(struct perf_event *event) 2706 { 2707 return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE + 2708 x86_pmu.num_topdown_events - 1); 2709 } 2710 2711 static u64 adl_update_topdown_event(struct perf_event *event) 2712 { 2713 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 2714 2715 if (pmu->cpu_type != hybrid_big) 2716 return 0; 2717 2718 return icl_update_topdown_event(event); 2719 } 2720 2721 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update); 2722 2723 static void intel_pmu_read_topdown_event(struct perf_event *event) 2724 { 2725 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2726 2727 /* Only need to call update_topdown_event() once for group read. */ 2728 if ((cpuc->txn_flags & PERF_PMU_TXN_READ) && 2729 !is_slots_event(event)) 2730 return; 2731 2732 perf_pmu_disable(event->pmu); 2733 static_call(intel_pmu_update_topdown_event)(event); 2734 perf_pmu_enable(event->pmu); 2735 } 2736 2737 static void intel_pmu_read_event(struct perf_event *event) 2738 { 2739 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) 2740 intel_pmu_auto_reload_read(event); 2741 else if (is_topdown_count(event)) 2742 intel_pmu_read_topdown_event(event); 2743 else 2744 x86_perf_event_update(event); 2745 } 2746 2747 static void intel_pmu_enable_fixed(struct perf_event *event) 2748 { 2749 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2750 struct hw_perf_event *hwc = &event->hw; 2751 u64 mask, bits = 0; 2752 int idx = hwc->idx; 2753 2754 if (is_topdown_idx(idx)) { 2755 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2756 /* 2757 * When there are other active TopDown events, 2758 * don't enable the fixed counter 3 again. 2759 */ 2760 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) 2761 return; 2762 2763 idx = INTEL_PMC_IDX_FIXED_SLOTS; 2764 } 2765 2766 intel_set_masks(event, idx); 2767 2768 /* 2769 * Enable IRQ generation (0x8), if not PEBS, 2770 * and enable ring-3 counting (0x2) and ring-0 counting (0x1) 2771 * if requested: 2772 */ 2773 if (!event->attr.precise_ip) 2774 bits |= INTEL_FIXED_0_ENABLE_PMI; 2775 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) 2776 bits |= INTEL_FIXED_0_USER; 2777 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) 2778 bits |= INTEL_FIXED_0_KERNEL; 2779 2780 /* 2781 * ANY bit is supported in v3 and up 2782 */ 2783 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) 2784 bits |= INTEL_FIXED_0_ANYTHREAD; 2785 2786 idx -= INTEL_PMC_IDX_FIXED; 2787 bits = intel_fixed_bits_by_idx(idx, bits); 2788 mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK); 2789 2790 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) { 2791 bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE); 2792 mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE); 2793 } 2794 2795 cpuc->fixed_ctrl_val &= ~mask; 2796 cpuc->fixed_ctrl_val |= bits; 2797 } 2798 2799 static void intel_pmu_enable_event(struct perf_event *event) 2800 { 2801 struct hw_perf_event *hwc = &event->hw; 2802 int idx = hwc->idx; 2803 2804 if (unlikely(event->attr.precise_ip)) 2805 intel_pmu_pebs_enable(event); 2806 2807 switch (idx) { 2808 case 0 ... INTEL_PMC_IDX_FIXED - 1: 2809 intel_set_masks(event, idx); 2810 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); 2811 break; 2812 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: 2813 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: 2814 intel_pmu_enable_fixed(event); 2815 break; 2816 case INTEL_PMC_IDX_FIXED_BTS: 2817 if (!__this_cpu_read(cpu_hw_events.enabled)) 2818 return; 2819 intel_pmu_enable_bts(hwc->config); 2820 break; 2821 case INTEL_PMC_IDX_FIXED_VLBR: 2822 intel_set_masks(event, idx); 2823 break; 2824 default: 2825 pr_warn("Failed to enable the event with invalid index %d\n", 2826 idx); 2827 } 2828 } 2829 2830 static void intel_pmu_add_event(struct perf_event *event) 2831 { 2832 if (event->attr.precise_ip) 2833 intel_pmu_pebs_add(event); 2834 if (needs_branch_stack(event)) 2835 intel_pmu_lbr_add(event); 2836 } 2837 2838 /* 2839 * Save and restart an expired event. Called by NMI contexts, 2840 * so it has to be careful about preempting normal event ops: 2841 */ 2842 int intel_pmu_save_and_restart(struct perf_event *event) 2843 { 2844 static_call(x86_pmu_update)(event); 2845 /* 2846 * For a checkpointed counter always reset back to 0. This 2847 * avoids a situation where the counter overflows, aborts the 2848 * transaction and is then set back to shortly before the 2849 * overflow, and overflows and aborts again. 2850 */ 2851 if (unlikely(event_is_checkpointed(event))) { 2852 /* No race with NMIs because the counter should not be armed */ 2853 wrmsrl(event->hw.event_base, 0); 2854 local64_set(&event->hw.prev_count, 0); 2855 } 2856 return static_call(x86_pmu_set_period)(event); 2857 } 2858 2859 static int intel_pmu_set_period(struct perf_event *event) 2860 { 2861 if (unlikely(is_topdown_count(event))) 2862 return static_call(intel_pmu_set_topdown_event_period)(event); 2863 2864 return x86_perf_event_set_period(event); 2865 } 2866 2867 static u64 intel_pmu_update(struct perf_event *event) 2868 { 2869 if (unlikely(is_topdown_count(event))) 2870 return static_call(intel_pmu_update_topdown_event)(event); 2871 2872 return x86_perf_event_update(event); 2873 } 2874 2875 static void intel_pmu_reset(void) 2876 { 2877 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); 2878 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2879 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); 2880 int num_counters = hybrid(cpuc->pmu, num_counters); 2881 unsigned long flags; 2882 int idx; 2883 2884 if (!num_counters) 2885 return; 2886 2887 local_irq_save(flags); 2888 2889 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id()); 2890 2891 for (idx = 0; idx < num_counters; idx++) { 2892 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull); 2893 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull); 2894 } 2895 for (idx = 0; idx < num_counters_fixed; idx++) { 2896 if (fixed_counter_disabled(idx, cpuc->pmu)) 2897 continue; 2898 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); 2899 } 2900 2901 if (ds) 2902 ds->bts_index = ds->bts_buffer_base; 2903 2904 /* Ack all overflows and disable fixed counters */ 2905 if (x86_pmu.version >= 2) { 2906 intel_pmu_ack_status(intel_pmu_get_status()); 2907 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 2908 } 2909 2910 /* Reset LBRs and LBR freezing */ 2911 if (x86_pmu.lbr_nr) { 2912 update_debugctlmsr(get_debugctlmsr() & 2913 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR)); 2914 } 2915 2916 local_irq_restore(flags); 2917 } 2918 2919 /* 2920 * We may be running with guest PEBS events created by KVM, and the 2921 * PEBS records are logged into the guest's DS and invisible to host. 2922 * 2923 * In the case of guest PEBS overflow, we only trigger a fake event 2924 * to emulate the PEBS overflow PMI for guest PEBS counters in KVM. 2925 * The guest will then vm-entry and check the guest DS area to read 2926 * the guest PEBS records. 2927 * 2928 * The contents and other behavior of the guest event do not matter. 2929 */ 2930 static void x86_pmu_handle_guest_pebs(struct pt_regs *regs, 2931 struct perf_sample_data *data) 2932 { 2933 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2934 u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask; 2935 struct perf_event *event = NULL; 2936 int bit; 2937 2938 if (!unlikely(perf_guest_state())) 2939 return; 2940 2941 if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active || 2942 !guest_pebs_idxs) 2943 return; 2944 2945 for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs, 2946 INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed) { 2947 event = cpuc->events[bit]; 2948 if (!event->attr.precise_ip) 2949 continue; 2950 2951 perf_sample_data_init(data, 0, event->hw.last_period); 2952 if (perf_event_overflow(event, data, regs)) 2953 x86_pmu_stop(event, 0); 2954 2955 /* Inject one fake event is enough. */ 2956 break; 2957 } 2958 } 2959 2960 static int handle_pmi_common(struct pt_regs *regs, u64 status) 2961 { 2962 struct perf_sample_data data; 2963 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2964 int bit; 2965 int handled = 0; 2966 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); 2967 2968 inc_irq_stat(apic_perf_irqs); 2969 2970 /* 2971 * Ignore a range of extra bits in status that do not indicate 2972 * overflow by themselves. 2973 */ 2974 status &= ~(GLOBAL_STATUS_COND_CHG | 2975 GLOBAL_STATUS_ASIF | 2976 GLOBAL_STATUS_LBRS_FROZEN); 2977 if (!status) 2978 return 0; 2979 /* 2980 * In case multiple PEBS events are sampled at the same time, 2981 * it is possible to have GLOBAL_STATUS bit 62 set indicating 2982 * PEBS buffer overflow and also seeing at most 3 PEBS counters 2983 * having their bits set in the status register. This is a sign 2984 * that there was at least one PEBS record pending at the time 2985 * of the PMU interrupt. PEBS counters must only be processed 2986 * via the drain_pebs() calls and not via the regular sample 2987 * processing loop coming after that the function, otherwise 2988 * phony regular samples may be generated in the sampling buffer 2989 * not marked with the EXACT tag. Another possibility is to have 2990 * one PEBS event and at least one non-PEBS event which overflows 2991 * while PEBS has armed. In this case, bit 62 of GLOBAL_STATUS will 2992 * not be set, yet the overflow status bit for the PEBS counter will 2993 * be on Skylake. 2994 * 2995 * To avoid this problem, we systematically ignore the PEBS-enabled 2996 * counters from the GLOBAL_STATUS mask and we always process PEBS 2997 * events via drain_pebs(). 2998 */ 2999 status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable); 3000 3001 /* 3002 * PEBS overflow sets bit 62 in the global status register 3003 */ 3004 if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) { 3005 u64 pebs_enabled = cpuc->pebs_enabled; 3006 3007 handled++; 3008 x86_pmu_handle_guest_pebs(regs, &data); 3009 x86_pmu.drain_pebs(regs, &data); 3010 status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI; 3011 3012 /* 3013 * PMI throttle may be triggered, which stops the PEBS event. 3014 * Although cpuc->pebs_enabled is updated accordingly, the 3015 * MSR_IA32_PEBS_ENABLE is not updated. Because the 3016 * cpuc->enabled has been forced to 0 in PMI. 3017 * Update the MSR if pebs_enabled is changed. 3018 */ 3019 if (pebs_enabled != cpuc->pebs_enabled) 3020 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 3021 } 3022 3023 /* 3024 * Intel PT 3025 */ 3026 if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) { 3027 handled++; 3028 if (!perf_guest_handle_intel_pt_intr()) 3029 intel_pt_interrupt(); 3030 } 3031 3032 /* 3033 * Intel Perf metrics 3034 */ 3035 if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) { 3036 handled++; 3037 static_call(intel_pmu_update_topdown_event)(NULL); 3038 } 3039 3040 /* 3041 * Checkpointed counters can lead to 'spurious' PMIs because the 3042 * rollback caused by the PMI will have cleared the overflow status 3043 * bit. Therefore always force probe these counters. 3044 */ 3045 status |= cpuc->intel_cp_status; 3046 3047 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { 3048 struct perf_event *event = cpuc->events[bit]; 3049 3050 handled++; 3051 3052 if (!test_bit(bit, cpuc->active_mask)) 3053 continue; 3054 3055 if (!intel_pmu_save_and_restart(event)) 3056 continue; 3057 3058 perf_sample_data_init(&data, 0, event->hw.last_period); 3059 3060 if (has_branch_stack(event)) 3061 perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); 3062 3063 if (perf_event_overflow(event, &data, regs)) 3064 x86_pmu_stop(event, 0); 3065 } 3066 3067 return handled; 3068 } 3069 3070 /* 3071 * This handler is triggered by the local APIC, so the APIC IRQ handling 3072 * rules apply: 3073 */ 3074 static int intel_pmu_handle_irq(struct pt_regs *regs) 3075 { 3076 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 3077 bool late_ack = hybrid_bit(cpuc->pmu, late_ack); 3078 bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack); 3079 int loops; 3080 u64 status; 3081 int handled; 3082 int pmu_enabled; 3083 3084 /* 3085 * Save the PMU state. 3086 * It needs to be restored when leaving the handler. 3087 */ 3088 pmu_enabled = cpuc->enabled; 3089 /* 3090 * In general, the early ACK is only applied for old platforms. 3091 * For the big core starts from Haswell, the late ACK should be 3092 * applied. 3093 * For the small core after Tremont, we have to do the ACK right 3094 * before re-enabling counters, which is in the middle of the 3095 * NMI handler. 3096 */ 3097 if (!late_ack && !mid_ack) 3098 apic_write(APIC_LVTPC, APIC_DM_NMI); 3099 intel_bts_disable_local(); 3100 cpuc->enabled = 0; 3101 __intel_pmu_disable_all(true); 3102 handled = intel_pmu_drain_bts_buffer(); 3103 handled += intel_bts_interrupt(); 3104 status = intel_pmu_get_status(); 3105 if (!status) 3106 goto done; 3107 3108 loops = 0; 3109 again: 3110 intel_pmu_lbr_read(); 3111 intel_pmu_ack_status(status); 3112 if (++loops > 100) { 3113 static bool warned; 3114 3115 if (!warned) { 3116 WARN(1, "perfevents: irq loop stuck!\n"); 3117 perf_event_print_debug(); 3118 warned = true; 3119 } 3120 intel_pmu_reset(); 3121 goto done; 3122 } 3123 3124 handled += handle_pmi_common(regs, status); 3125 3126 /* 3127 * Repeat if there is more work to be done: 3128 */ 3129 status = intel_pmu_get_status(); 3130 if (status) 3131 goto again; 3132 3133 done: 3134 if (mid_ack) 3135 apic_write(APIC_LVTPC, APIC_DM_NMI); 3136 /* Only restore PMU state when it's active. See x86_pmu_disable(). */ 3137 cpuc->enabled = pmu_enabled; 3138 if (pmu_enabled) 3139 __intel_pmu_enable_all(0, true); 3140 intel_bts_enable_local(); 3141 3142 /* 3143 * Only unmask the NMI after the overflow counters 3144 * have been reset. This avoids spurious NMIs on 3145 * Haswell CPUs. 3146 */ 3147 if (late_ack) 3148 apic_write(APIC_LVTPC, APIC_DM_NMI); 3149 return handled; 3150 } 3151 3152 static struct event_constraint * 3153 intel_bts_constraints(struct perf_event *event) 3154 { 3155 if (unlikely(intel_pmu_has_bts(event))) 3156 return &bts_constraint; 3157 3158 return NULL; 3159 } 3160 3161 /* 3162 * Note: matches a fake event, like Fixed2. 3163 */ 3164 static struct event_constraint * 3165 intel_vlbr_constraints(struct perf_event *event) 3166 { 3167 struct event_constraint *c = &vlbr_constraint; 3168 3169 if (unlikely(constraint_match(c, event->hw.config))) { 3170 event->hw.flags |= c->flags; 3171 return c; 3172 } 3173 3174 return NULL; 3175 } 3176 3177 static int intel_alt_er(struct cpu_hw_events *cpuc, 3178 int idx, u64 config) 3179 { 3180 struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs); 3181 int alt_idx = idx; 3182 3183 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1)) 3184 return idx; 3185 3186 if (idx == EXTRA_REG_RSP_0) 3187 alt_idx = EXTRA_REG_RSP_1; 3188 3189 if (idx == EXTRA_REG_RSP_1) 3190 alt_idx = EXTRA_REG_RSP_0; 3191 3192 if (config & ~extra_regs[alt_idx].valid_mask) 3193 return idx; 3194 3195 return alt_idx; 3196 } 3197 3198 static void intel_fixup_er(struct perf_event *event, int idx) 3199 { 3200 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs); 3201 event->hw.extra_reg.idx = idx; 3202 3203 if (idx == EXTRA_REG_RSP_0) { 3204 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 3205 event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event; 3206 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; 3207 } else if (idx == EXTRA_REG_RSP_1) { 3208 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 3209 event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event; 3210 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; 3211 } 3212 } 3213 3214 /* 3215 * manage allocation of shared extra msr for certain events 3216 * 3217 * sharing can be: 3218 * per-cpu: to be shared between the various events on a single PMU 3219 * per-core: per-cpu + shared by HT threads 3220 */ 3221 static struct event_constraint * 3222 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, 3223 struct perf_event *event, 3224 struct hw_perf_event_extra *reg) 3225 { 3226 struct event_constraint *c = &emptyconstraint; 3227 struct er_account *era; 3228 unsigned long flags; 3229 int idx = reg->idx; 3230 3231 /* 3232 * reg->alloc can be set due to existing state, so for fake cpuc we 3233 * need to ignore this, otherwise we might fail to allocate proper fake 3234 * state for this extra reg constraint. Also see the comment below. 3235 */ 3236 if (reg->alloc && !cpuc->is_fake) 3237 return NULL; /* call x86_get_event_constraint() */ 3238 3239 again: 3240 era = &cpuc->shared_regs->regs[idx]; 3241 /* 3242 * we use spin_lock_irqsave() to avoid lockdep issues when 3243 * passing a fake cpuc 3244 */ 3245 raw_spin_lock_irqsave(&era->lock, flags); 3246 3247 if (!atomic_read(&era->ref) || era->config == reg->config) { 3248 3249 /* 3250 * If its a fake cpuc -- as per validate_{group,event}() we 3251 * shouldn't touch event state and we can avoid doing so 3252 * since both will only call get_event_constraints() once 3253 * on each event, this avoids the need for reg->alloc. 3254 * 3255 * Not doing the ER fixup will only result in era->reg being 3256 * wrong, but since we won't actually try and program hardware 3257 * this isn't a problem either. 3258 */ 3259 if (!cpuc->is_fake) { 3260 if (idx != reg->idx) 3261 intel_fixup_er(event, idx); 3262 3263 /* 3264 * x86_schedule_events() can call get_event_constraints() 3265 * multiple times on events in the case of incremental 3266 * scheduling(). reg->alloc ensures we only do the ER 3267 * allocation once. 3268 */ 3269 reg->alloc = 1; 3270 } 3271 3272 /* lock in msr value */ 3273 era->config = reg->config; 3274 era->reg = reg->reg; 3275 3276 /* one more user */ 3277 atomic_inc(&era->ref); 3278 3279 /* 3280 * need to call x86_get_event_constraint() 3281 * to check if associated event has constraints 3282 */ 3283 c = NULL; 3284 } else { 3285 idx = intel_alt_er(cpuc, idx, reg->config); 3286 if (idx != reg->idx) { 3287 raw_spin_unlock_irqrestore(&era->lock, flags); 3288 goto again; 3289 } 3290 } 3291 raw_spin_unlock_irqrestore(&era->lock, flags); 3292 3293 return c; 3294 } 3295 3296 static void 3297 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc, 3298 struct hw_perf_event_extra *reg) 3299 { 3300 struct er_account *era; 3301 3302 /* 3303 * Only put constraint if extra reg was actually allocated. Also takes 3304 * care of event which do not use an extra shared reg. 3305 * 3306 * Also, if this is a fake cpuc we shouldn't touch any event state 3307 * (reg->alloc) and we don't care about leaving inconsistent cpuc state 3308 * either since it'll be thrown out. 3309 */ 3310 if (!reg->alloc || cpuc->is_fake) 3311 return; 3312 3313 era = &cpuc->shared_regs->regs[reg->idx]; 3314 3315 /* one fewer user */ 3316 atomic_dec(&era->ref); 3317 3318 /* allocate again next time */ 3319 reg->alloc = 0; 3320 } 3321 3322 static struct event_constraint * 3323 intel_shared_regs_constraints(struct cpu_hw_events *cpuc, 3324 struct perf_event *event) 3325 { 3326 struct event_constraint *c = NULL, *d; 3327 struct hw_perf_event_extra *xreg, *breg; 3328 3329 xreg = &event->hw.extra_reg; 3330 if (xreg->idx != EXTRA_REG_NONE) { 3331 c = __intel_shared_reg_get_constraints(cpuc, event, xreg); 3332 if (c == &emptyconstraint) 3333 return c; 3334 } 3335 breg = &event->hw.branch_reg; 3336 if (breg->idx != EXTRA_REG_NONE) { 3337 d = __intel_shared_reg_get_constraints(cpuc, event, breg); 3338 if (d == &emptyconstraint) { 3339 __intel_shared_reg_put_constraints(cpuc, xreg); 3340 c = d; 3341 } 3342 } 3343 return c; 3344 } 3345 3346 struct event_constraint * 3347 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 3348 struct perf_event *event) 3349 { 3350 struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints); 3351 struct event_constraint *c; 3352 3353 if (event_constraints) { 3354 for_each_event_constraint(c, event_constraints) { 3355 if (constraint_match(c, event->hw.config)) { 3356 event->hw.flags |= c->flags; 3357 return c; 3358 } 3359 } 3360 } 3361 3362 return &hybrid_var(cpuc->pmu, unconstrained); 3363 } 3364 3365 static struct event_constraint * 3366 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 3367 struct perf_event *event) 3368 { 3369 struct event_constraint *c; 3370 3371 c = intel_vlbr_constraints(event); 3372 if (c) 3373 return c; 3374 3375 c = intel_bts_constraints(event); 3376 if (c) 3377 return c; 3378 3379 c = intel_shared_regs_constraints(cpuc, event); 3380 if (c) 3381 return c; 3382 3383 c = intel_pebs_constraints(event); 3384 if (c) 3385 return c; 3386 3387 return x86_get_event_constraints(cpuc, idx, event); 3388 } 3389 3390 static void 3391 intel_start_scheduling(struct cpu_hw_events *cpuc) 3392 { 3393 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 3394 struct intel_excl_states *xl; 3395 int tid = cpuc->excl_thread_id; 3396 3397 /* 3398 * nothing needed if in group validation mode 3399 */ 3400 if (cpuc->is_fake || !is_ht_workaround_enabled()) 3401 return; 3402 3403 /* 3404 * no exclusion needed 3405 */ 3406 if (WARN_ON_ONCE(!excl_cntrs)) 3407 return; 3408 3409 xl = &excl_cntrs->states[tid]; 3410 3411 xl->sched_started = true; 3412 /* 3413 * lock shared state until we are done scheduling 3414 * in stop_event_scheduling() 3415 * makes scheduling appear as a transaction 3416 */ 3417 raw_spin_lock(&excl_cntrs->lock); 3418 } 3419 3420 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) 3421 { 3422 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 3423 struct event_constraint *c = cpuc->event_constraint[idx]; 3424 struct intel_excl_states *xl; 3425 int tid = cpuc->excl_thread_id; 3426 3427 if (cpuc->is_fake || !is_ht_workaround_enabled()) 3428 return; 3429 3430 if (WARN_ON_ONCE(!excl_cntrs)) 3431 return; 3432 3433 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) 3434 return; 3435 3436 xl = &excl_cntrs->states[tid]; 3437 3438 lockdep_assert_held(&excl_cntrs->lock); 3439 3440 if (c->flags & PERF_X86_EVENT_EXCL) 3441 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE; 3442 else 3443 xl->state[cntr] = INTEL_EXCL_SHARED; 3444 } 3445 3446 static void 3447 intel_stop_scheduling(struct cpu_hw_events *cpuc) 3448 { 3449 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 3450 struct intel_excl_states *xl; 3451 int tid = cpuc->excl_thread_id; 3452 3453 /* 3454 * nothing needed if in group validation mode 3455 */ 3456 if (cpuc->is_fake || !is_ht_workaround_enabled()) 3457 return; 3458 /* 3459 * no exclusion needed 3460 */ 3461 if (WARN_ON_ONCE(!excl_cntrs)) 3462 return; 3463 3464 xl = &excl_cntrs->states[tid]; 3465 3466 xl->sched_started = false; 3467 /* 3468 * release shared state lock (acquired in intel_start_scheduling()) 3469 */ 3470 raw_spin_unlock(&excl_cntrs->lock); 3471 } 3472 3473 static struct event_constraint * 3474 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx) 3475 { 3476 WARN_ON_ONCE(!cpuc->constraint_list); 3477 3478 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { 3479 struct event_constraint *cx; 3480 3481 /* 3482 * grab pre-allocated constraint entry 3483 */ 3484 cx = &cpuc->constraint_list[idx]; 3485 3486 /* 3487 * initialize dynamic constraint 3488 * with static constraint 3489 */ 3490 *cx = *c; 3491 3492 /* 3493 * mark constraint as dynamic 3494 */ 3495 cx->flags |= PERF_X86_EVENT_DYNAMIC; 3496 c = cx; 3497 } 3498 3499 return c; 3500 } 3501 3502 static struct event_constraint * 3503 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, 3504 int idx, struct event_constraint *c) 3505 { 3506 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 3507 struct intel_excl_states *xlo; 3508 int tid = cpuc->excl_thread_id; 3509 int is_excl, i, w; 3510 3511 /* 3512 * validating a group does not require 3513 * enforcing cross-thread exclusion 3514 */ 3515 if (cpuc->is_fake || !is_ht_workaround_enabled()) 3516 return c; 3517 3518 /* 3519 * no exclusion needed 3520 */ 3521 if (WARN_ON_ONCE(!excl_cntrs)) 3522 return c; 3523 3524 /* 3525 * because we modify the constraint, we need 3526 * to make a copy. Static constraints come 3527 * from static const tables. 3528 * 3529 * only needed when constraint has not yet 3530 * been cloned (marked dynamic) 3531 */ 3532 c = dyn_constraint(cpuc, c, idx); 3533 3534 /* 3535 * From here on, the constraint is dynamic. 3536 * Either it was just allocated above, or it 3537 * was allocated during a earlier invocation 3538 * of this function 3539 */ 3540 3541 /* 3542 * state of sibling HT 3543 */ 3544 xlo = &excl_cntrs->states[tid ^ 1]; 3545 3546 /* 3547 * event requires exclusive counter access 3548 * across HT threads 3549 */ 3550 is_excl = c->flags & PERF_X86_EVENT_EXCL; 3551 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) { 3552 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT; 3553 if (!cpuc->n_excl++) 3554 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1); 3555 } 3556 3557 /* 3558 * Modify static constraint with current dynamic 3559 * state of thread 3560 * 3561 * EXCLUSIVE: sibling counter measuring exclusive event 3562 * SHARED : sibling counter measuring non-exclusive event 3563 * UNUSED : sibling counter unused 3564 */ 3565 w = c->weight; 3566 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) { 3567 /* 3568 * exclusive event in sibling counter 3569 * our corresponding counter cannot be used 3570 * regardless of our event 3571 */ 3572 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) { 3573 __clear_bit(i, c->idxmsk); 3574 w--; 3575 continue; 3576 } 3577 /* 3578 * if measuring an exclusive event, sibling 3579 * measuring non-exclusive, then counter cannot 3580 * be used 3581 */ 3582 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) { 3583 __clear_bit(i, c->idxmsk); 3584 w--; 3585 continue; 3586 } 3587 } 3588 3589 /* 3590 * if we return an empty mask, then switch 3591 * back to static empty constraint to avoid 3592 * the cost of freeing later on 3593 */ 3594 if (!w) 3595 c = &emptyconstraint; 3596 3597 c->weight = w; 3598 3599 return c; 3600 } 3601 3602 static struct event_constraint * 3603 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 3604 struct perf_event *event) 3605 { 3606 struct event_constraint *c1, *c2; 3607 3608 c1 = cpuc->event_constraint[idx]; 3609 3610 /* 3611 * first time only 3612 * - static constraint: no change across incremental scheduling calls 3613 * - dynamic constraint: handled by intel_get_excl_constraints() 3614 */ 3615 c2 = __intel_get_event_constraints(cpuc, idx, event); 3616 if (c1) { 3617 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC)); 3618 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX); 3619 c1->weight = c2->weight; 3620 c2 = c1; 3621 } 3622 3623 if (cpuc->excl_cntrs) 3624 return intel_get_excl_constraints(cpuc, event, idx, c2); 3625 3626 return c2; 3627 } 3628 3629 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc, 3630 struct perf_event *event) 3631 { 3632 struct hw_perf_event *hwc = &event->hw; 3633 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 3634 int tid = cpuc->excl_thread_id; 3635 struct intel_excl_states *xl; 3636 3637 /* 3638 * nothing needed if in group validation mode 3639 */ 3640 if (cpuc->is_fake) 3641 return; 3642 3643 if (WARN_ON_ONCE(!excl_cntrs)) 3644 return; 3645 3646 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) { 3647 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT; 3648 if (!--cpuc->n_excl) 3649 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0); 3650 } 3651 3652 /* 3653 * If event was actually assigned, then mark the counter state as 3654 * unused now. 3655 */ 3656 if (hwc->idx >= 0) { 3657 xl = &excl_cntrs->states[tid]; 3658 3659 /* 3660 * put_constraint may be called from x86_schedule_events() 3661 * which already has the lock held so here make locking 3662 * conditional. 3663 */ 3664 if (!xl->sched_started) 3665 raw_spin_lock(&excl_cntrs->lock); 3666 3667 xl->state[hwc->idx] = INTEL_EXCL_UNUSED; 3668 3669 if (!xl->sched_started) 3670 raw_spin_unlock(&excl_cntrs->lock); 3671 } 3672 } 3673 3674 static void 3675 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, 3676 struct perf_event *event) 3677 { 3678 struct hw_perf_event_extra *reg; 3679 3680 reg = &event->hw.extra_reg; 3681 if (reg->idx != EXTRA_REG_NONE) 3682 __intel_shared_reg_put_constraints(cpuc, reg); 3683 3684 reg = &event->hw.branch_reg; 3685 if (reg->idx != EXTRA_REG_NONE) 3686 __intel_shared_reg_put_constraints(cpuc, reg); 3687 } 3688 3689 static void intel_put_event_constraints(struct cpu_hw_events *cpuc, 3690 struct perf_event *event) 3691 { 3692 intel_put_shared_regs_event_constraints(cpuc, event); 3693 3694 /* 3695 * is PMU has exclusive counter restrictions, then 3696 * all events are subject to and must call the 3697 * put_excl_constraints() routine 3698 */ 3699 if (cpuc->excl_cntrs) 3700 intel_put_excl_constraints(cpuc, event); 3701 } 3702 3703 static void intel_pebs_aliases_core2(struct perf_event *event) 3704 { 3705 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 3706 /* 3707 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 3708 * (0x003c) so that we can use it with PEBS. 3709 * 3710 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't 3711 * PEBS capable. However we can use INST_RETIRED.ANY_P 3712 * (0x00c0), which is a PEBS capable event, to get the same 3713 * count. 3714 * 3715 * INST_RETIRED.ANY_P counts the number of cycles that retires 3716 * CNTMASK instructions. By setting CNTMASK to a value (16) 3717 * larger than the maximum number of instructions that can be 3718 * retired per cycle (4) and then inverting the condition, we 3719 * count all cycles that retire 16 or less instructions, which 3720 * is every cycle. 3721 * 3722 * Thereby we gain a PEBS capable cycle counter. 3723 */ 3724 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); 3725 3726 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 3727 event->hw.config = alt_config; 3728 } 3729 } 3730 3731 static void intel_pebs_aliases_snb(struct perf_event *event) 3732 { 3733 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 3734 /* 3735 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 3736 * (0x003c) so that we can use it with PEBS. 3737 * 3738 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't 3739 * PEBS capable. However we can use UOPS_RETIRED.ALL 3740 * (0x01c2), which is a PEBS capable event, to get the same 3741 * count. 3742 * 3743 * UOPS_RETIRED.ALL counts the number of cycles that retires 3744 * CNTMASK micro-ops. By setting CNTMASK to a value (16) 3745 * larger than the maximum number of micro-ops that can be 3746 * retired per cycle (4) and then inverting the condition, we 3747 * count all cycles that retire 16 or less micro-ops, which 3748 * is every cycle. 3749 * 3750 * Thereby we gain a PEBS capable cycle counter. 3751 */ 3752 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16); 3753 3754 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 3755 event->hw.config = alt_config; 3756 } 3757 } 3758 3759 static void intel_pebs_aliases_precdist(struct perf_event *event) 3760 { 3761 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 3762 /* 3763 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 3764 * (0x003c) so that we can use it with PEBS. 3765 * 3766 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't 3767 * PEBS capable. However we can use INST_RETIRED.PREC_DIST 3768 * (0x01c0), which is a PEBS capable event, to get the same 3769 * count. 3770 * 3771 * The PREC_DIST event has special support to minimize sample 3772 * shadowing effects. One drawback is that it can be 3773 * only programmed on counter 1, but that seems like an 3774 * acceptable trade off. 3775 */ 3776 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16); 3777 3778 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 3779 event->hw.config = alt_config; 3780 } 3781 } 3782 3783 static void intel_pebs_aliases_ivb(struct perf_event *event) 3784 { 3785 if (event->attr.precise_ip < 3) 3786 return intel_pebs_aliases_snb(event); 3787 return intel_pebs_aliases_precdist(event); 3788 } 3789 3790 static void intel_pebs_aliases_skl(struct perf_event *event) 3791 { 3792 if (event->attr.precise_ip < 3) 3793 return intel_pebs_aliases_core2(event); 3794 return intel_pebs_aliases_precdist(event); 3795 } 3796 3797 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) 3798 { 3799 unsigned long flags = x86_pmu.large_pebs_flags; 3800 3801 if (event->attr.use_clockid) 3802 flags &= ~PERF_SAMPLE_TIME; 3803 if (!event->attr.exclude_kernel) 3804 flags &= ~PERF_SAMPLE_REGS_USER; 3805 if (event->attr.sample_regs_user & ~PEBS_GP_REGS) 3806 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); 3807 return flags; 3808 } 3809 3810 static int intel_pmu_bts_config(struct perf_event *event) 3811 { 3812 struct perf_event_attr *attr = &event->attr; 3813 3814 if (unlikely(intel_pmu_has_bts(event))) { 3815 /* BTS is not supported by this architecture. */ 3816 if (!x86_pmu.bts_active) 3817 return -EOPNOTSUPP; 3818 3819 /* BTS is currently only allowed for user-mode. */ 3820 if (!attr->exclude_kernel) 3821 return -EOPNOTSUPP; 3822 3823 /* BTS is not allowed for precise events. */ 3824 if (attr->precise_ip) 3825 return -EOPNOTSUPP; 3826 3827 /* disallow bts if conflicting events are present */ 3828 if (x86_add_exclusive(x86_lbr_exclusive_lbr)) 3829 return -EBUSY; 3830 3831 event->destroy = hw_perf_lbr_event_destroy; 3832 } 3833 3834 return 0; 3835 } 3836 3837 static int core_pmu_hw_config(struct perf_event *event) 3838 { 3839 int ret = x86_pmu_hw_config(event); 3840 3841 if (ret) 3842 return ret; 3843 3844 return intel_pmu_bts_config(event); 3845 } 3846 3847 #define INTEL_TD_METRIC_AVAILABLE_MAX (INTEL_TD_METRIC_RETIRING + \ 3848 ((x86_pmu.num_topdown_events - 1) << 8)) 3849 3850 static bool is_available_metric_event(struct perf_event *event) 3851 { 3852 return is_metric_event(event) && 3853 event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX; 3854 } 3855 3856 static inline bool is_mem_loads_event(struct perf_event *event) 3857 { 3858 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01); 3859 } 3860 3861 static inline bool is_mem_loads_aux_event(struct perf_event *event) 3862 { 3863 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82); 3864 } 3865 3866 static inline bool require_mem_loads_aux_event(struct perf_event *event) 3867 { 3868 if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX)) 3869 return false; 3870 3871 if (is_hybrid()) 3872 return hybrid_pmu(event->pmu)->cpu_type == hybrid_big; 3873 3874 return true; 3875 } 3876 3877 static inline bool intel_pmu_has_cap(struct perf_event *event, int idx) 3878 { 3879 union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap); 3880 3881 return test_bit(idx, (unsigned long *)&intel_cap->capabilities); 3882 } 3883 3884 static int intel_pmu_hw_config(struct perf_event *event) 3885 { 3886 int ret = x86_pmu_hw_config(event); 3887 3888 if (ret) 3889 return ret; 3890 3891 ret = intel_pmu_bts_config(event); 3892 if (ret) 3893 return ret; 3894 3895 if (event->attr.precise_ip) { 3896 if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT) 3897 return -EINVAL; 3898 3899 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) { 3900 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; 3901 if (!(event->attr.sample_type & 3902 ~intel_pmu_large_pebs_flags(event))) { 3903 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS; 3904 event->attach_state |= PERF_ATTACH_SCHED_CB; 3905 } 3906 } 3907 if (x86_pmu.pebs_aliases) 3908 x86_pmu.pebs_aliases(event); 3909 } 3910 3911 if (needs_branch_stack(event)) { 3912 ret = intel_pmu_setup_lbr_filter(event); 3913 if (ret) 3914 return ret; 3915 event->attach_state |= PERF_ATTACH_SCHED_CB; 3916 3917 /* 3918 * BTS is set up earlier in this path, so don't account twice 3919 */ 3920 if (!unlikely(intel_pmu_has_bts(event))) { 3921 /* disallow lbr if conflicting events are present */ 3922 if (x86_add_exclusive(x86_lbr_exclusive_lbr)) 3923 return -EBUSY; 3924 3925 event->destroy = hw_perf_lbr_event_destroy; 3926 } 3927 } 3928 3929 if (event->attr.aux_output) { 3930 if (!event->attr.precise_ip) 3931 return -EINVAL; 3932 3933 event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT; 3934 } 3935 3936 if ((event->attr.type == PERF_TYPE_HARDWARE) || 3937 (event->attr.type == PERF_TYPE_HW_CACHE)) 3938 return 0; 3939 3940 /* 3941 * Config Topdown slots and metric events 3942 * 3943 * The slots event on Fixed Counter 3 can support sampling, 3944 * which will be handled normally in x86_perf_event_update(). 3945 * 3946 * Metric events don't support sampling and require being paired 3947 * with a slots event as group leader. When the slots event 3948 * is used in a metrics group, it too cannot support sampling. 3949 */ 3950 if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) { 3951 if (event->attr.config1 || event->attr.config2) 3952 return -EINVAL; 3953 3954 /* 3955 * The TopDown metrics events and slots event don't 3956 * support any filters. 3957 */ 3958 if (event->attr.config & X86_ALL_EVENT_FLAGS) 3959 return -EINVAL; 3960 3961 if (is_available_metric_event(event)) { 3962 struct perf_event *leader = event->group_leader; 3963 3964 /* The metric events don't support sampling. */ 3965 if (is_sampling_event(event)) 3966 return -EINVAL; 3967 3968 /* The metric events require a slots group leader. */ 3969 if (!is_slots_event(leader)) 3970 return -EINVAL; 3971 3972 /* 3973 * The leader/SLOTS must not be a sampling event for 3974 * metric use; hardware requires it starts at 0 when used 3975 * in conjunction with MSR_PERF_METRICS. 3976 */ 3977 if (is_sampling_event(leader)) 3978 return -EINVAL; 3979 3980 event->event_caps |= PERF_EV_CAP_SIBLING; 3981 /* 3982 * Only once we have a METRICs sibling do we 3983 * need TopDown magic. 3984 */ 3985 leader->hw.flags |= PERF_X86_EVENT_TOPDOWN; 3986 event->hw.flags |= PERF_X86_EVENT_TOPDOWN; 3987 } 3988 } 3989 3990 /* 3991 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR 3992 * doesn't function quite right. As a work-around it needs to always be 3993 * co-scheduled with a auxiliary event X86_CONFIG(.event=0x03, .umask=0x82). 3994 * The actual count of this second event is irrelevant it just needs 3995 * to be active to make the first event function correctly. 3996 * 3997 * In a group, the auxiliary event must be in front of the load latency 3998 * event. The rule is to simplify the implementation of the check. 3999 * That's because perf cannot have a complete group at the moment. 4000 */ 4001 if (require_mem_loads_aux_event(event) && 4002 (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) && 4003 is_mem_loads_event(event)) { 4004 struct perf_event *leader = event->group_leader; 4005 struct perf_event *sibling = NULL; 4006 4007 /* 4008 * When this memload event is also the first event (no group 4009 * exists yet), then there is no aux event before it. 4010 */ 4011 if (leader == event) 4012 return -ENODATA; 4013 4014 if (!is_mem_loads_aux_event(leader)) { 4015 for_each_sibling_event(sibling, leader) { 4016 if (is_mem_loads_aux_event(sibling)) 4017 break; 4018 } 4019 if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list)) 4020 return -ENODATA; 4021 } 4022 } 4023 4024 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY)) 4025 return 0; 4026 4027 if (x86_pmu.version < 3) 4028 return -EINVAL; 4029 4030 ret = perf_allow_cpu(&event->attr); 4031 if (ret) 4032 return ret; 4033 4034 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY; 4035 4036 return 0; 4037 } 4038 4039 /* 4040 * Currently, the only caller of this function is the atomic_switch_perf_msrs(). 4041 * The host perf conext helps to prepare the values of the real hardware for 4042 * a set of msrs that need to be switched atomically in a vmx transaction. 4043 * 4044 * For example, the pseudocode needed to add a new msr should look like: 4045 * 4046 * arr[(*nr)++] = (struct perf_guest_switch_msr){ 4047 * .msr = the hardware msr address, 4048 * .host = the value the hardware has when it doesn't run a guest, 4049 * .guest = the value the hardware has when it runs a guest, 4050 * }; 4051 * 4052 * These values have nothing to do with the emulated values the guest sees 4053 * when it uses {RD,WR}MSR, which should be handled by the KVM context, 4054 * specifically in the intel_pmu_{get,set}_msr(). 4055 */ 4056 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data) 4057 { 4058 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 4059 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; 4060 struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data; 4061 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); 4062 u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable; 4063 int global_ctrl, pebs_enable; 4064 4065 /* 4066 * In addition to obeying exclude_guest/exclude_host, remove bits being 4067 * used for PEBS when running a guest, because PEBS writes to virtual 4068 * addresses (not physical addresses). 4069 */ 4070 *nr = 0; 4071 global_ctrl = (*nr)++; 4072 arr[global_ctrl] = (struct perf_guest_switch_msr){ 4073 .msr = MSR_CORE_PERF_GLOBAL_CTRL, 4074 .host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask, 4075 .guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask, 4076 }; 4077 4078 if (!x86_pmu.pebs) 4079 return arr; 4080 4081 /* 4082 * If PMU counter has PEBS enabled it is not enough to 4083 * disable counter on a guest entry since PEBS memory 4084 * write can overshoot guest entry and corrupt guest 4085 * memory. Disabling PEBS solves the problem. 4086 * 4087 * Don't do this if the CPU already enforces it. 4088 */ 4089 if (x86_pmu.pebs_no_isolation) { 4090 arr[(*nr)++] = (struct perf_guest_switch_msr){ 4091 .msr = MSR_IA32_PEBS_ENABLE, 4092 .host = cpuc->pebs_enabled, 4093 .guest = 0, 4094 }; 4095 return arr; 4096 } 4097 4098 if (!kvm_pmu || !x86_pmu.pebs_ept) 4099 return arr; 4100 4101 arr[(*nr)++] = (struct perf_guest_switch_msr){ 4102 .msr = MSR_IA32_DS_AREA, 4103 .host = (unsigned long)cpuc->ds, 4104 .guest = kvm_pmu->ds_area, 4105 }; 4106 4107 if (x86_pmu.intel_cap.pebs_baseline) { 4108 arr[(*nr)++] = (struct perf_guest_switch_msr){ 4109 .msr = MSR_PEBS_DATA_CFG, 4110 .host = cpuc->active_pebs_data_cfg, 4111 .guest = kvm_pmu->pebs_data_cfg, 4112 }; 4113 } 4114 4115 pebs_enable = (*nr)++; 4116 arr[pebs_enable] = (struct perf_guest_switch_msr){ 4117 .msr = MSR_IA32_PEBS_ENABLE, 4118 .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask, 4119 .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask, 4120 }; 4121 4122 if (arr[pebs_enable].host) { 4123 /* Disable guest PEBS if host PEBS is enabled. */ 4124 arr[pebs_enable].guest = 0; 4125 } else { 4126 /* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */ 4127 arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask; 4128 arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask; 4129 /* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */ 4130 arr[global_ctrl].guest |= arr[pebs_enable].guest; 4131 } 4132 4133 return arr; 4134 } 4135 4136 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data) 4137 { 4138 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 4139 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; 4140 int idx; 4141 4142 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 4143 struct perf_event *event = cpuc->events[idx]; 4144 4145 arr[idx].msr = x86_pmu_config_addr(idx); 4146 arr[idx].host = arr[idx].guest = 0; 4147 4148 if (!test_bit(idx, cpuc->active_mask)) 4149 continue; 4150 4151 arr[idx].host = arr[idx].guest = 4152 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE; 4153 4154 if (event->attr.exclude_host) 4155 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE; 4156 else if (event->attr.exclude_guest) 4157 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE; 4158 } 4159 4160 *nr = x86_pmu.num_counters; 4161 return arr; 4162 } 4163 4164 static void core_pmu_enable_event(struct perf_event *event) 4165 { 4166 if (!event->attr.exclude_host) 4167 x86_pmu_enable_event(event); 4168 } 4169 4170 static void core_pmu_enable_all(int added) 4171 { 4172 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 4173 int idx; 4174 4175 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 4176 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; 4177 4178 if (!test_bit(idx, cpuc->active_mask) || 4179 cpuc->events[idx]->attr.exclude_host) 4180 continue; 4181 4182 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); 4183 } 4184 } 4185 4186 static int hsw_hw_config(struct perf_event *event) 4187 { 4188 int ret = intel_pmu_hw_config(event); 4189 4190 if (ret) 4191 return ret; 4192 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE)) 4193 return 0; 4194 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); 4195 4196 /* 4197 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with 4198 * PEBS or in ANY thread mode. Since the results are non-sensical forbid 4199 * this combination. 4200 */ 4201 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) && 4202 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) || 4203 event->attr.precise_ip > 0)) 4204 return -EOPNOTSUPP; 4205 4206 if (event_is_checkpointed(event)) { 4207 /* 4208 * Sampling of checkpointed events can cause situations where 4209 * the CPU constantly aborts because of a overflow, which is 4210 * then checkpointed back and ignored. Forbid checkpointing 4211 * for sampling. 4212 * 4213 * But still allow a long sampling period, so that perf stat 4214 * from KVM works. 4215 */ 4216 if (event->attr.sample_period > 0 && 4217 event->attr.sample_period < 0x7fffffff) 4218 return -EOPNOTSUPP; 4219 } 4220 return 0; 4221 } 4222 4223 static struct event_constraint counter0_constraint = 4224 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1); 4225 4226 static struct event_constraint counter1_constraint = 4227 INTEL_ALL_EVENT_CONSTRAINT(0, 0x2); 4228 4229 static struct event_constraint counter0_1_constraint = 4230 INTEL_ALL_EVENT_CONSTRAINT(0, 0x3); 4231 4232 static struct event_constraint counter2_constraint = 4233 EVENT_CONSTRAINT(0, 0x4, 0); 4234 4235 static struct event_constraint fixed0_constraint = 4236 FIXED_EVENT_CONSTRAINT(0x00c0, 0); 4237 4238 static struct event_constraint fixed0_counter0_constraint = 4239 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL); 4240 4241 static struct event_constraint fixed0_counter0_1_constraint = 4242 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL); 4243 4244 static struct event_constraint counters_1_7_constraint = 4245 INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL); 4246 4247 static struct event_constraint * 4248 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4249 struct perf_event *event) 4250 { 4251 struct event_constraint *c; 4252 4253 c = intel_get_event_constraints(cpuc, idx, event); 4254 4255 /* Handle special quirk on in_tx_checkpointed only in counter 2 */ 4256 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) { 4257 if (c->idxmsk64 & (1U << 2)) 4258 return &counter2_constraint; 4259 return &emptyconstraint; 4260 } 4261 4262 return c; 4263 } 4264 4265 static struct event_constraint * 4266 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4267 struct perf_event *event) 4268 { 4269 /* 4270 * Fixed counter 0 has less skid. 4271 * Force instruction:ppp in Fixed counter 0 4272 */ 4273 if ((event->attr.precise_ip == 3) && 4274 constraint_match(&fixed0_constraint, event->hw.config)) 4275 return &fixed0_constraint; 4276 4277 return hsw_get_event_constraints(cpuc, idx, event); 4278 } 4279 4280 static struct event_constraint * 4281 spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4282 struct perf_event *event) 4283 { 4284 struct event_constraint *c; 4285 4286 c = icl_get_event_constraints(cpuc, idx, event); 4287 4288 /* 4289 * The :ppp indicates the Precise Distribution (PDist) facility, which 4290 * is only supported on the GP counter 0. If a :ppp event which is not 4291 * available on the GP counter 0, error out. 4292 * Exception: Instruction PDIR is only available on the fixed counter 0. 4293 */ 4294 if ((event->attr.precise_ip == 3) && 4295 !constraint_match(&fixed0_constraint, event->hw.config)) { 4296 if (c->idxmsk64 & BIT_ULL(0)) 4297 return &counter0_constraint; 4298 4299 return &emptyconstraint; 4300 } 4301 4302 return c; 4303 } 4304 4305 static struct event_constraint * 4306 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4307 struct perf_event *event) 4308 { 4309 struct event_constraint *c; 4310 4311 /* :ppp means to do reduced skid PEBS which is PMC0 only. */ 4312 if (event->attr.precise_ip == 3) 4313 return &counter0_constraint; 4314 4315 c = intel_get_event_constraints(cpuc, idx, event); 4316 4317 return c; 4318 } 4319 4320 static struct event_constraint * 4321 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4322 struct perf_event *event) 4323 { 4324 struct event_constraint *c; 4325 4326 c = intel_get_event_constraints(cpuc, idx, event); 4327 4328 /* 4329 * :ppp means to do reduced skid PEBS, 4330 * which is available on PMC0 and fixed counter 0. 4331 */ 4332 if (event->attr.precise_ip == 3) { 4333 /* Force instruction:ppp on PMC0 and Fixed counter 0 */ 4334 if (constraint_match(&fixed0_constraint, event->hw.config)) 4335 return &fixed0_counter0_constraint; 4336 4337 return &counter0_constraint; 4338 } 4339 4340 return c; 4341 } 4342 4343 static bool allow_tsx_force_abort = true; 4344 4345 static struct event_constraint * 4346 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4347 struct perf_event *event) 4348 { 4349 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event); 4350 4351 /* 4352 * Without TFA we must not use PMC3. 4353 */ 4354 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) { 4355 c = dyn_constraint(cpuc, c, idx); 4356 c->idxmsk64 &= ~(1ULL << 3); 4357 c->weight--; 4358 } 4359 4360 return c; 4361 } 4362 4363 static struct event_constraint * 4364 adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4365 struct perf_event *event) 4366 { 4367 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 4368 4369 if (pmu->cpu_type == hybrid_big) 4370 return spr_get_event_constraints(cpuc, idx, event); 4371 else if (pmu->cpu_type == hybrid_small) 4372 return tnt_get_event_constraints(cpuc, idx, event); 4373 4374 WARN_ON(1); 4375 return &emptyconstraint; 4376 } 4377 4378 static struct event_constraint * 4379 cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4380 struct perf_event *event) 4381 { 4382 struct event_constraint *c; 4383 4384 c = intel_get_event_constraints(cpuc, idx, event); 4385 4386 /* 4387 * The :ppp indicates the Precise Distribution (PDist) facility, which 4388 * is only supported on the GP counter 0 & 1 and Fixed counter 0. 4389 * If a :ppp event which is not available on the above eligible counters, 4390 * error out. 4391 */ 4392 if (event->attr.precise_ip == 3) { 4393 /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */ 4394 if (constraint_match(&fixed0_constraint, event->hw.config)) 4395 return &fixed0_counter0_1_constraint; 4396 4397 switch (c->idxmsk64 & 0x3ull) { 4398 case 0x1: 4399 return &counter0_constraint; 4400 case 0x2: 4401 return &counter1_constraint; 4402 case 0x3: 4403 return &counter0_1_constraint; 4404 } 4405 return &emptyconstraint; 4406 } 4407 4408 return c; 4409 } 4410 4411 static struct event_constraint * 4412 rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4413 struct perf_event *event) 4414 { 4415 struct event_constraint *c; 4416 4417 c = spr_get_event_constraints(cpuc, idx, event); 4418 4419 /* The Retire Latency is not supported by the fixed counter 0. */ 4420 if (event->attr.precise_ip && 4421 (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) && 4422 constraint_match(&fixed0_constraint, event->hw.config)) { 4423 /* 4424 * The Instruction PDIR is only available 4425 * on the fixed counter 0. Error out for this case. 4426 */ 4427 if (event->attr.precise_ip == 3) 4428 return &emptyconstraint; 4429 return &counters_1_7_constraint; 4430 } 4431 4432 return c; 4433 } 4434 4435 static struct event_constraint * 4436 mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 4437 struct perf_event *event) 4438 { 4439 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 4440 4441 if (pmu->cpu_type == hybrid_big) 4442 return rwc_get_event_constraints(cpuc, idx, event); 4443 if (pmu->cpu_type == hybrid_small) 4444 return cmt_get_event_constraints(cpuc, idx, event); 4445 4446 WARN_ON(1); 4447 return &emptyconstraint; 4448 } 4449 4450 static int adl_hw_config(struct perf_event *event) 4451 { 4452 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); 4453 4454 if (pmu->cpu_type == hybrid_big) 4455 return hsw_hw_config(event); 4456 else if (pmu->cpu_type == hybrid_small) 4457 return intel_pmu_hw_config(event); 4458 4459 WARN_ON(1); 4460 return -EOPNOTSUPP; 4461 } 4462 4463 static u8 adl_get_hybrid_cpu_type(void) 4464 { 4465 return hybrid_big; 4466 } 4467 4468 static inline bool erratum_hsw11(struct perf_event *event) 4469 { 4470 return (event->hw.config & INTEL_ARCH_EVENT_MASK) == 4471 X86_CONFIG(.event=0xc0, .umask=0x01); 4472 } 4473 4474 /* 4475 * The HSW11 requires a period larger than 100 which is the same as the BDM11. 4476 * A minimum period of 128 is enforced as well for the INST_RETIRED.ALL. 4477 * 4478 * The message 'interrupt took too long' can be observed on any counter which 4479 * was armed with a period < 32 and two events expired in the same NMI. 4480 * A minimum period of 32 is enforced for the rest of the events. 4481 */ 4482 static void hsw_limit_period(struct perf_event *event, s64 *left) 4483 { 4484 *left = max(*left, erratum_hsw11(event) ? 128 : 32); 4485 } 4486 4487 /* 4488 * Broadwell: 4489 * 4490 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared 4491 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine 4492 * the two to enforce a minimum period of 128 (the smallest value that has bits 4493 * 0-5 cleared and >= 100). 4494 * 4495 * Because of how the code in x86_perf_event_set_period() works, the truncation 4496 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period 4497 * to make up for the 'lost' events due to carrying the 'error' in period_left. 4498 * 4499 * Therefore the effective (average) period matches the requested period, 4500 * despite coarser hardware granularity. 4501 */ 4502 static void bdw_limit_period(struct perf_event *event, s64 *left) 4503 { 4504 if (erratum_hsw11(event)) { 4505 if (*left < 128) 4506 *left = 128; 4507 *left &= ~0x3fULL; 4508 } 4509 } 4510 4511 static void nhm_limit_period(struct perf_event *event, s64 *left) 4512 { 4513 *left = max(*left, 32LL); 4514 } 4515 4516 static void spr_limit_period(struct perf_event *event, s64 *left) 4517 { 4518 if (event->attr.precise_ip == 3) 4519 *left = max(*left, 128LL); 4520 } 4521 4522 PMU_FORMAT_ATTR(event, "config:0-7" ); 4523 PMU_FORMAT_ATTR(umask, "config:8-15" ); 4524 PMU_FORMAT_ATTR(edge, "config:18" ); 4525 PMU_FORMAT_ATTR(pc, "config:19" ); 4526 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */ 4527 PMU_FORMAT_ATTR(inv, "config:23" ); 4528 PMU_FORMAT_ATTR(cmask, "config:24-31" ); 4529 PMU_FORMAT_ATTR(in_tx, "config:32"); 4530 PMU_FORMAT_ATTR(in_tx_cp, "config:33"); 4531 4532 static struct attribute *intel_arch_formats_attr[] = { 4533 &format_attr_event.attr, 4534 &format_attr_umask.attr, 4535 &format_attr_edge.attr, 4536 &format_attr_pc.attr, 4537 &format_attr_inv.attr, 4538 &format_attr_cmask.attr, 4539 NULL, 4540 }; 4541 4542 ssize_t intel_event_sysfs_show(char *page, u64 config) 4543 { 4544 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT); 4545 4546 return x86_event_sysfs_show(page, config, event); 4547 } 4548 4549 static struct intel_shared_regs *allocate_shared_regs(int cpu) 4550 { 4551 struct intel_shared_regs *regs; 4552 int i; 4553 4554 regs = kzalloc_node(sizeof(struct intel_shared_regs), 4555 GFP_KERNEL, cpu_to_node(cpu)); 4556 if (regs) { 4557 /* 4558 * initialize the locks to keep lockdep happy 4559 */ 4560 for (i = 0; i < EXTRA_REG_MAX; i++) 4561 raw_spin_lock_init(®s->regs[i].lock); 4562 4563 regs->core_id = -1; 4564 } 4565 return regs; 4566 } 4567 4568 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu) 4569 { 4570 struct intel_excl_cntrs *c; 4571 4572 c = kzalloc_node(sizeof(struct intel_excl_cntrs), 4573 GFP_KERNEL, cpu_to_node(cpu)); 4574 if (c) { 4575 raw_spin_lock_init(&c->lock); 4576 c->core_id = -1; 4577 } 4578 return c; 4579 } 4580 4581 4582 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) 4583 { 4584 cpuc->pebs_record_size = x86_pmu.pebs_record_size; 4585 4586 if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { 4587 cpuc->shared_regs = allocate_shared_regs(cpu); 4588 if (!cpuc->shared_regs) 4589 goto err; 4590 } 4591 4592 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) { 4593 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); 4594 4595 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); 4596 if (!cpuc->constraint_list) 4597 goto err_shared_regs; 4598 } 4599 4600 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 4601 cpuc->excl_cntrs = allocate_excl_cntrs(cpu); 4602 if (!cpuc->excl_cntrs) 4603 goto err_constraint_list; 4604 4605 cpuc->excl_thread_id = 0; 4606 } 4607 4608 return 0; 4609 4610 err_constraint_list: 4611 kfree(cpuc->constraint_list); 4612 cpuc->constraint_list = NULL; 4613 4614 err_shared_regs: 4615 kfree(cpuc->shared_regs); 4616 cpuc->shared_regs = NULL; 4617 4618 err: 4619 return -ENOMEM; 4620 } 4621 4622 static int intel_pmu_cpu_prepare(int cpu) 4623 { 4624 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu); 4625 } 4626 4627 static void flip_smm_bit(void *data) 4628 { 4629 unsigned long set = *(unsigned long *)data; 4630 4631 if (set > 0) { 4632 msr_set_bit(MSR_IA32_DEBUGCTLMSR, 4633 DEBUGCTLMSR_FREEZE_IN_SMM_BIT); 4634 } else { 4635 msr_clear_bit(MSR_IA32_DEBUGCTLMSR, 4636 DEBUGCTLMSR_FREEZE_IN_SMM_BIT); 4637 } 4638 } 4639 4640 static void intel_pmu_check_num_counters(int *num_counters, 4641 int *num_counters_fixed, 4642 u64 *intel_ctrl, u64 fixed_mask); 4643 4644 static void update_pmu_cap(struct x86_hybrid_pmu *pmu) 4645 { 4646 unsigned int sub_bitmaps = cpuid_eax(ARCH_PERFMON_EXT_LEAF); 4647 unsigned int eax, ebx, ecx, edx; 4648 4649 if (sub_bitmaps & ARCH_PERFMON_NUM_COUNTER_LEAF_BIT) { 4650 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF, 4651 &eax, &ebx, &ecx, &edx); 4652 pmu->num_counters = fls(eax); 4653 pmu->num_counters_fixed = fls(ebx); 4654 intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed, 4655 &pmu->intel_ctrl, ebx); 4656 } 4657 } 4658 4659 static bool init_hybrid_pmu(int cpu) 4660 { 4661 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 4662 u8 cpu_type = get_this_hybrid_cpu_type(); 4663 struct x86_hybrid_pmu *pmu = NULL; 4664 int i; 4665 4666 if (!cpu_type && x86_pmu.get_hybrid_cpu_type) 4667 cpu_type = x86_pmu.get_hybrid_cpu_type(); 4668 4669 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { 4670 if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) { 4671 pmu = &x86_pmu.hybrid_pmu[i]; 4672 break; 4673 } 4674 } 4675 if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) { 4676 cpuc->pmu = NULL; 4677 return false; 4678 } 4679 4680 /* Only check and dump the PMU information for the first CPU */ 4681 if (!cpumask_empty(&pmu->supported_cpus)) 4682 goto end; 4683 4684 if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) 4685 update_pmu_cap(pmu); 4686 4687 if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed)) 4688 return false; 4689 4690 pr_info("%s PMU driver: ", pmu->name); 4691 4692 if (pmu->intel_cap.pebs_output_pt_available) 4693 pr_cont("PEBS-via-PT "); 4694 4695 pr_cont("\n"); 4696 4697 x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed, 4698 pmu->intel_ctrl); 4699 4700 end: 4701 cpumask_set_cpu(cpu, &pmu->supported_cpus); 4702 cpuc->pmu = &pmu->pmu; 4703 4704 return true; 4705 } 4706 4707 static void intel_pmu_cpu_starting(int cpu) 4708 { 4709 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 4710 int core_id = topology_core_id(cpu); 4711 int i; 4712 4713 if (is_hybrid() && !init_hybrid_pmu(cpu)) 4714 return; 4715 4716 init_debug_store_on_cpu(cpu); 4717 /* 4718 * Deal with CPUs that don't clear their LBRs on power-up. 4719 */ 4720 intel_pmu_lbr_reset(); 4721 4722 cpuc->lbr_sel = NULL; 4723 4724 if (x86_pmu.flags & PMU_FL_TFA) { 4725 WARN_ON_ONCE(cpuc->tfa_shadow); 4726 cpuc->tfa_shadow = ~0ULL; 4727 intel_set_tfa(cpuc, false); 4728 } 4729 4730 if (x86_pmu.version > 1) 4731 flip_smm_bit(&x86_pmu.attr_freeze_on_smi); 4732 4733 /* 4734 * Disable perf metrics if any added CPU doesn't support it. 4735 * 4736 * Turn off the check for a hybrid architecture, because the 4737 * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicate 4738 * the architecture features. The perf metrics is a model-specific 4739 * feature for now. The corresponding bit should always be 0 on 4740 * a hybrid platform, e.g., Alder Lake. 4741 */ 4742 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) { 4743 union perf_capabilities perf_cap; 4744 4745 rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities); 4746 if (!perf_cap.perf_metrics) { 4747 x86_pmu.intel_cap.perf_metrics = 0; 4748 x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS); 4749 } 4750 } 4751 4752 if (!cpuc->shared_regs) 4753 return; 4754 4755 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) { 4756 for_each_cpu(i, topology_sibling_cpumask(cpu)) { 4757 struct intel_shared_regs *pc; 4758 4759 pc = per_cpu(cpu_hw_events, i).shared_regs; 4760 if (pc && pc->core_id == core_id) { 4761 cpuc->kfree_on_online[0] = cpuc->shared_regs; 4762 cpuc->shared_regs = pc; 4763 break; 4764 } 4765 } 4766 cpuc->shared_regs->core_id = core_id; 4767 cpuc->shared_regs->refcnt++; 4768 } 4769 4770 if (x86_pmu.lbr_sel_map) 4771 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; 4772 4773 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 4774 for_each_cpu(i, topology_sibling_cpumask(cpu)) { 4775 struct cpu_hw_events *sibling; 4776 struct intel_excl_cntrs *c; 4777 4778 sibling = &per_cpu(cpu_hw_events, i); 4779 c = sibling->excl_cntrs; 4780 if (c && c->core_id == core_id) { 4781 cpuc->kfree_on_online[1] = cpuc->excl_cntrs; 4782 cpuc->excl_cntrs = c; 4783 if (!sibling->excl_thread_id) 4784 cpuc->excl_thread_id = 1; 4785 break; 4786 } 4787 } 4788 cpuc->excl_cntrs->core_id = core_id; 4789 cpuc->excl_cntrs->refcnt++; 4790 } 4791 } 4792 4793 static void free_excl_cntrs(struct cpu_hw_events *cpuc) 4794 { 4795 struct intel_excl_cntrs *c; 4796 4797 c = cpuc->excl_cntrs; 4798 if (c) { 4799 if (c->core_id == -1 || --c->refcnt == 0) 4800 kfree(c); 4801 cpuc->excl_cntrs = NULL; 4802 } 4803 4804 kfree(cpuc->constraint_list); 4805 cpuc->constraint_list = NULL; 4806 } 4807 4808 static void intel_pmu_cpu_dying(int cpu) 4809 { 4810 fini_debug_store_on_cpu(cpu); 4811 } 4812 4813 void intel_cpuc_finish(struct cpu_hw_events *cpuc) 4814 { 4815 struct intel_shared_regs *pc; 4816 4817 pc = cpuc->shared_regs; 4818 if (pc) { 4819 if (pc->core_id == -1 || --pc->refcnt == 0) 4820 kfree(pc); 4821 cpuc->shared_regs = NULL; 4822 } 4823 4824 free_excl_cntrs(cpuc); 4825 } 4826 4827 static void intel_pmu_cpu_dead(int cpu) 4828 { 4829 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 4830 4831 intel_cpuc_finish(cpuc); 4832 4833 if (is_hybrid() && cpuc->pmu) 4834 cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus); 4835 } 4836 4837 static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, 4838 bool sched_in) 4839 { 4840 intel_pmu_pebs_sched_task(pmu_ctx, sched_in); 4841 intel_pmu_lbr_sched_task(pmu_ctx, sched_in); 4842 } 4843 4844 static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc, 4845 struct perf_event_pmu_context *next_epc) 4846 { 4847 intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc); 4848 } 4849 4850 static int intel_pmu_check_period(struct perf_event *event, u64 value) 4851 { 4852 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0; 4853 } 4854 4855 static void intel_aux_output_init(void) 4856 { 4857 /* Refer also intel_pmu_aux_output_match() */ 4858 if (x86_pmu.intel_cap.pebs_output_pt_available) 4859 x86_pmu.assign = intel_pmu_assign_event; 4860 } 4861 4862 static int intel_pmu_aux_output_match(struct perf_event *event) 4863 { 4864 /* intel_pmu_assign_event() is needed, refer intel_aux_output_init() */ 4865 if (!x86_pmu.intel_cap.pebs_output_pt_available) 4866 return 0; 4867 4868 return is_intel_pt_event(event); 4869 } 4870 4871 static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret) 4872 { 4873 struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu); 4874 4875 *ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus); 4876 } 4877 4878 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); 4879 4880 PMU_FORMAT_ATTR(ldlat, "config1:0-15"); 4881 4882 PMU_FORMAT_ATTR(frontend, "config1:0-23"); 4883 4884 PMU_FORMAT_ATTR(snoop_rsp, "config1:0-63"); 4885 4886 static struct attribute *intel_arch3_formats_attr[] = { 4887 &format_attr_event.attr, 4888 &format_attr_umask.attr, 4889 &format_attr_edge.attr, 4890 &format_attr_pc.attr, 4891 &format_attr_any.attr, 4892 &format_attr_inv.attr, 4893 &format_attr_cmask.attr, 4894 NULL, 4895 }; 4896 4897 static struct attribute *hsw_format_attr[] = { 4898 &format_attr_in_tx.attr, 4899 &format_attr_in_tx_cp.attr, 4900 &format_attr_offcore_rsp.attr, 4901 &format_attr_ldlat.attr, 4902 NULL 4903 }; 4904 4905 static struct attribute *nhm_format_attr[] = { 4906 &format_attr_offcore_rsp.attr, 4907 &format_attr_ldlat.attr, 4908 NULL 4909 }; 4910 4911 static struct attribute *slm_format_attr[] = { 4912 &format_attr_offcore_rsp.attr, 4913 NULL 4914 }; 4915 4916 static struct attribute *cmt_format_attr[] = { 4917 &format_attr_offcore_rsp.attr, 4918 &format_attr_ldlat.attr, 4919 &format_attr_snoop_rsp.attr, 4920 NULL 4921 }; 4922 4923 static struct attribute *skl_format_attr[] = { 4924 &format_attr_frontend.attr, 4925 NULL, 4926 }; 4927 4928 static __initconst const struct x86_pmu core_pmu = { 4929 .name = "core", 4930 .handle_irq = x86_pmu_handle_irq, 4931 .disable_all = x86_pmu_disable_all, 4932 .enable_all = core_pmu_enable_all, 4933 .enable = core_pmu_enable_event, 4934 .disable = x86_pmu_disable_event, 4935 .hw_config = core_pmu_hw_config, 4936 .schedule_events = x86_schedule_events, 4937 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 4938 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 4939 .event_map = intel_pmu_event_map, 4940 .max_events = ARRAY_SIZE(intel_perfmon_event_map), 4941 .apic = 1, 4942 .large_pebs_flags = LARGE_PEBS_FLAGS, 4943 4944 /* 4945 * Intel PMCs cannot be accessed sanely above 32-bit width, 4946 * so we install an artificial 1<<31 period regardless of 4947 * the generic event period: 4948 */ 4949 .max_period = (1ULL<<31) - 1, 4950 .get_event_constraints = intel_get_event_constraints, 4951 .put_event_constraints = intel_put_event_constraints, 4952 .event_constraints = intel_core_event_constraints, 4953 .guest_get_msrs = core_guest_get_msrs, 4954 .format_attrs = intel_arch_formats_attr, 4955 .events_sysfs_show = intel_event_sysfs_show, 4956 4957 /* 4958 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs 4959 * together with PMU version 1 and thus be using core_pmu with 4960 * shared_regs. We need following callbacks here to allocate 4961 * it properly. 4962 */ 4963 .cpu_prepare = intel_pmu_cpu_prepare, 4964 .cpu_starting = intel_pmu_cpu_starting, 4965 .cpu_dying = intel_pmu_cpu_dying, 4966 .cpu_dead = intel_pmu_cpu_dead, 4967 4968 .check_period = intel_pmu_check_period, 4969 4970 .lbr_reset = intel_pmu_lbr_reset_64, 4971 .lbr_read = intel_pmu_lbr_read_64, 4972 .lbr_save = intel_pmu_lbr_save, 4973 .lbr_restore = intel_pmu_lbr_restore, 4974 }; 4975 4976 static __initconst const struct x86_pmu intel_pmu = { 4977 .name = "Intel", 4978 .handle_irq = intel_pmu_handle_irq, 4979 .disable_all = intel_pmu_disable_all, 4980 .enable_all = intel_pmu_enable_all, 4981 .enable = intel_pmu_enable_event, 4982 .disable = intel_pmu_disable_event, 4983 .add = intel_pmu_add_event, 4984 .del = intel_pmu_del_event, 4985 .read = intel_pmu_read_event, 4986 .set_period = intel_pmu_set_period, 4987 .update = intel_pmu_update, 4988 .hw_config = intel_pmu_hw_config, 4989 .schedule_events = x86_schedule_events, 4990 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 4991 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 4992 .event_map = intel_pmu_event_map, 4993 .max_events = ARRAY_SIZE(intel_perfmon_event_map), 4994 .apic = 1, 4995 .large_pebs_flags = LARGE_PEBS_FLAGS, 4996 /* 4997 * Intel PMCs cannot be accessed sanely above 32 bit width, 4998 * so we install an artificial 1<<31 period regardless of 4999 * the generic event period: 5000 */ 5001 .max_period = (1ULL << 31) - 1, 5002 .get_event_constraints = intel_get_event_constraints, 5003 .put_event_constraints = intel_put_event_constraints, 5004 .pebs_aliases = intel_pebs_aliases_core2, 5005 5006 .format_attrs = intel_arch3_formats_attr, 5007 .events_sysfs_show = intel_event_sysfs_show, 5008 5009 .cpu_prepare = intel_pmu_cpu_prepare, 5010 .cpu_starting = intel_pmu_cpu_starting, 5011 .cpu_dying = intel_pmu_cpu_dying, 5012 .cpu_dead = intel_pmu_cpu_dead, 5013 5014 .guest_get_msrs = intel_guest_get_msrs, 5015 .sched_task = intel_pmu_sched_task, 5016 .swap_task_ctx = intel_pmu_swap_task_ctx, 5017 5018 .check_period = intel_pmu_check_period, 5019 5020 .aux_output_match = intel_pmu_aux_output_match, 5021 5022 .lbr_reset = intel_pmu_lbr_reset_64, 5023 .lbr_read = intel_pmu_lbr_read_64, 5024 .lbr_save = intel_pmu_lbr_save, 5025 .lbr_restore = intel_pmu_lbr_restore, 5026 5027 /* 5028 * SMM has access to all 4 rings and while traditionally SMM code only 5029 * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM. 5030 * 5031 * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction 5032 * between SMM or not, this results in what should be pure userspace 5033 * counters including SMM data. 5034 * 5035 * This is a clear privilege issue, therefore globally disable 5036 * counting SMM by default. 5037 */ 5038 .attr_freeze_on_smi = 1, 5039 }; 5040 5041 static __init void intel_clovertown_quirk(void) 5042 { 5043 /* 5044 * PEBS is unreliable due to: 5045 * 5046 * AJ67 - PEBS may experience CPL leaks 5047 * AJ68 - PEBS PMI may be delayed by one event 5048 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12] 5049 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS 5050 * 5051 * AJ67 could be worked around by restricting the OS/USR flags. 5052 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI. 5053 * 5054 * AJ106 could possibly be worked around by not allowing LBR 5055 * usage from PEBS, including the fixup. 5056 * AJ68 could possibly be worked around by always programming 5057 * a pebs_event_reset[0] value and coping with the lost events. 5058 * 5059 * But taken together it might just make sense to not enable PEBS on 5060 * these chips. 5061 */ 5062 pr_warn("PEBS disabled due to CPU errata\n"); 5063 x86_pmu.pebs = 0; 5064 x86_pmu.pebs_constraints = NULL; 5065 } 5066 5067 static const struct x86_cpu_desc isolation_ucodes[] = { 5068 INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f), 5069 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L, 1, 0x0000001e), 5070 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G, 1, 0x00000015), 5071 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037), 5072 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a), 5073 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 0x00000023), 5074 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G, 1, 0x00000014), 5075 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 2, 0x00000010), 5076 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009), 5077 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009), 5078 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002), 5079 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014), 5080 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021), 5081 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000), 5082 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000), 5083 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000), 5084 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000), 5085 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000), 5086 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c), 5087 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c), 5088 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e), 5089 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 9, 0x0000004e), 5090 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 10, 0x0000004e), 5091 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 11, 0x0000004e), 5092 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 12, 0x0000004e), 5093 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 10, 0x0000004e), 5094 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 11, 0x0000004e), 5095 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 12, 0x0000004e), 5096 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 13, 0x0000004e), 5097 {} 5098 }; 5099 5100 static void intel_check_pebs_isolation(void) 5101 { 5102 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes); 5103 } 5104 5105 static __init void intel_pebs_isolation_quirk(void) 5106 { 5107 WARN_ON_ONCE(x86_pmu.check_microcode); 5108 x86_pmu.check_microcode = intel_check_pebs_isolation; 5109 intel_check_pebs_isolation(); 5110 } 5111 5112 static const struct x86_cpu_desc pebs_ucodes[] = { 5113 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028), 5114 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618), 5115 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c), 5116 {} 5117 }; 5118 5119 static bool intel_snb_pebs_broken(void) 5120 { 5121 return !x86_cpu_has_min_microcode_rev(pebs_ucodes); 5122 } 5123 5124 static void intel_snb_check_microcode(void) 5125 { 5126 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken) 5127 return; 5128 5129 /* 5130 * Serialized by the microcode lock.. 5131 */ 5132 if (x86_pmu.pebs_broken) { 5133 pr_info("PEBS enabled due to microcode update\n"); 5134 x86_pmu.pebs_broken = 0; 5135 } else { 5136 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n"); 5137 x86_pmu.pebs_broken = 1; 5138 } 5139 } 5140 5141 static bool is_lbr_from(unsigned long msr) 5142 { 5143 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr; 5144 5145 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr; 5146 } 5147 5148 /* 5149 * Under certain circumstances, access certain MSR may cause #GP. 5150 * The function tests if the input MSR can be safely accessed. 5151 */ 5152 static bool check_msr(unsigned long msr, u64 mask) 5153 { 5154 u64 val_old, val_new, val_tmp; 5155 5156 /* 5157 * Disable the check for real HW, so we don't 5158 * mess with potentially enabled registers: 5159 */ 5160 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) 5161 return true; 5162 5163 /* 5164 * Read the current value, change it and read it back to see if it 5165 * matches, this is needed to detect certain hardware emulators 5166 * (qemu/kvm) that don't trap on the MSR access and always return 0s. 5167 */ 5168 if (rdmsrl_safe(msr, &val_old)) 5169 return false; 5170 5171 /* 5172 * Only change the bits which can be updated by wrmsrl. 5173 */ 5174 val_tmp = val_old ^ mask; 5175 5176 if (is_lbr_from(msr)) 5177 val_tmp = lbr_from_signext_quirk_wr(val_tmp); 5178 5179 if (wrmsrl_safe(msr, val_tmp) || 5180 rdmsrl_safe(msr, &val_new)) 5181 return false; 5182 5183 /* 5184 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value 5185 * should equal rdmsrl()'s even with the quirk. 5186 */ 5187 if (val_new != val_tmp) 5188 return false; 5189 5190 if (is_lbr_from(msr)) 5191 val_old = lbr_from_signext_quirk_wr(val_old); 5192 5193 /* Here it's sure that the MSR can be safely accessed. 5194 * Restore the old value and return. 5195 */ 5196 wrmsrl(msr, val_old); 5197 5198 return true; 5199 } 5200 5201 static __init void intel_sandybridge_quirk(void) 5202 { 5203 x86_pmu.check_microcode = intel_snb_check_microcode; 5204 cpus_read_lock(); 5205 intel_snb_check_microcode(); 5206 cpus_read_unlock(); 5207 } 5208 5209 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = { 5210 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" }, 5211 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" }, 5212 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" }, 5213 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" }, 5214 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" }, 5215 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" }, 5216 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" }, 5217 }; 5218 5219 static __init void intel_arch_events_quirk(void) 5220 { 5221 int bit; 5222 5223 /* disable event that reported as not present by cpuid */ 5224 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) { 5225 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0; 5226 pr_warn("CPUID marked event: \'%s\' unavailable\n", 5227 intel_arch_events_map[bit].name); 5228 } 5229 } 5230 5231 static __init void intel_nehalem_quirk(void) 5232 { 5233 union cpuid10_ebx ebx; 5234 5235 ebx.full = x86_pmu.events_maskl; 5236 if (ebx.split.no_branch_misses_retired) { 5237 /* 5238 * Erratum AAJ80 detected, we work it around by using 5239 * the BR_MISP_EXEC.ANY event. This will over-count 5240 * branch-misses, but it's still much better than the 5241 * architectural event which is often completely bogus: 5242 */ 5243 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; 5244 ebx.split.no_branch_misses_retired = 0; 5245 x86_pmu.events_maskl = ebx.full; 5246 pr_info("CPU erratum AAJ80 worked around\n"); 5247 } 5248 } 5249 5250 /* 5251 * enable software workaround for errata: 5252 * SNB: BJ122 5253 * IVB: BV98 5254 * HSW: HSD29 5255 * 5256 * Only needed when HT is enabled. However detecting 5257 * if HT is enabled is difficult (model specific). So instead, 5258 * we enable the workaround in the early boot, and verify if 5259 * it is needed in a later initcall phase once we have valid 5260 * topology information to check if HT is actually enabled 5261 */ 5262 static __init void intel_ht_bug(void) 5263 { 5264 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED; 5265 5266 x86_pmu.start_scheduling = intel_start_scheduling; 5267 x86_pmu.commit_scheduling = intel_commit_scheduling; 5268 x86_pmu.stop_scheduling = intel_stop_scheduling; 5269 } 5270 5271 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3"); 5272 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82") 5273 5274 /* Haswell special events */ 5275 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1"); 5276 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2"); 5277 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4"); 5278 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2"); 5279 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1"); 5280 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1"); 5281 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2"); 5282 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4"); 5283 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2"); 5284 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1"); 5285 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1"); 5286 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1"); 5287 5288 static struct attribute *hsw_events_attrs[] = { 5289 EVENT_PTR(td_slots_issued), 5290 EVENT_PTR(td_slots_retired), 5291 EVENT_PTR(td_fetch_bubbles), 5292 EVENT_PTR(td_total_slots), 5293 EVENT_PTR(td_total_slots_scale), 5294 EVENT_PTR(td_recovery_bubbles), 5295 EVENT_PTR(td_recovery_bubbles_scale), 5296 NULL 5297 }; 5298 5299 static struct attribute *hsw_mem_events_attrs[] = { 5300 EVENT_PTR(mem_ld_hsw), 5301 EVENT_PTR(mem_st_hsw), 5302 NULL, 5303 }; 5304 5305 static struct attribute *hsw_tsx_events_attrs[] = { 5306 EVENT_PTR(tx_start), 5307 EVENT_PTR(tx_commit), 5308 EVENT_PTR(tx_abort), 5309 EVENT_PTR(tx_capacity), 5310 EVENT_PTR(tx_conflict), 5311 EVENT_PTR(el_start), 5312 EVENT_PTR(el_commit), 5313 EVENT_PTR(el_abort), 5314 EVENT_PTR(el_capacity), 5315 EVENT_PTR(el_conflict), 5316 EVENT_PTR(cycles_t), 5317 EVENT_PTR(cycles_ct), 5318 NULL 5319 }; 5320 5321 EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80"); 5322 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2"); 5323 EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80"); 5324 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2"); 5325 5326 static struct attribute *icl_events_attrs[] = { 5327 EVENT_PTR(mem_ld_hsw), 5328 EVENT_PTR(mem_st_hsw), 5329 NULL, 5330 }; 5331 5332 static struct attribute *icl_td_events_attrs[] = { 5333 EVENT_PTR(slots), 5334 EVENT_PTR(td_retiring), 5335 EVENT_PTR(td_bad_spec), 5336 EVENT_PTR(td_fe_bound), 5337 EVENT_PTR(td_be_bound), 5338 NULL, 5339 }; 5340 5341 static struct attribute *icl_tsx_events_attrs[] = { 5342 EVENT_PTR(tx_start), 5343 EVENT_PTR(tx_abort), 5344 EVENT_PTR(tx_commit), 5345 EVENT_PTR(tx_capacity_read), 5346 EVENT_PTR(tx_capacity_write), 5347 EVENT_PTR(tx_conflict), 5348 EVENT_PTR(el_start), 5349 EVENT_PTR(el_abort), 5350 EVENT_PTR(el_commit), 5351 EVENT_PTR(el_capacity_read), 5352 EVENT_PTR(el_capacity_write), 5353 EVENT_PTR(el_conflict), 5354 EVENT_PTR(cycles_t), 5355 EVENT_PTR(cycles_ct), 5356 NULL, 5357 }; 5358 5359 5360 EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2"); 5361 EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82"); 5362 5363 static struct attribute *spr_events_attrs[] = { 5364 EVENT_PTR(mem_ld_hsw), 5365 EVENT_PTR(mem_st_spr), 5366 EVENT_PTR(mem_ld_aux), 5367 NULL, 5368 }; 5369 5370 static struct attribute *spr_td_events_attrs[] = { 5371 EVENT_PTR(slots), 5372 EVENT_PTR(td_retiring), 5373 EVENT_PTR(td_bad_spec), 5374 EVENT_PTR(td_fe_bound), 5375 EVENT_PTR(td_be_bound), 5376 EVENT_PTR(td_heavy_ops), 5377 EVENT_PTR(td_br_mispredict), 5378 EVENT_PTR(td_fetch_lat), 5379 EVENT_PTR(td_mem_bound), 5380 NULL, 5381 }; 5382 5383 static struct attribute *spr_tsx_events_attrs[] = { 5384 EVENT_PTR(tx_start), 5385 EVENT_PTR(tx_abort), 5386 EVENT_PTR(tx_commit), 5387 EVENT_PTR(tx_capacity_read), 5388 EVENT_PTR(tx_capacity_write), 5389 EVENT_PTR(tx_conflict), 5390 EVENT_PTR(cycles_t), 5391 EVENT_PTR(cycles_ct), 5392 NULL, 5393 }; 5394 5395 static ssize_t freeze_on_smi_show(struct device *cdev, 5396 struct device_attribute *attr, 5397 char *buf) 5398 { 5399 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi); 5400 } 5401 5402 static DEFINE_MUTEX(freeze_on_smi_mutex); 5403 5404 static ssize_t freeze_on_smi_store(struct device *cdev, 5405 struct device_attribute *attr, 5406 const char *buf, size_t count) 5407 { 5408 unsigned long val; 5409 ssize_t ret; 5410 5411 ret = kstrtoul(buf, 0, &val); 5412 if (ret) 5413 return ret; 5414 5415 if (val > 1) 5416 return -EINVAL; 5417 5418 mutex_lock(&freeze_on_smi_mutex); 5419 5420 if (x86_pmu.attr_freeze_on_smi == val) 5421 goto done; 5422 5423 x86_pmu.attr_freeze_on_smi = val; 5424 5425 cpus_read_lock(); 5426 on_each_cpu(flip_smm_bit, &val, 1); 5427 cpus_read_unlock(); 5428 done: 5429 mutex_unlock(&freeze_on_smi_mutex); 5430 5431 return count; 5432 } 5433 5434 static void update_tfa_sched(void *ignored) 5435 { 5436 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 5437 5438 /* 5439 * check if PMC3 is used 5440 * and if so force schedule out for all event types all contexts 5441 */ 5442 if (test_bit(3, cpuc->active_mask)) 5443 perf_pmu_resched(x86_get_pmu(smp_processor_id())); 5444 } 5445 5446 static ssize_t show_sysctl_tfa(struct device *cdev, 5447 struct device_attribute *attr, 5448 char *buf) 5449 { 5450 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort); 5451 } 5452 5453 static ssize_t set_sysctl_tfa(struct device *cdev, 5454 struct device_attribute *attr, 5455 const char *buf, size_t count) 5456 { 5457 bool val; 5458 ssize_t ret; 5459 5460 ret = kstrtobool(buf, &val); 5461 if (ret) 5462 return ret; 5463 5464 /* no change */ 5465 if (val == allow_tsx_force_abort) 5466 return count; 5467 5468 allow_tsx_force_abort = val; 5469 5470 cpus_read_lock(); 5471 on_each_cpu(update_tfa_sched, NULL, 1); 5472 cpus_read_unlock(); 5473 5474 return count; 5475 } 5476 5477 5478 static DEVICE_ATTR_RW(freeze_on_smi); 5479 5480 static ssize_t branches_show(struct device *cdev, 5481 struct device_attribute *attr, 5482 char *buf) 5483 { 5484 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr); 5485 } 5486 5487 static DEVICE_ATTR_RO(branches); 5488 5489 static struct attribute *lbr_attrs[] = { 5490 &dev_attr_branches.attr, 5491 NULL 5492 }; 5493 5494 static char pmu_name_str[30]; 5495 5496 static ssize_t pmu_name_show(struct device *cdev, 5497 struct device_attribute *attr, 5498 char *buf) 5499 { 5500 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str); 5501 } 5502 5503 static DEVICE_ATTR_RO(pmu_name); 5504 5505 static struct attribute *intel_pmu_caps_attrs[] = { 5506 &dev_attr_pmu_name.attr, 5507 NULL 5508 }; 5509 5510 static DEVICE_ATTR(allow_tsx_force_abort, 0644, 5511 show_sysctl_tfa, 5512 set_sysctl_tfa); 5513 5514 static struct attribute *intel_pmu_attrs[] = { 5515 &dev_attr_freeze_on_smi.attr, 5516 &dev_attr_allow_tsx_force_abort.attr, 5517 NULL, 5518 }; 5519 5520 static umode_t 5521 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i) 5522 { 5523 return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0; 5524 } 5525 5526 static umode_t 5527 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i) 5528 { 5529 return x86_pmu.pebs ? attr->mode : 0; 5530 } 5531 5532 static umode_t 5533 mem_is_visible(struct kobject *kobj, struct attribute *attr, int i) 5534 { 5535 if (attr == &event_attr_mem_ld_aux.attr.attr) 5536 return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0; 5537 5538 return pebs_is_visible(kobj, attr, i); 5539 } 5540 5541 static umode_t 5542 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) 5543 { 5544 return x86_pmu.lbr_nr ? attr->mode : 0; 5545 } 5546 5547 static umode_t 5548 exra_is_visible(struct kobject *kobj, struct attribute *attr, int i) 5549 { 5550 return x86_pmu.version >= 2 ? attr->mode : 0; 5551 } 5552 5553 static umode_t 5554 default_is_visible(struct kobject *kobj, struct attribute *attr, int i) 5555 { 5556 if (attr == &dev_attr_allow_tsx_force_abort.attr) 5557 return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0; 5558 5559 return attr->mode; 5560 } 5561 5562 static struct attribute_group group_events_td = { 5563 .name = "events", 5564 }; 5565 5566 static struct attribute_group group_events_mem = { 5567 .name = "events", 5568 .is_visible = mem_is_visible, 5569 }; 5570 5571 static struct attribute_group group_events_tsx = { 5572 .name = "events", 5573 .is_visible = tsx_is_visible, 5574 }; 5575 5576 static struct attribute_group group_caps_gen = { 5577 .name = "caps", 5578 .attrs = intel_pmu_caps_attrs, 5579 }; 5580 5581 static struct attribute_group group_caps_lbr = { 5582 .name = "caps", 5583 .attrs = lbr_attrs, 5584 .is_visible = lbr_is_visible, 5585 }; 5586 5587 static struct attribute_group group_format_extra = { 5588 .name = "format", 5589 .is_visible = exra_is_visible, 5590 }; 5591 5592 static struct attribute_group group_format_extra_skl = { 5593 .name = "format", 5594 .is_visible = exra_is_visible, 5595 }; 5596 5597 static struct attribute_group group_default = { 5598 .attrs = intel_pmu_attrs, 5599 .is_visible = default_is_visible, 5600 }; 5601 5602 static const struct attribute_group *attr_update[] = { 5603 &group_events_td, 5604 &group_events_mem, 5605 &group_events_tsx, 5606 &group_caps_gen, 5607 &group_caps_lbr, 5608 &group_format_extra, 5609 &group_format_extra_skl, 5610 &group_default, 5611 NULL, 5612 }; 5613 5614 EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big); 5615 EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small); 5616 EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small); 5617 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small); 5618 EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small); 5619 EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big); 5620 EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big); 5621 EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big); 5622 EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big); 5623 5624 static struct attribute *adl_hybrid_events_attrs[] = { 5625 EVENT_PTR(slots_adl), 5626 EVENT_PTR(td_retiring_adl), 5627 EVENT_PTR(td_bad_spec_adl), 5628 EVENT_PTR(td_fe_bound_adl), 5629 EVENT_PTR(td_be_bound_adl), 5630 EVENT_PTR(td_heavy_ops_adl), 5631 EVENT_PTR(td_br_mis_adl), 5632 EVENT_PTR(td_fetch_lat_adl), 5633 EVENT_PTR(td_mem_bound_adl), 5634 NULL, 5635 }; 5636 5637 /* Must be in IDX order */ 5638 EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small); 5639 EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small); 5640 EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big); 5641 5642 static struct attribute *adl_hybrid_mem_attrs[] = { 5643 EVENT_PTR(mem_ld_adl), 5644 EVENT_PTR(mem_st_adl), 5645 EVENT_PTR(mem_ld_aux_adl), 5646 NULL, 5647 }; 5648 5649 static struct attribute *mtl_hybrid_mem_attrs[] = { 5650 EVENT_PTR(mem_ld_adl), 5651 EVENT_PTR(mem_st_adl), 5652 NULL 5653 }; 5654 5655 EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big); 5656 EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big); 5657 EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big); 5658 EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big); 5659 EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big); 5660 EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big); 5661 EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big); 5662 EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big); 5663 5664 static struct attribute *adl_hybrid_tsx_attrs[] = { 5665 EVENT_PTR(tx_start_adl), 5666 EVENT_PTR(tx_abort_adl), 5667 EVENT_PTR(tx_commit_adl), 5668 EVENT_PTR(tx_capacity_read_adl), 5669 EVENT_PTR(tx_capacity_write_adl), 5670 EVENT_PTR(tx_conflict_adl), 5671 EVENT_PTR(cycles_t_adl), 5672 EVENT_PTR(cycles_ct_adl), 5673 NULL, 5674 }; 5675 5676 FORMAT_ATTR_HYBRID(in_tx, hybrid_big); 5677 FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big); 5678 FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small); 5679 FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small); 5680 FORMAT_ATTR_HYBRID(frontend, hybrid_big); 5681 5682 #define ADL_HYBRID_RTM_FORMAT_ATTR \ 5683 FORMAT_HYBRID_PTR(in_tx), \ 5684 FORMAT_HYBRID_PTR(in_tx_cp) 5685 5686 #define ADL_HYBRID_FORMAT_ATTR \ 5687 FORMAT_HYBRID_PTR(offcore_rsp), \ 5688 FORMAT_HYBRID_PTR(ldlat), \ 5689 FORMAT_HYBRID_PTR(frontend) 5690 5691 static struct attribute *adl_hybrid_extra_attr_rtm[] = { 5692 ADL_HYBRID_RTM_FORMAT_ATTR, 5693 ADL_HYBRID_FORMAT_ATTR, 5694 NULL 5695 }; 5696 5697 static struct attribute *adl_hybrid_extra_attr[] = { 5698 ADL_HYBRID_FORMAT_ATTR, 5699 NULL 5700 }; 5701 5702 FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small); 5703 5704 static struct attribute *mtl_hybrid_extra_attr_rtm[] = { 5705 ADL_HYBRID_RTM_FORMAT_ATTR, 5706 ADL_HYBRID_FORMAT_ATTR, 5707 FORMAT_HYBRID_PTR(snoop_rsp), 5708 NULL 5709 }; 5710 5711 static struct attribute *mtl_hybrid_extra_attr[] = { 5712 ADL_HYBRID_FORMAT_ATTR, 5713 FORMAT_HYBRID_PTR(snoop_rsp), 5714 NULL 5715 }; 5716 5717 static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr) 5718 { 5719 struct device *dev = kobj_to_dev(kobj); 5720 struct x86_hybrid_pmu *pmu = 5721 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); 5722 struct perf_pmu_events_hybrid_attr *pmu_attr = 5723 container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr); 5724 5725 return pmu->cpu_type & pmu_attr->pmu_type; 5726 } 5727 5728 static umode_t hybrid_events_is_visible(struct kobject *kobj, 5729 struct attribute *attr, int i) 5730 { 5731 return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0; 5732 } 5733 5734 static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu) 5735 { 5736 int cpu = cpumask_first(&pmu->supported_cpus); 5737 5738 return (cpu >= nr_cpu_ids) ? -1 : cpu; 5739 } 5740 5741 static umode_t hybrid_tsx_is_visible(struct kobject *kobj, 5742 struct attribute *attr, int i) 5743 { 5744 struct device *dev = kobj_to_dev(kobj); 5745 struct x86_hybrid_pmu *pmu = 5746 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); 5747 int cpu = hybrid_find_supported_cpu(pmu); 5748 5749 return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0; 5750 } 5751 5752 static umode_t hybrid_format_is_visible(struct kobject *kobj, 5753 struct attribute *attr, int i) 5754 { 5755 struct device *dev = kobj_to_dev(kobj); 5756 struct x86_hybrid_pmu *pmu = 5757 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); 5758 struct perf_pmu_format_hybrid_attr *pmu_attr = 5759 container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr); 5760 int cpu = hybrid_find_supported_cpu(pmu); 5761 5762 return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? attr->mode : 0; 5763 } 5764 5765 static struct attribute_group hybrid_group_events_td = { 5766 .name = "events", 5767 .is_visible = hybrid_events_is_visible, 5768 }; 5769 5770 static struct attribute_group hybrid_group_events_mem = { 5771 .name = "events", 5772 .is_visible = hybrid_events_is_visible, 5773 }; 5774 5775 static struct attribute_group hybrid_group_events_tsx = { 5776 .name = "events", 5777 .is_visible = hybrid_tsx_is_visible, 5778 }; 5779 5780 static struct attribute_group hybrid_group_format_extra = { 5781 .name = "format", 5782 .is_visible = hybrid_format_is_visible, 5783 }; 5784 5785 static ssize_t intel_hybrid_get_attr_cpus(struct device *dev, 5786 struct device_attribute *attr, 5787 char *buf) 5788 { 5789 struct x86_hybrid_pmu *pmu = 5790 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); 5791 5792 return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus); 5793 } 5794 5795 static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL); 5796 static struct attribute *intel_hybrid_cpus_attrs[] = { 5797 &dev_attr_cpus.attr, 5798 NULL, 5799 }; 5800 5801 static struct attribute_group hybrid_group_cpus = { 5802 .attrs = intel_hybrid_cpus_attrs, 5803 }; 5804 5805 static const struct attribute_group *hybrid_attr_update[] = { 5806 &hybrid_group_events_td, 5807 &hybrid_group_events_mem, 5808 &hybrid_group_events_tsx, 5809 &group_caps_gen, 5810 &group_caps_lbr, 5811 &hybrid_group_format_extra, 5812 &group_default, 5813 &hybrid_group_cpus, 5814 NULL, 5815 }; 5816 5817 static struct attribute *empty_attrs; 5818 5819 static void intel_pmu_check_num_counters(int *num_counters, 5820 int *num_counters_fixed, 5821 u64 *intel_ctrl, u64 fixed_mask) 5822 { 5823 if (*num_counters > INTEL_PMC_MAX_GENERIC) { 5824 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", 5825 *num_counters, INTEL_PMC_MAX_GENERIC); 5826 *num_counters = INTEL_PMC_MAX_GENERIC; 5827 } 5828 *intel_ctrl = (1ULL << *num_counters) - 1; 5829 5830 if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) { 5831 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", 5832 *num_counters_fixed, INTEL_PMC_MAX_FIXED); 5833 *num_counters_fixed = INTEL_PMC_MAX_FIXED; 5834 } 5835 5836 *intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED; 5837 } 5838 5839 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints, 5840 int num_counters, 5841 int num_counters_fixed, 5842 u64 intel_ctrl) 5843 { 5844 struct event_constraint *c; 5845 5846 if (!event_constraints) 5847 return; 5848 5849 /* 5850 * event on fixed counter2 (REF_CYCLES) only works on this 5851 * counter, so do not extend mask to generic counters 5852 */ 5853 for_each_event_constraint(c, event_constraints) { 5854 /* 5855 * Don't extend the topdown slots and metrics 5856 * events to the generic counters. 5857 */ 5858 if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) { 5859 /* 5860 * Disable topdown slots and metrics events, 5861 * if slots event is not in CPUID. 5862 */ 5863 if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl)) 5864 c->idxmsk64 = 0; 5865 c->weight = hweight64(c->idxmsk64); 5866 continue; 5867 } 5868 5869 if (c->cmask == FIXED_EVENT_FLAGS) { 5870 /* Disabled fixed counters which are not in CPUID */ 5871 c->idxmsk64 &= intel_ctrl; 5872 5873 /* 5874 * Don't extend the pseudo-encoding to the 5875 * generic counters 5876 */ 5877 if (!use_fixed_pseudo_encoding(c->code)) 5878 c->idxmsk64 |= (1ULL << num_counters) - 1; 5879 } 5880 c->idxmsk64 &= 5881 ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed)); 5882 c->weight = hweight64(c->idxmsk64); 5883 } 5884 } 5885 5886 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs) 5887 { 5888 struct extra_reg *er; 5889 5890 /* 5891 * Access extra MSR may cause #GP under certain circumstances. 5892 * E.g. KVM doesn't support offcore event 5893 * Check all extra_regs here. 5894 */ 5895 if (!extra_regs) 5896 return; 5897 5898 for (er = extra_regs; er->msr; er++) { 5899 er->extra_msr_access = check_msr(er->msr, 0x11UL); 5900 /* Disable LBR select mapping */ 5901 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access) 5902 x86_pmu.lbr_sel_map = NULL; 5903 } 5904 } 5905 5906 static void intel_pmu_check_hybrid_pmus(u64 fixed_mask) 5907 { 5908 struct x86_hybrid_pmu *pmu; 5909 int i; 5910 5911 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { 5912 pmu = &x86_pmu.hybrid_pmu[i]; 5913 5914 intel_pmu_check_num_counters(&pmu->num_counters, 5915 &pmu->num_counters_fixed, 5916 &pmu->intel_ctrl, 5917 fixed_mask); 5918 5919 if (pmu->intel_cap.perf_metrics) { 5920 pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; 5921 pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS; 5922 } 5923 5924 if (pmu->intel_cap.pebs_output_pt_available) 5925 pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT; 5926 5927 intel_pmu_check_event_constraints(pmu->event_constraints, 5928 pmu->num_counters, 5929 pmu->num_counters_fixed, 5930 pmu->intel_ctrl); 5931 5932 intel_pmu_check_extra_regs(pmu->extra_regs); 5933 } 5934 } 5935 5936 static __always_inline bool is_mtl(u8 x86_model) 5937 { 5938 return (x86_model == INTEL_FAM6_METEORLAKE) || 5939 (x86_model == INTEL_FAM6_METEORLAKE_L); 5940 } 5941 5942 __init int intel_pmu_init(void) 5943 { 5944 struct attribute **extra_skl_attr = &empty_attrs; 5945 struct attribute **extra_attr = &empty_attrs; 5946 struct attribute **td_attr = &empty_attrs; 5947 struct attribute **mem_attr = &empty_attrs; 5948 struct attribute **tsx_attr = &empty_attrs; 5949 union cpuid10_edx edx; 5950 union cpuid10_eax eax; 5951 union cpuid10_ebx ebx; 5952 unsigned int fixed_mask; 5953 bool pmem = false; 5954 int version, i; 5955 char *name; 5956 struct x86_hybrid_pmu *pmu; 5957 5958 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { 5959 switch (boot_cpu_data.x86) { 5960 case 0x6: 5961 return p6_pmu_init(); 5962 case 0xb: 5963 return knc_pmu_init(); 5964 case 0xf: 5965 return p4_pmu_init(); 5966 } 5967 return -ENODEV; 5968 } 5969 5970 /* 5971 * Check whether the Architectural PerfMon supports 5972 * Branch Misses Retired hw_event or not. 5973 */ 5974 cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full); 5975 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT) 5976 return -ENODEV; 5977 5978 version = eax.split.version_id; 5979 if (version < 2) 5980 x86_pmu = core_pmu; 5981 else 5982 x86_pmu = intel_pmu; 5983 5984 x86_pmu.version = version; 5985 x86_pmu.num_counters = eax.split.num_counters; 5986 x86_pmu.cntval_bits = eax.split.bit_width; 5987 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1; 5988 5989 x86_pmu.events_maskl = ebx.full; 5990 x86_pmu.events_mask_len = eax.split.mask_length; 5991 5992 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters); 5993 x86_pmu.pebs_capable = PEBS_COUNTER_MASK; 5994 5995 /* 5996 * Quirk: v2 perfmon does not report fixed-purpose events, so 5997 * assume at least 3 events, when not running in a hypervisor: 5998 */ 5999 if (version > 1 && version < 5) { 6000 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR); 6001 6002 x86_pmu.num_counters_fixed = 6003 max((int)edx.split.num_counters_fixed, assume); 6004 6005 fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1; 6006 } else if (version >= 5) 6007 x86_pmu.num_counters_fixed = fls(fixed_mask); 6008 6009 if (boot_cpu_has(X86_FEATURE_PDCM)) { 6010 u64 capabilities; 6011 6012 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities); 6013 x86_pmu.intel_cap.capabilities = capabilities; 6014 } 6015 6016 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) { 6017 x86_pmu.lbr_reset = intel_pmu_lbr_reset_32; 6018 x86_pmu.lbr_read = intel_pmu_lbr_read_32; 6019 } 6020 6021 if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) 6022 intel_pmu_arch_lbr_init(); 6023 6024 intel_ds_init(); 6025 6026 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */ 6027 6028 if (version >= 5) { 6029 x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated; 6030 if (x86_pmu.intel_cap.anythread_deprecated) 6031 pr_cont(" AnyThread deprecated, "); 6032 } 6033 6034 /* 6035 * Install the hw-cache-events table: 6036 */ 6037 switch (boot_cpu_data.x86_model) { 6038 case INTEL_FAM6_CORE_YONAH: 6039 pr_cont("Core events, "); 6040 name = "core"; 6041 break; 6042 6043 case INTEL_FAM6_CORE2_MEROM: 6044 x86_add_quirk(intel_clovertown_quirk); 6045 fallthrough; 6046 6047 case INTEL_FAM6_CORE2_MEROM_L: 6048 case INTEL_FAM6_CORE2_PENRYN: 6049 case INTEL_FAM6_CORE2_DUNNINGTON: 6050 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, 6051 sizeof(hw_cache_event_ids)); 6052 6053 intel_pmu_lbr_init_core(); 6054 6055 x86_pmu.event_constraints = intel_core2_event_constraints; 6056 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints; 6057 pr_cont("Core2 events, "); 6058 name = "core2"; 6059 break; 6060 6061 case INTEL_FAM6_NEHALEM: 6062 case INTEL_FAM6_NEHALEM_EP: 6063 case INTEL_FAM6_NEHALEM_EX: 6064 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, 6065 sizeof(hw_cache_event_ids)); 6066 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, 6067 sizeof(hw_cache_extra_regs)); 6068 6069 intel_pmu_lbr_init_nhm(); 6070 6071 x86_pmu.event_constraints = intel_nehalem_event_constraints; 6072 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; 6073 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 6074 x86_pmu.extra_regs = intel_nehalem_extra_regs; 6075 x86_pmu.limit_period = nhm_limit_period; 6076 6077 mem_attr = nhm_mem_events_attrs; 6078 6079 /* UOPS_ISSUED.STALLED_CYCLES */ 6080 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 6081 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 6082 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ 6083 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 6084 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); 6085 6086 intel_pmu_pebs_data_source_nhm(); 6087 x86_add_quirk(intel_nehalem_quirk); 6088 x86_pmu.pebs_no_tlb = 1; 6089 extra_attr = nhm_format_attr; 6090 6091 pr_cont("Nehalem events, "); 6092 name = "nehalem"; 6093 break; 6094 6095 case INTEL_FAM6_ATOM_BONNELL: 6096 case INTEL_FAM6_ATOM_BONNELL_MID: 6097 case INTEL_FAM6_ATOM_SALTWELL: 6098 case INTEL_FAM6_ATOM_SALTWELL_MID: 6099 case INTEL_FAM6_ATOM_SALTWELL_TABLET: 6100 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, 6101 sizeof(hw_cache_event_ids)); 6102 6103 intel_pmu_lbr_init_atom(); 6104 6105 x86_pmu.event_constraints = intel_gen_event_constraints; 6106 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints; 6107 x86_pmu.pebs_aliases = intel_pebs_aliases_core2; 6108 pr_cont("Atom events, "); 6109 name = "bonnell"; 6110 break; 6111 6112 case INTEL_FAM6_ATOM_SILVERMONT: 6113 case INTEL_FAM6_ATOM_SILVERMONT_D: 6114 case INTEL_FAM6_ATOM_SILVERMONT_MID: 6115 case INTEL_FAM6_ATOM_AIRMONT: 6116 case INTEL_FAM6_ATOM_AIRMONT_MID: 6117 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, 6118 sizeof(hw_cache_event_ids)); 6119 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, 6120 sizeof(hw_cache_extra_regs)); 6121 6122 intel_pmu_lbr_init_slm(); 6123 6124 x86_pmu.event_constraints = intel_slm_event_constraints; 6125 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints; 6126 x86_pmu.extra_regs = intel_slm_extra_regs; 6127 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6128 td_attr = slm_events_attrs; 6129 extra_attr = slm_format_attr; 6130 pr_cont("Silvermont events, "); 6131 name = "silvermont"; 6132 break; 6133 6134 case INTEL_FAM6_ATOM_GOLDMONT: 6135 case INTEL_FAM6_ATOM_GOLDMONT_D: 6136 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids, 6137 sizeof(hw_cache_event_ids)); 6138 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs, 6139 sizeof(hw_cache_extra_regs)); 6140 6141 intel_pmu_lbr_init_skl(); 6142 6143 x86_pmu.event_constraints = intel_slm_event_constraints; 6144 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints; 6145 x86_pmu.extra_regs = intel_glm_extra_regs; 6146 /* 6147 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS 6148 * for precise cycles. 6149 * :pp is identical to :ppp 6150 */ 6151 x86_pmu.pebs_aliases = NULL; 6152 x86_pmu.pebs_prec_dist = true; 6153 x86_pmu.lbr_pt_coexist = true; 6154 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6155 td_attr = glm_events_attrs; 6156 extra_attr = slm_format_attr; 6157 pr_cont("Goldmont events, "); 6158 name = "goldmont"; 6159 break; 6160 6161 case INTEL_FAM6_ATOM_GOLDMONT_PLUS: 6162 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, 6163 sizeof(hw_cache_event_ids)); 6164 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs, 6165 sizeof(hw_cache_extra_regs)); 6166 6167 intel_pmu_lbr_init_skl(); 6168 6169 x86_pmu.event_constraints = intel_slm_event_constraints; 6170 x86_pmu.extra_regs = intel_glm_extra_regs; 6171 /* 6172 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS 6173 * for precise cycles. 6174 */ 6175 x86_pmu.pebs_aliases = NULL; 6176 x86_pmu.pebs_prec_dist = true; 6177 x86_pmu.lbr_pt_coexist = true; 6178 x86_pmu.pebs_capable = ~0ULL; 6179 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6180 x86_pmu.flags |= PMU_FL_PEBS_ALL; 6181 x86_pmu.get_event_constraints = glp_get_event_constraints; 6182 td_attr = glm_events_attrs; 6183 /* Goldmont Plus has 4-wide pipeline */ 6184 event_attr_td_total_slots_scale_glm.event_str = "4"; 6185 extra_attr = slm_format_attr; 6186 pr_cont("Goldmont plus events, "); 6187 name = "goldmont_plus"; 6188 break; 6189 6190 case INTEL_FAM6_ATOM_TREMONT_D: 6191 case INTEL_FAM6_ATOM_TREMONT: 6192 case INTEL_FAM6_ATOM_TREMONT_L: 6193 x86_pmu.late_ack = true; 6194 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, 6195 sizeof(hw_cache_event_ids)); 6196 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, 6197 sizeof(hw_cache_extra_regs)); 6198 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; 6199 6200 intel_pmu_lbr_init_skl(); 6201 6202 x86_pmu.event_constraints = intel_slm_event_constraints; 6203 x86_pmu.extra_regs = intel_tnt_extra_regs; 6204 /* 6205 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS 6206 * for precise cycles. 6207 */ 6208 x86_pmu.pebs_aliases = NULL; 6209 x86_pmu.pebs_prec_dist = true; 6210 x86_pmu.lbr_pt_coexist = true; 6211 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6212 x86_pmu.get_event_constraints = tnt_get_event_constraints; 6213 td_attr = tnt_events_attrs; 6214 extra_attr = slm_format_attr; 6215 pr_cont("Tremont events, "); 6216 name = "Tremont"; 6217 break; 6218 6219 case INTEL_FAM6_ATOM_GRACEMONT: 6220 x86_pmu.mid_ack = true; 6221 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, 6222 sizeof(hw_cache_event_ids)); 6223 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, 6224 sizeof(hw_cache_extra_regs)); 6225 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; 6226 6227 x86_pmu.event_constraints = intel_slm_event_constraints; 6228 x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints; 6229 x86_pmu.extra_regs = intel_grt_extra_regs; 6230 6231 x86_pmu.pebs_aliases = NULL; 6232 x86_pmu.pebs_prec_dist = true; 6233 x86_pmu.pebs_block = true; 6234 x86_pmu.lbr_pt_coexist = true; 6235 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6236 x86_pmu.flags |= PMU_FL_INSTR_LATENCY; 6237 6238 intel_pmu_pebs_data_source_grt(); 6239 x86_pmu.pebs_latency_data = adl_latency_data_small; 6240 x86_pmu.get_event_constraints = tnt_get_event_constraints; 6241 x86_pmu.limit_period = spr_limit_period; 6242 td_attr = tnt_events_attrs; 6243 mem_attr = grt_mem_attrs; 6244 extra_attr = nhm_format_attr; 6245 pr_cont("Gracemont events, "); 6246 name = "gracemont"; 6247 break; 6248 6249 case INTEL_FAM6_ATOM_CRESTMONT: 6250 case INTEL_FAM6_ATOM_CRESTMONT_X: 6251 x86_pmu.mid_ack = true; 6252 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, 6253 sizeof(hw_cache_event_ids)); 6254 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, 6255 sizeof(hw_cache_extra_regs)); 6256 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; 6257 6258 x86_pmu.event_constraints = intel_slm_event_constraints; 6259 x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints; 6260 x86_pmu.extra_regs = intel_cmt_extra_regs; 6261 6262 x86_pmu.pebs_aliases = NULL; 6263 x86_pmu.pebs_prec_dist = true; 6264 x86_pmu.lbr_pt_coexist = true; 6265 x86_pmu.pebs_block = true; 6266 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6267 x86_pmu.flags |= PMU_FL_INSTR_LATENCY; 6268 6269 intel_pmu_pebs_data_source_cmt(); 6270 x86_pmu.pebs_latency_data = mtl_latency_data_small; 6271 x86_pmu.get_event_constraints = cmt_get_event_constraints; 6272 x86_pmu.limit_period = spr_limit_period; 6273 td_attr = cmt_events_attrs; 6274 mem_attr = grt_mem_attrs; 6275 extra_attr = cmt_format_attr; 6276 pr_cont("Crestmont events, "); 6277 name = "crestmont"; 6278 break; 6279 6280 case INTEL_FAM6_WESTMERE: 6281 case INTEL_FAM6_WESTMERE_EP: 6282 case INTEL_FAM6_WESTMERE_EX: 6283 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, 6284 sizeof(hw_cache_event_ids)); 6285 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, 6286 sizeof(hw_cache_extra_regs)); 6287 6288 intel_pmu_lbr_init_nhm(); 6289 6290 x86_pmu.event_constraints = intel_westmere_event_constraints; 6291 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 6292 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; 6293 x86_pmu.extra_regs = intel_westmere_extra_regs; 6294 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6295 6296 mem_attr = nhm_mem_events_attrs; 6297 6298 /* UOPS_ISSUED.STALLED_CYCLES */ 6299 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 6300 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 6301 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ 6302 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 6303 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); 6304 6305 intel_pmu_pebs_data_source_nhm(); 6306 extra_attr = nhm_format_attr; 6307 pr_cont("Westmere events, "); 6308 name = "westmere"; 6309 break; 6310 6311 case INTEL_FAM6_SANDYBRIDGE: 6312 case INTEL_FAM6_SANDYBRIDGE_X: 6313 x86_add_quirk(intel_sandybridge_quirk); 6314 x86_add_quirk(intel_ht_bug); 6315 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 6316 sizeof(hw_cache_event_ids)); 6317 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 6318 sizeof(hw_cache_extra_regs)); 6319 6320 intel_pmu_lbr_init_snb(); 6321 6322 x86_pmu.event_constraints = intel_snb_event_constraints; 6323 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; 6324 x86_pmu.pebs_aliases = intel_pebs_aliases_snb; 6325 if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X) 6326 x86_pmu.extra_regs = intel_snbep_extra_regs; 6327 else 6328 x86_pmu.extra_regs = intel_snb_extra_regs; 6329 6330 6331 /* all extra regs are per-cpu when HT is on */ 6332 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6333 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6334 6335 td_attr = snb_events_attrs; 6336 mem_attr = snb_mem_events_attrs; 6337 6338 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 6339 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 6340 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 6341 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ 6342 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 6343 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1); 6344 6345 extra_attr = nhm_format_attr; 6346 6347 pr_cont("SandyBridge events, "); 6348 name = "sandybridge"; 6349 break; 6350 6351 case INTEL_FAM6_IVYBRIDGE: 6352 case INTEL_FAM6_IVYBRIDGE_X: 6353 x86_add_quirk(intel_ht_bug); 6354 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 6355 sizeof(hw_cache_event_ids)); 6356 /* dTLB-load-misses on IVB is different than SNB */ 6357 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */ 6358 6359 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 6360 sizeof(hw_cache_extra_regs)); 6361 6362 intel_pmu_lbr_init_snb(); 6363 6364 x86_pmu.event_constraints = intel_ivb_event_constraints; 6365 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; 6366 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 6367 x86_pmu.pebs_prec_dist = true; 6368 if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X) 6369 x86_pmu.extra_regs = intel_snbep_extra_regs; 6370 else 6371 x86_pmu.extra_regs = intel_snb_extra_regs; 6372 /* all extra regs are per-cpu when HT is on */ 6373 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6374 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6375 6376 td_attr = snb_events_attrs; 6377 mem_attr = snb_mem_events_attrs; 6378 6379 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 6380 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 6381 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 6382 6383 extra_attr = nhm_format_attr; 6384 6385 pr_cont("IvyBridge events, "); 6386 name = "ivybridge"; 6387 break; 6388 6389 6390 case INTEL_FAM6_HASWELL: 6391 case INTEL_FAM6_HASWELL_X: 6392 case INTEL_FAM6_HASWELL_L: 6393 case INTEL_FAM6_HASWELL_G: 6394 x86_add_quirk(intel_ht_bug); 6395 x86_add_quirk(intel_pebs_isolation_quirk); 6396 x86_pmu.late_ack = true; 6397 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6398 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6399 6400 intel_pmu_lbr_init_hsw(); 6401 6402 x86_pmu.event_constraints = intel_hsw_event_constraints; 6403 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; 6404 x86_pmu.extra_regs = intel_snbep_extra_regs; 6405 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 6406 x86_pmu.pebs_prec_dist = true; 6407 /* all extra regs are per-cpu when HT is on */ 6408 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6409 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6410 6411 x86_pmu.hw_config = hsw_hw_config; 6412 x86_pmu.get_event_constraints = hsw_get_event_constraints; 6413 x86_pmu.limit_period = hsw_limit_period; 6414 x86_pmu.lbr_double_abort = true; 6415 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 6416 hsw_format_attr : nhm_format_attr; 6417 td_attr = hsw_events_attrs; 6418 mem_attr = hsw_mem_events_attrs; 6419 tsx_attr = hsw_tsx_events_attrs; 6420 pr_cont("Haswell events, "); 6421 name = "haswell"; 6422 break; 6423 6424 case INTEL_FAM6_BROADWELL: 6425 case INTEL_FAM6_BROADWELL_D: 6426 case INTEL_FAM6_BROADWELL_G: 6427 case INTEL_FAM6_BROADWELL_X: 6428 x86_add_quirk(intel_pebs_isolation_quirk); 6429 x86_pmu.late_ack = true; 6430 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6431 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6432 6433 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */ 6434 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ | 6435 BDW_L3_MISS|HSW_SNOOP_DRAM; 6436 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS| 6437 HSW_SNOOP_DRAM; 6438 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ| 6439 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM; 6440 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE| 6441 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM; 6442 6443 intel_pmu_lbr_init_hsw(); 6444 6445 x86_pmu.event_constraints = intel_bdw_event_constraints; 6446 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints; 6447 x86_pmu.extra_regs = intel_snbep_extra_regs; 6448 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 6449 x86_pmu.pebs_prec_dist = true; 6450 /* all extra regs are per-cpu when HT is on */ 6451 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6452 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6453 6454 x86_pmu.hw_config = hsw_hw_config; 6455 x86_pmu.get_event_constraints = hsw_get_event_constraints; 6456 x86_pmu.limit_period = bdw_limit_period; 6457 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 6458 hsw_format_attr : nhm_format_attr; 6459 td_attr = hsw_events_attrs; 6460 mem_attr = hsw_mem_events_attrs; 6461 tsx_attr = hsw_tsx_events_attrs; 6462 pr_cont("Broadwell events, "); 6463 name = "broadwell"; 6464 break; 6465 6466 case INTEL_FAM6_XEON_PHI_KNL: 6467 case INTEL_FAM6_XEON_PHI_KNM: 6468 memcpy(hw_cache_event_ids, 6469 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6470 memcpy(hw_cache_extra_regs, 6471 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6472 intel_pmu_lbr_init_knl(); 6473 6474 x86_pmu.event_constraints = intel_slm_event_constraints; 6475 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints; 6476 x86_pmu.extra_regs = intel_knl_extra_regs; 6477 6478 /* all extra regs are per-cpu when HT is on */ 6479 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6480 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6481 extra_attr = slm_format_attr; 6482 pr_cont("Knights Landing/Mill events, "); 6483 name = "knights-landing"; 6484 break; 6485 6486 case INTEL_FAM6_SKYLAKE_X: 6487 pmem = true; 6488 fallthrough; 6489 case INTEL_FAM6_SKYLAKE_L: 6490 case INTEL_FAM6_SKYLAKE: 6491 case INTEL_FAM6_KABYLAKE_L: 6492 case INTEL_FAM6_KABYLAKE: 6493 case INTEL_FAM6_COMETLAKE_L: 6494 case INTEL_FAM6_COMETLAKE: 6495 x86_add_quirk(intel_pebs_isolation_quirk); 6496 x86_pmu.late_ack = true; 6497 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6498 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6499 intel_pmu_lbr_init_skl(); 6500 6501 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */ 6502 event_attr_td_recovery_bubbles.event_str_noht = 6503 "event=0xd,umask=0x1,cmask=1"; 6504 event_attr_td_recovery_bubbles.event_str_ht = 6505 "event=0xd,umask=0x1,cmask=1,any=1"; 6506 6507 x86_pmu.event_constraints = intel_skl_event_constraints; 6508 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints; 6509 x86_pmu.extra_regs = intel_skl_extra_regs; 6510 x86_pmu.pebs_aliases = intel_pebs_aliases_skl; 6511 x86_pmu.pebs_prec_dist = true; 6512 /* all extra regs are per-cpu when HT is on */ 6513 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6514 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6515 6516 x86_pmu.hw_config = hsw_hw_config; 6517 x86_pmu.get_event_constraints = hsw_get_event_constraints; 6518 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 6519 hsw_format_attr : nhm_format_attr; 6520 extra_skl_attr = skl_format_attr; 6521 td_attr = hsw_events_attrs; 6522 mem_attr = hsw_mem_events_attrs; 6523 tsx_attr = hsw_tsx_events_attrs; 6524 intel_pmu_pebs_data_source_skl(pmem); 6525 6526 /* 6527 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default. 6528 * TSX force abort hooks are not required on these systems. Only deploy 6529 * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT. 6530 */ 6531 if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) && 6532 !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) { 6533 x86_pmu.flags |= PMU_FL_TFA; 6534 x86_pmu.get_event_constraints = tfa_get_event_constraints; 6535 x86_pmu.enable_all = intel_tfa_pmu_enable_all; 6536 x86_pmu.commit_scheduling = intel_tfa_commit_scheduling; 6537 } 6538 6539 pr_cont("Skylake events, "); 6540 name = "skylake"; 6541 break; 6542 6543 case INTEL_FAM6_ICELAKE_X: 6544 case INTEL_FAM6_ICELAKE_D: 6545 x86_pmu.pebs_ept = 1; 6546 pmem = true; 6547 fallthrough; 6548 case INTEL_FAM6_ICELAKE_L: 6549 case INTEL_FAM6_ICELAKE: 6550 case INTEL_FAM6_TIGERLAKE_L: 6551 case INTEL_FAM6_TIGERLAKE: 6552 case INTEL_FAM6_ROCKETLAKE: 6553 x86_pmu.late_ack = true; 6554 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6555 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6556 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; 6557 intel_pmu_lbr_init_skl(); 6558 6559 x86_pmu.event_constraints = intel_icl_event_constraints; 6560 x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints; 6561 x86_pmu.extra_regs = intel_icl_extra_regs; 6562 x86_pmu.pebs_aliases = NULL; 6563 x86_pmu.pebs_prec_dist = true; 6564 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6565 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6566 6567 x86_pmu.hw_config = hsw_hw_config; 6568 x86_pmu.get_event_constraints = icl_get_event_constraints; 6569 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 6570 hsw_format_attr : nhm_format_attr; 6571 extra_skl_attr = skl_format_attr; 6572 mem_attr = icl_events_attrs; 6573 td_attr = icl_td_events_attrs; 6574 tsx_attr = icl_tsx_events_attrs; 6575 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); 6576 x86_pmu.lbr_pt_coexist = true; 6577 intel_pmu_pebs_data_source_skl(pmem); 6578 x86_pmu.num_topdown_events = 4; 6579 static_call_update(intel_pmu_update_topdown_event, 6580 &icl_update_topdown_event); 6581 static_call_update(intel_pmu_set_topdown_event_period, 6582 &icl_set_topdown_event_period); 6583 pr_cont("Icelake events, "); 6584 name = "icelake"; 6585 break; 6586 6587 case INTEL_FAM6_SAPPHIRERAPIDS_X: 6588 case INTEL_FAM6_EMERALDRAPIDS_X: 6589 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; 6590 x86_pmu.extra_regs = intel_spr_extra_regs; 6591 fallthrough; 6592 case INTEL_FAM6_GRANITERAPIDS_X: 6593 case INTEL_FAM6_GRANITERAPIDS_D: 6594 pmem = true; 6595 x86_pmu.late_ack = true; 6596 memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 6597 memcpy(hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 6598 6599 x86_pmu.event_constraints = intel_spr_event_constraints; 6600 x86_pmu.pebs_constraints = intel_spr_pebs_event_constraints; 6601 if (!x86_pmu.extra_regs) 6602 x86_pmu.extra_regs = intel_gnr_extra_regs; 6603 x86_pmu.limit_period = spr_limit_period; 6604 x86_pmu.pebs_ept = 1; 6605 x86_pmu.pebs_aliases = NULL; 6606 x86_pmu.pebs_prec_dist = true; 6607 x86_pmu.pebs_block = true; 6608 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6609 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6610 x86_pmu.flags |= PMU_FL_INSTR_LATENCY; 6611 6612 x86_pmu.hw_config = hsw_hw_config; 6613 x86_pmu.get_event_constraints = spr_get_event_constraints; 6614 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 6615 hsw_format_attr : nhm_format_attr; 6616 extra_skl_attr = skl_format_attr; 6617 mem_attr = spr_events_attrs; 6618 td_attr = spr_td_events_attrs; 6619 tsx_attr = spr_tsx_events_attrs; 6620 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); 6621 x86_pmu.lbr_pt_coexist = true; 6622 intel_pmu_pebs_data_source_skl(pmem); 6623 x86_pmu.num_topdown_events = 8; 6624 static_call_update(intel_pmu_update_topdown_event, 6625 &icl_update_topdown_event); 6626 static_call_update(intel_pmu_set_topdown_event_period, 6627 &icl_set_topdown_event_period); 6628 pr_cont("Sapphire Rapids events, "); 6629 name = "sapphire_rapids"; 6630 break; 6631 6632 case INTEL_FAM6_ALDERLAKE: 6633 case INTEL_FAM6_ALDERLAKE_L: 6634 case INTEL_FAM6_RAPTORLAKE: 6635 case INTEL_FAM6_RAPTORLAKE_P: 6636 case INTEL_FAM6_RAPTORLAKE_S: 6637 case INTEL_FAM6_METEORLAKE: 6638 case INTEL_FAM6_METEORLAKE_L: 6639 /* 6640 * Alder Lake has 2 types of CPU, core and atom. 6641 * 6642 * Initialize the common PerfMon capabilities here. 6643 */ 6644 x86_pmu.hybrid_pmu = kcalloc(X86_HYBRID_NUM_PMUS, 6645 sizeof(struct x86_hybrid_pmu), 6646 GFP_KERNEL); 6647 if (!x86_pmu.hybrid_pmu) 6648 return -ENOMEM; 6649 static_branch_enable(&perf_is_hybrid); 6650 x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS; 6651 6652 x86_pmu.pebs_aliases = NULL; 6653 x86_pmu.pebs_prec_dist = true; 6654 x86_pmu.pebs_block = true; 6655 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 6656 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 6657 x86_pmu.flags |= PMU_FL_INSTR_LATENCY; 6658 x86_pmu.lbr_pt_coexist = true; 6659 x86_pmu.pebs_latency_data = adl_latency_data_small; 6660 x86_pmu.num_topdown_events = 8; 6661 static_call_update(intel_pmu_update_topdown_event, 6662 &adl_update_topdown_event); 6663 static_call_update(intel_pmu_set_topdown_event_period, 6664 &adl_set_topdown_event_period); 6665 6666 x86_pmu.filter = intel_pmu_filter; 6667 x86_pmu.get_event_constraints = adl_get_event_constraints; 6668 x86_pmu.hw_config = adl_hw_config; 6669 x86_pmu.limit_period = spr_limit_period; 6670 x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type; 6671 /* 6672 * The rtm_abort_event is used to check whether to enable GPRs 6673 * for the RTM abort event. Atom doesn't have the RTM abort 6674 * event. There is no harmful to set it in the common 6675 * x86_pmu.rtm_abort_event. 6676 */ 6677 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); 6678 6679 td_attr = adl_hybrid_events_attrs; 6680 mem_attr = adl_hybrid_mem_attrs; 6681 tsx_attr = adl_hybrid_tsx_attrs; 6682 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 6683 adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr; 6684 6685 /* Initialize big core specific PerfMon capabilities.*/ 6686 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; 6687 pmu->name = "cpu_core"; 6688 pmu->cpu_type = hybrid_big; 6689 pmu->late_ack = true; 6690 if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) { 6691 pmu->num_counters = x86_pmu.num_counters + 2; 6692 pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1; 6693 } else { 6694 pmu->num_counters = x86_pmu.num_counters; 6695 pmu->num_counters_fixed = x86_pmu.num_counters_fixed; 6696 } 6697 6698 /* 6699 * Quirk: For some Alder Lake machine, when all E-cores are disabled in 6700 * a BIOS, the leaf 0xA will enumerate all counters of P-cores. However, 6701 * the X86_FEATURE_HYBRID_CPU is still set. The above codes will 6702 * mistakenly add extra counters for P-cores. Correct the number of 6703 * counters here. 6704 */ 6705 if ((pmu->num_counters > 8) || (pmu->num_counters_fixed > 4)) { 6706 pmu->num_counters = x86_pmu.num_counters; 6707 pmu->num_counters_fixed = x86_pmu.num_counters_fixed; 6708 } 6709 6710 pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); 6711 pmu->unconstrained = (struct event_constraint) 6712 __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, 6713 0, pmu->num_counters, 0, 0); 6714 pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; 6715 pmu->intel_cap.perf_metrics = 1; 6716 pmu->intel_cap.pebs_output_pt_available = 0; 6717 6718 memcpy(pmu->hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids)); 6719 memcpy(pmu->hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs)); 6720 pmu->event_constraints = intel_spr_event_constraints; 6721 pmu->pebs_constraints = intel_spr_pebs_event_constraints; 6722 pmu->extra_regs = intel_spr_extra_regs; 6723 6724 /* Initialize Atom core specific PerfMon capabilities.*/ 6725 pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; 6726 pmu->name = "cpu_atom"; 6727 pmu->cpu_type = hybrid_small; 6728 pmu->mid_ack = true; 6729 pmu->num_counters = x86_pmu.num_counters; 6730 pmu->num_counters_fixed = x86_pmu.num_counters_fixed; 6731 pmu->max_pebs_events = x86_pmu.max_pebs_events; 6732 pmu->unconstrained = (struct event_constraint) 6733 __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, 6734 0, pmu->num_counters, 0, 0); 6735 pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; 6736 pmu->intel_cap.perf_metrics = 0; 6737 pmu->intel_cap.pebs_output_pt_available = 1; 6738 6739 memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids)); 6740 memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs)); 6741 pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; 6742 pmu->event_constraints = intel_slm_event_constraints; 6743 pmu->pebs_constraints = intel_grt_pebs_event_constraints; 6744 pmu->extra_regs = intel_grt_extra_regs; 6745 if (is_mtl(boot_cpu_data.x86_model)) { 6746 x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].extra_regs = intel_gnr_extra_regs; 6747 x86_pmu.pebs_latency_data = mtl_latency_data_small; 6748 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 6749 mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; 6750 mem_attr = mtl_hybrid_mem_attrs; 6751 intel_pmu_pebs_data_source_mtl(); 6752 x86_pmu.get_event_constraints = mtl_get_event_constraints; 6753 pmu->extra_regs = intel_cmt_extra_regs; 6754 pr_cont("Meteorlake Hybrid events, "); 6755 name = "meteorlake_hybrid"; 6756 } else { 6757 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; 6758 intel_pmu_pebs_data_source_adl(); 6759 pr_cont("Alderlake Hybrid events, "); 6760 name = "alderlake_hybrid"; 6761 } 6762 break; 6763 6764 default: 6765 switch (x86_pmu.version) { 6766 case 1: 6767 x86_pmu.event_constraints = intel_v1_event_constraints; 6768 pr_cont("generic architected perfmon v1, "); 6769 name = "generic_arch_v1"; 6770 break; 6771 case 2: 6772 case 3: 6773 case 4: 6774 /* 6775 * default constraints for v2 and up 6776 */ 6777 x86_pmu.event_constraints = intel_gen_event_constraints; 6778 pr_cont("generic architected perfmon, "); 6779 name = "generic_arch_v2+"; 6780 break; 6781 default: 6782 /* 6783 * The default constraints for v5 and up can support up to 6784 * 16 fixed counters. For the fixed counters 4 and later, 6785 * the pseudo-encoding is applied. 6786 * The constraints may be cut according to the CPUID enumeration 6787 * by inserting the EVENT_CONSTRAINT_END. 6788 */ 6789 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) 6790 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED; 6791 intel_v5_gen_event_constraints[x86_pmu.num_counters_fixed].weight = -1; 6792 x86_pmu.event_constraints = intel_v5_gen_event_constraints; 6793 pr_cont("generic architected perfmon, "); 6794 name = "generic_arch_v5+"; 6795 break; 6796 } 6797 } 6798 6799 snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name); 6800 6801 if (!is_hybrid()) { 6802 group_events_td.attrs = td_attr; 6803 group_events_mem.attrs = mem_attr; 6804 group_events_tsx.attrs = tsx_attr; 6805 group_format_extra.attrs = extra_attr; 6806 group_format_extra_skl.attrs = extra_skl_attr; 6807 6808 x86_pmu.attr_update = attr_update; 6809 } else { 6810 hybrid_group_events_td.attrs = td_attr; 6811 hybrid_group_events_mem.attrs = mem_attr; 6812 hybrid_group_events_tsx.attrs = tsx_attr; 6813 hybrid_group_format_extra.attrs = extra_attr; 6814 6815 x86_pmu.attr_update = hybrid_attr_update; 6816 } 6817 6818 intel_pmu_check_num_counters(&x86_pmu.num_counters, 6819 &x86_pmu.num_counters_fixed, 6820 &x86_pmu.intel_ctrl, 6821 (u64)fixed_mask); 6822 6823 /* AnyThread may be deprecated on arch perfmon v5 or later */ 6824 if (x86_pmu.intel_cap.anythread_deprecated) 6825 x86_pmu.format_attrs = intel_arch_formats_attr; 6826 6827 intel_pmu_check_event_constraints(x86_pmu.event_constraints, 6828 x86_pmu.num_counters, 6829 x86_pmu.num_counters_fixed, 6830 x86_pmu.intel_ctrl); 6831 /* 6832 * Access LBR MSR may cause #GP under certain circumstances. 6833 * Check all LBR MSR here. 6834 * Disable LBR access if any LBR MSRs can not be accessed. 6835 */ 6836 if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL)) 6837 x86_pmu.lbr_nr = 0; 6838 for (i = 0; i < x86_pmu.lbr_nr; i++) { 6839 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) && 6840 check_msr(x86_pmu.lbr_to + i, 0xffffUL))) 6841 x86_pmu.lbr_nr = 0; 6842 } 6843 6844 if (x86_pmu.lbr_nr) { 6845 intel_pmu_lbr_init(); 6846 6847 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr); 6848 6849 /* only support branch_stack snapshot for perfmon >= v2 */ 6850 if (x86_pmu.disable_all == intel_pmu_disable_all) { 6851 if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) { 6852 static_call_update(perf_snapshot_branch_stack, 6853 intel_pmu_snapshot_arch_branch_stack); 6854 } else { 6855 static_call_update(perf_snapshot_branch_stack, 6856 intel_pmu_snapshot_branch_stack); 6857 } 6858 } 6859 } 6860 6861 intel_pmu_check_extra_regs(x86_pmu.extra_regs); 6862 6863 /* Support full width counters using alternative MSR range */ 6864 if (x86_pmu.intel_cap.full_width_write) { 6865 x86_pmu.max_period = x86_pmu.cntval_mask >> 1; 6866 x86_pmu.perfctr = MSR_IA32_PMC0; 6867 pr_cont("full-width counters, "); 6868 } 6869 6870 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) 6871 x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; 6872 6873 if (is_hybrid()) 6874 intel_pmu_check_hybrid_pmus((u64)fixed_mask); 6875 6876 if (x86_pmu.intel_cap.pebs_timing_info) 6877 x86_pmu.flags |= PMU_FL_RETIRE_LATENCY; 6878 6879 intel_aux_output_init(); 6880 6881 return 0; 6882 } 6883 6884 /* 6885 * HT bug: phase 2 init 6886 * Called once we have valid topology information to check 6887 * whether or not HT is enabled 6888 * If HT is off, then we disable the workaround 6889 */ 6890 static __init int fixup_ht_bug(void) 6891 { 6892 int c; 6893 /* 6894 * problem not present on this CPU model, nothing to do 6895 */ 6896 if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED)) 6897 return 0; 6898 6899 if (topology_max_smt_threads() > 1) { 6900 pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n"); 6901 return 0; 6902 } 6903 6904 cpus_read_lock(); 6905 6906 hardlockup_detector_perf_stop(); 6907 6908 x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED); 6909 6910 x86_pmu.start_scheduling = NULL; 6911 x86_pmu.commit_scheduling = NULL; 6912 x86_pmu.stop_scheduling = NULL; 6913 6914 hardlockup_detector_perf_restart(); 6915 6916 for_each_online_cpu(c) 6917 free_excl_cntrs(&per_cpu(cpu_hw_events, c)); 6918 6919 cpus_read_unlock(); 6920 pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n"); 6921 return 0; 6922 } 6923 subsys_initcall(fixup_ht_bug) 6924