/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/intel-family.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>

#include "../perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

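/*
 * Illustrative aside (not part of this file, and not used by the driver):
 * the raw encodings in the tables above pack the architectural event-select
 * byte into bits 0-7 and the unit mask into bits 8-15, so e.g. 0x4f2e is
 * event 0x2e with umask 0x4f. The second argument of INTEL_EVENT_CONSTRAINT()
 * is a bitmask of the general-purpose counters the event may be scheduled on
 * (0x3 = counters 0-1). A minimal sketch of that decoding, using hypothetical
 * helper names:
 */
static inline unsigned int example_event_code(u64 config)
{
	return config & 0xff;		/* event select, EVENTSEL bits 0-7 */
}

static inline unsigned int example_event_umask(u64 config)
{
	return (config >> 8) & 0xff;	/* unit mask, EVENTSEL bits 8-15 */
}
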
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_skl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */

	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	/*
	 * Note the low 8 bits eventsel code is not a continuous field, containing
	 * some #GPing bits. These are masked out.
	 */
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

EVENT_ATTR_STR(mem-loads,  mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,  mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");

static struct attribute *nhm_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

/*
 * topdown events for Intel Core CPUs.
 *
 * The events are all in slots, which is a free slot in a 4 wide
 * pipeline. Some events are already reported in slots, for cycle
 * events we multiply by the pipeline width (4).
 *
 * With Hyper Threading on, topdown metrics are either summed or averaged
 * between the threads of a core: (count_t0 + count_t1).
 *
 * For the average case the metric is always scaled to pipeline width,
 * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
 */

EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
	"event=0x3c,umask=0x0",			/* cpu_clk_unhalted.thread */
	"event=0x3c,umask=0x0,any=1");		/* cpu_clk_unhalted.thread_any */
EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
	"event=0xe,umask=0x1");			/* uops_issued.any */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
	"event=0xc2,umask=0x2");		/* uops_retired.retire_slots */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
	"event=0x9c,umask=0x1");		/* idq_uops_not_delivered_core */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
	"event=0xd,umask=0x3,cmask=1",		/* int_misc.recovery_cycles */
	"event=0xd,umask=0x3,cmask=1,any=1");	/* int_misc.recovery_cycles_any */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
	"4", "2");

static struct attribute *snb_events_attrs[] = {
	EVENT_PTR(td_slots_issued),
	EVENT_PTR(td_slots_retired),
	EVENT_PTR(td_fetch_bubbles),
	EVENT_PTR(td_total_slots),
	EVENT_PTR(td_total_slots_scale),
	EVENT_PTR(td_recovery_bubbles),
	EVENT_PTR(td_recovery_bubbles_scale),
	NULL,
};

static struct attribute *snb_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};

static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts.
 * - icache miss does not include decoded icache
 */

#define SKL_DEMAND_DATA_RD		BIT_ULL(0)
#define SKL_DEMAND_RFO			BIT_ULL(1)
#define SKL_ANY_RESPONSE		BIT_ULL(16)
#define SKL_SUPPLIER_NONE		BIT_ULL(17)
#define SKL_L3_MISS_LOCAL_DRAM		BIT_ULL(26)
#define SKL_L3_MISS_REMOTE_HOP0_DRAM	BIT_ULL(27)
#define SKL_L3_MISS_REMOTE_HOP1_DRAM	BIT_ULL(28)
#define SKL_L3_MISS_REMOTE_HOP2P_DRAM	BIT_ULL(29)
#define SKL_L3_MISS			(SKL_L3_MISS_LOCAL_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
#define SKL_SPL_HIT			BIT_ULL(30)
#define SKL_SNOOP_NONE			BIT_ULL(31)
#define SKL_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define SKL_SNOOP_MISS			BIT_ULL(33)
#define SKL_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define SKL_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define SKL_SNOOP_HITM			BIT_ULL(36)
#define SKL_SNOOP_NON_DRAM		BIT_ULL(37)
#define SKL_ANY_SNOOP			(SKL_SPL_HIT|SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
#define SKL_DEMAND_READ			SKL_DEMAND_DATA_RD
#define SKL_SNOOP_DRAM			(SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SPL_HIT)
#define SKL_DEMAND_WRITE		SKL_DEMAND_RFO
#define SKL_LLC_ACCESS			SKL_ANY_RESPONSE
#define SKL_L3_MISS_REMOTE		(SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)

static __initconst const u64 skl_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 skl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS) ] = SKL_DEMAND_READ|
				       SKL_L3_MISS|SKL_ANY_SNOOP|
				       SKL_SUPPLIER_NONE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS|SKL_ANY_SNOOP|
				       SKL_SUPPLIER_NONE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

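/*
 * Illustrative aside (not part of this file, and not kernel code): the
 * OFFCORE_RESPONSE encodings built from bit masks like the SKL_* ones above
 * are consumed by user space as a raw event (0x01b7 = event 0xb7, umask 0x01)
 * whose response mask goes into perf_event_attr::config1. A minimal
 * user-space sketch, assuming a Skylake-style CPU and counting demand data
 * reads with any response (bit 0 | bit 16, cf. SKL_DEMAND_DATA_RD and
 * SKL_ANY_RESPONSE):
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.config = 0x01b7;				/* OFFCORE_RESPONSE.0 */
	attr.config1 = (1ULL << 0) | (1ULL << 16);	/* demand rd | any response */
	attr.exclude_kernel = 1;

	/* count for the calling thread on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	sleep(1);					/* measurement interval */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("offcore demand reads: %llu\n", count);
	close(fd);
	return 0;
}
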
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)

static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },

};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */

#define HSW_DEMAND_DATA_RD		BIT_ULL(0)
#define HSW_DEMAND_RFO			BIT_ULL(1)
#define HSW_ANY_RESPONSE		BIT_ULL(16)
#define HSW_SUPPLIER_NONE		BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
#define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE			BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define HSW_SNOOP_MISS			BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define HSW_SNOOP_HITM			BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
#define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\
					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS			HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL		BIT(26)
#define BDW_L3_MISS			(BDW_L3_MISS_LOCAL| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)


static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)

static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

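/*
 * Illustrative aside (not part of this file, and not kernel code): the
 * topdown-* events exported earlier for the big cores, and the Silvermont/
 * Goldmont variants that follow, are meant to be combined by the perf tool.
 * A minimal user-space sketch of one common formulation of the level-1
 * metrics, using hypothetical function names; the counts are assumed to be
 * already multiplied by the corresponding topdown-*-slots.scale attribute
 * (4 with HT off, 2 with HT on and the two sibling threads' counts summed):
 */
static double example_frontend_bound(double fetch_bubbles, double total_slots)
{
	return fetch_bubbles / total_slots;
}

static double example_bad_speculation(double slots_issued, double slots_retired,
				      double recovery_bubbles, double total_slots)
{
	/* retiring is slots_retired / total_slots; backend bound is the rest */
	return (slots_issued - slots_retired + recovery_bubbles) / total_slots;
}
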
EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
/* no_alloc_cycles.not_delivered */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
	"event=0xca,umask=0x50");
EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
/* uops_retired.all */
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
	"event=0xc2,umask=0x10");
/* uops_retired.all */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
	"event=0xc2,umask=0x10");

static struct attribute *slm_events_attrs[] = {
	EVENT_PTR(td_total_slots_slm),
	EVENT_PTR(td_total_slots_scale_slm),
	EVENT_PTR(td_fetch_bubbles_slm),
	EVENT_PTR(td_fetch_bubbles_scale_slm),
	EVENT_PTR(td_slots_issued_slm),
	EVENT_PTR(td_slots_retired_slm),
	NULL
};

static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
	EVENT_EXTRA_END
};

#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)

static __initconst const u64 slm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
	},
 },
};

static __initconst const u64 slm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
		[ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
/* UOPS_NOT_DELIVERED.ANY */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
/* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
/* UOPS_RETIRED.ANY */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
/* UOPS_ISSUED.ANY */
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");

static struct attribute *glm_events_attrs[] = {
	EVENT_PTR(td_total_slots_glm),
	EVENT_PTR(td_total_slots_scale_glm),
	EVENT_PTR(td_fetch_bubbles_glm),
	EVENT_PTR(td_recovery_bubbles_glm),
	EVENT_PTR(td_slots_issued_glm),
	EVENT_PTR(td_slots_retired_glm),
	NULL
};

static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
	EVENT_EXTRA_END
};

#define GLM_DEMAND_DATA_RD		BIT_ULL(0)
#define GLM_DEMAND_RFO			BIT_ULL(1)
#define GLM_ANY_RESPONSE		BIT_ULL(16)
#define GLM_SNP_NONE_OR_MISS		BIT_ULL(33)
#define GLM_DEMAND_READ			GLM_DEMAND_DATA_RD
#define GLM_DEMAND_WRITE		GLM_DEMAND_RFO
#define GLM_DEMAND_PREFETCH		(SNB_PF_DATA_RD|SNB_PF_RFO)
#define GLM_LLC_ACCESS			GLM_ANY_RESPONSE
#define GLM_SNP_ANY			(GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
#define GLM_LLC_MISS			(GLM_SNP_ANY|SNB_NON_DRAM)

static __initconst const u64 glm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
			[C(RESULT_MISS)] = 0x0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
			[C(RESULT_MISS)] = 0x0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x0,
			[C(RESULT_MISS)] = 0x0,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
			[C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x0,
			[C(RESULT_MISS)] = 0x0,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
			[C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
			[C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
			[C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
			[C(RESULT_MISS)] = 0x0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
			[C(RESULT_MISS)] = 0x0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x0,
			[C(RESULT_MISS)] = 0x0,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
			[C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
			[C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
	},
};

static __initconst const u64 glm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = GLM_DEMAND_READ|
					     GLM_LLC_ACCESS,
			[C(RESULT_MISS)] = GLM_DEMAND_READ|
					   GLM_LLC_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
					     GLM_LLC_ACCESS,
			[C(RESULT_MISS)] = GLM_DEMAND_WRITE|
					   GLM_LLC_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
					     GLM_LLC_ACCESS,
			[C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
					   GLM_LLC_MISS,
		},
	},
};

static __initconst const u64 glp_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
			[C(RESULT_MISS)] = 0x0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
			[C(RESULT_MISS)] = 0x0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x0,
			[C(RESULT_MISS)] = 0x0,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
			[C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x0,
			[C(RESULT_MISS)] = 0x0,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
0x1b7, /* OFFCORE_RESPONSE */ 1752 }, 1753 [C(OP_WRITE)] = { 1754 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1755 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */ 1756 }, 1757 [C(OP_PREFETCH)] = { 1758 [C(RESULT_ACCESS)] = 0x0, 1759 [C(RESULT_MISS)] = 0x0, 1760 }, 1761 }, 1762 [C(DTLB)] = { 1763 [C(OP_READ)] = { 1764 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ 1765 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ 1766 }, 1767 [C(OP_WRITE)] = { 1768 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ 1769 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */ 1770 }, 1771 [C(OP_PREFETCH)] = { 1772 [C(RESULT_ACCESS)] = 0x0, 1773 [C(RESULT_MISS)] = 0x0, 1774 }, 1775 }, 1776 [C(ITLB)] = { 1777 [C(OP_READ)] = { 1778 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */ 1779 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */ 1780 }, 1781 [C(OP_WRITE)] = { 1782 [C(RESULT_ACCESS)] = -1, 1783 [C(RESULT_MISS)] = -1, 1784 }, 1785 [C(OP_PREFETCH)] = { 1786 [C(RESULT_ACCESS)] = -1, 1787 [C(RESULT_MISS)] = -1, 1788 }, 1789 }, 1790 [C(BPU)] = { 1791 [C(OP_READ)] = { 1792 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ 1793 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */ 1794 }, 1795 [C(OP_WRITE)] = { 1796 [C(RESULT_ACCESS)] = -1, 1797 [C(RESULT_MISS)] = -1, 1798 }, 1799 [C(OP_PREFETCH)] = { 1800 [C(RESULT_ACCESS)] = -1, 1801 [C(RESULT_MISS)] = -1, 1802 }, 1803 }, 1804 }; 1805 1806 static __initconst const u64 glp_hw_cache_extra_regs 1807 [PERF_COUNT_HW_CACHE_MAX] 1808 [PERF_COUNT_HW_CACHE_OP_MAX] 1809 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 1810 [C(LL)] = { 1811 [C(OP_READ)] = { 1812 [C(RESULT_ACCESS)] = GLM_DEMAND_READ| 1813 GLM_LLC_ACCESS, 1814 [C(RESULT_MISS)] = GLM_DEMAND_READ| 1815 GLM_LLC_MISS, 1816 }, 1817 [C(OP_WRITE)] = { 1818 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE| 1819 GLM_LLC_ACCESS, 1820 [C(RESULT_MISS)] = GLM_DEMAND_WRITE| 1821 GLM_LLC_MISS, 1822 }, 1823 [C(OP_PREFETCH)] = { 1824 [C(RESULT_ACCESS)] = 0x0, 1825 [C(RESULT_MISS)] = 0x0, 1826 }, 1827 }, 1828 }; 1829 1830 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */ 1831 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */ 1832 #define KNL_MCDRAM_LOCAL BIT_ULL(21) 1833 #define KNL_MCDRAM_FAR BIT_ULL(22) 1834 #define KNL_DDR_LOCAL BIT_ULL(23) 1835 #define KNL_DDR_FAR BIT_ULL(24) 1836 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \ 1837 KNL_DDR_LOCAL | KNL_DDR_FAR) 1838 #define KNL_L2_READ SLM_DMND_READ 1839 #define KNL_L2_WRITE SLM_DMND_WRITE 1840 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH 1841 #define KNL_L2_ACCESS SLM_LLC_ACCESS 1842 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \ 1843 KNL_DRAM_ANY | SNB_SNP_ANY | \ 1844 SNB_NON_DRAM) 1845 1846 static __initconst const u64 knl_hw_cache_extra_regs 1847 [PERF_COUNT_HW_CACHE_MAX] 1848 [PERF_COUNT_HW_CACHE_OP_MAX] 1849 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 1850 [C(LL)] = { 1851 [C(OP_READ)] = { 1852 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS, 1853 [C(RESULT_MISS)] = 0, 1854 }, 1855 [C(OP_WRITE)] = { 1856 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS, 1857 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS, 1858 }, 1859 [C(OP_PREFETCH)] = { 1860 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS, 1861 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS, 1862 }, 1863 }, 1864 }; 1865 1866 /* 1867 * Used from PMIs where the LBRs are already disabled. 1868 * 1869 * This function could be called consecutively. It is required to remain in 1870 * disabled state if called consecutively. 
1871 * 1872 * During consecutive calls, the same disable value will be written to related 1873 * registers, so the PMU state remains unchanged. 1874 * 1875 * intel_bts events don't coexist with intel PMU's BTS events because of 1876 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them 1877 * disabled around intel PMU's event batching etc, only inside the PMI handler. 1878 */ 1879 static void __intel_pmu_disable_all(void) 1880 { 1881 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1882 1883 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 1884 1885 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) 1886 intel_pmu_disable_bts(); 1887 1888 intel_pmu_pebs_disable_all(); 1889 } 1890 1891 static void intel_pmu_disable_all(void) 1892 { 1893 __intel_pmu_disable_all(); 1894 intel_pmu_lbr_disable_all(); 1895 } 1896 1897 static void __intel_pmu_enable_all(int added, bool pmi) 1898 { 1899 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1900 1901 intel_pmu_pebs_enable_all(); 1902 intel_pmu_lbr_enable_all(pmi); 1903 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 1904 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask); 1905 1906 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { 1907 struct perf_event *event = 1908 cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; 1909 1910 if (WARN_ON_ONCE(!event)) 1911 return; 1912 1913 intel_pmu_enable_bts(event->hw.config); 1914 } 1915 } 1916 1917 static void intel_pmu_enable_all(int added) 1918 { 1919 __intel_pmu_enable_all(added, false); 1920 } 1921 1922 /* 1923 * Workaround for: 1924 * Intel Errata AAK100 (model 26) 1925 * Intel Errata AAP53 (model 30) 1926 * Intel Errata BD53 (model 44) 1927 * 1928 * The official story: 1929 * These chips need to be 'reset' when adding counters by programming the 1930 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either 1931 * in sequence on the same PMC or on different PMCs. 1932 * 1933 * In practice it appears some of these events do in fact count, and 1934 * we need to program all 4 events. 1935 */ 1936 static void intel_pmu_nhm_workaround(void) 1937 { 1938 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1939 static const unsigned long nhm_magic[4] = { 1940 0x4300B5, 1941 0x4300D2, 1942 0x4300B1, 1943 0x4300B1 1944 }; 1945 struct perf_event *event; 1946 int i; 1947 1948 /* 1949 * The erratum requires the following steps: 1950 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL; 1951 * 2) Configure 4 PERFEVTSELx with the magic events and clear 1952 * the corresponding PMCx; 1953 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL; 1954 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL; 1955 * 5) Clear 4 pairs of PERFEVTSELx and PMCx; 1956 */ 1957 1958 /* 1959 * The real steps we choose are a little different from above. 1960 * A) To reduce MSR operations, we don't run step 1) as those MSRs 1961 * are already cleared before this function is called; 1962 * B) Call x86_perf_event_update to save PMCx before configuring 1963 * PERFEVTSELx with magic number; 1964 * C) With step 5), we do clear only when the PERFEVTSELx is 1965 * not used currently.
1966 * D) Call x86_perf_event_set_period to restore PMCx; 1967 */ 1968 1969 /* We always operate 4 pairs of PERF Counters */ 1970 for (i = 0; i < 4; i++) { 1971 event = cpuc->events[i]; 1972 if (event) 1973 x86_perf_event_update(event); 1974 } 1975 1976 for (i = 0; i < 4; i++) { 1977 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]); 1978 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0); 1979 } 1980 1981 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf); 1982 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); 1983 1984 for (i = 0; i < 4; i++) { 1985 event = cpuc->events[i]; 1986 1987 if (event) { 1988 x86_perf_event_set_period(event); 1989 __x86_pmu_enable_event(&event->hw, 1990 ARCH_PERFMON_EVENTSEL_ENABLE); 1991 } else 1992 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0); 1993 } 1994 } 1995 1996 static void intel_pmu_nhm_enable_all(int added) 1997 { 1998 if (added) 1999 intel_pmu_nhm_workaround(); 2000 intel_pmu_enable_all(added); 2001 } 2002 2003 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on) 2004 { 2005 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0; 2006 2007 if (cpuc->tfa_shadow != val) { 2008 cpuc->tfa_shadow = val; 2009 wrmsrl(MSR_TSX_FORCE_ABORT, val); 2010 } 2011 } 2012 2013 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) 2014 { 2015 /* 2016 * We're going to use PMC3, make sure TFA is set before we touch it. 2017 */ 2018 if (cntr == 3 && !cpuc->is_fake) 2019 intel_set_tfa(cpuc, true); 2020 } 2021 2022 static void intel_tfa_pmu_enable_all(int added) 2023 { 2024 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2025 2026 /* 2027 * If we find PMC3 is no longer used when we enable the PMU, we can 2028 * clear TFA. 2029 */ 2030 if (!test_bit(3, cpuc->active_mask)) 2031 intel_set_tfa(cpuc, false); 2032 2033 intel_pmu_enable_all(added); 2034 } 2035 2036 static void enable_counter_freeze(void) 2037 { 2038 update_debugctlmsr(get_debugctlmsr() | 2039 DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI); 2040 } 2041 2042 static void disable_counter_freeze(void) 2043 { 2044 update_debugctlmsr(get_debugctlmsr() & 2045 ~DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI); 2046 } 2047 2048 static inline u64 intel_pmu_get_status(void) 2049 { 2050 u64 status; 2051 2052 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); 2053 2054 return status; 2055 } 2056 2057 static inline void intel_pmu_ack_status(u64 ack) 2058 { 2059 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); 2060 } 2061 2062 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc) 2063 { 2064 int idx = hwc->idx - INTEL_PMC_IDX_FIXED; 2065 u64 ctrl_val, mask; 2066 2067 mask = 0xfULL << (idx * 4); 2068 2069 rdmsrl(hwc->config_base, ctrl_val); 2070 ctrl_val &= ~mask; 2071 wrmsrl(hwc->config_base, ctrl_val); 2072 } 2073 2074 static inline bool event_is_checkpointed(struct perf_event *event) 2075 { 2076 return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0; 2077 } 2078 2079 static void intel_pmu_disable_event(struct perf_event *event) 2080 { 2081 struct hw_perf_event *hwc = &event->hw; 2082 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2083 2084 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { 2085 intel_pmu_disable_bts(); 2086 intel_pmu_drain_bts_buffer(); 2087 return; 2088 } 2089 2090 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx); 2091 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); 2092 cpuc->intel_cp_status &= ~(1ull << hwc->idx); 2093 2094 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { 2095 intel_pmu_disable_fixed(hwc); 2096 return; 2097 } 2098 2099 x86_pmu_disable_event(event); 2100 2101 /* 2102 
* Needs to be called after x86_pmu_disable_event, 2103 * so we don't trigger the event without PEBS bit set. 2104 */ 2105 if (unlikely(event->attr.precise_ip)) 2106 intel_pmu_pebs_disable(event); 2107 } 2108 2109 static void intel_pmu_del_event(struct perf_event *event) 2110 { 2111 if (needs_branch_stack(event)) 2112 intel_pmu_lbr_del(event); 2113 if (event->attr.precise_ip) 2114 intel_pmu_pebs_del(event); 2115 } 2116 2117 static void intel_pmu_read_event(struct perf_event *event) 2118 { 2119 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) 2120 intel_pmu_auto_reload_read(event); 2121 else 2122 x86_perf_event_update(event); 2123 } 2124 2125 static void intel_pmu_enable_fixed(struct perf_event *event) 2126 { 2127 struct hw_perf_event *hwc = &event->hw; 2128 int idx = hwc->idx - INTEL_PMC_IDX_FIXED; 2129 u64 ctrl_val, mask, bits = 0; 2130 2131 /* 2132 * Enable IRQ generation (0x8), if not PEBS, 2133 * and enable ring-3 counting (0x2) and ring-0 counting (0x1) 2134 * if requested: 2135 */ 2136 if (!event->attr.precise_ip) 2137 bits |= 0x8; 2138 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) 2139 bits |= 0x2; 2140 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) 2141 bits |= 0x1; 2142 2143 /* 2144 * ANY bit is supported in v3 and up 2145 */ 2146 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY) 2147 bits |= 0x4; 2148 2149 bits <<= (idx * 4); 2150 mask = 0xfULL << (idx * 4); 2151 2152 rdmsrl(hwc->config_base, ctrl_val); 2153 ctrl_val &= ~mask; 2154 ctrl_val |= bits; 2155 wrmsrl(hwc->config_base, ctrl_val); 2156 } 2157 2158 static void intel_pmu_enable_event(struct perf_event *event) 2159 { 2160 struct hw_perf_event *hwc = &event->hw; 2161 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2162 2163 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { 2164 if (!__this_cpu_read(cpu_hw_events.enabled)) 2165 return; 2166 2167 intel_pmu_enable_bts(hwc->config); 2168 return; 2169 } 2170 2171 if (event->attr.exclude_host) 2172 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx); 2173 if (event->attr.exclude_guest) 2174 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx); 2175 2176 if (unlikely(event_is_checkpointed(event))) 2177 cpuc->intel_cp_status |= (1ull << hwc->idx); 2178 2179 if (unlikely(event->attr.precise_ip)) 2180 intel_pmu_pebs_enable(event); 2181 2182 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { 2183 intel_pmu_enable_fixed(event); 2184 return; 2185 } 2186 2187 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); 2188 } 2189 2190 static void intel_pmu_add_event(struct perf_event *event) 2191 { 2192 if (event->attr.precise_ip) 2193 intel_pmu_pebs_add(event); 2194 if (needs_branch_stack(event)) 2195 intel_pmu_lbr_add(event); 2196 } 2197 2198 /* 2199 * Save and restart an expired event. Called by NMI contexts, 2200 * so it has to be careful about preempting normal event ops: 2201 */ 2202 int intel_pmu_save_and_restart(struct perf_event *event) 2203 { 2204 x86_perf_event_update(event); 2205 /* 2206 * For a checkpointed counter always reset back to 0. This 2207 * avoids a situation where the counter overflows, aborts the 2208 * transaction and is then set back to shortly before the 2209 * overflow, and overflows and aborts again. 
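 *
 * Concretely (restating the scenario above): without this reset the
 * rolled-back count sits just below the overflow threshold again, so the
 * counter re-overflows and the transaction re-aborts almost immediately
 * after it resumes; starting from 0 and keeping prev_count in sync
 * breaks that loop.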
2210 */ 2211 if (unlikely(event_is_checkpointed(event))) { 2212 /* No race with NMIs because the counter should not be armed */ 2213 wrmsrl(event->hw.event_base, 0); 2214 local64_set(&event->hw.prev_count, 0); 2215 } 2216 return x86_perf_event_set_period(event); 2217 } 2218 2219 static void intel_pmu_reset(void) 2220 { 2221 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); 2222 unsigned long flags; 2223 int idx; 2224 2225 if (!x86_pmu.num_counters) 2226 return; 2227 2228 local_irq_save(flags); 2229 2230 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id()); 2231 2232 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 2233 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull); 2234 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull); 2235 } 2236 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) 2237 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); 2238 2239 if (ds) 2240 ds->bts_index = ds->bts_buffer_base; 2241 2242 /* Ack all overflows and disable fixed counters */ 2243 if (x86_pmu.version >= 2) { 2244 intel_pmu_ack_status(intel_pmu_get_status()); 2245 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 2246 } 2247 2248 /* Reset LBRs and LBR freezing */ 2249 if (x86_pmu.lbr_nr) { 2250 update_debugctlmsr(get_debugctlmsr() & 2251 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR)); 2252 } 2253 2254 local_irq_restore(flags); 2255 } 2256 2257 static int handle_pmi_common(struct pt_regs *regs, u64 status) 2258 { 2259 struct perf_sample_data data; 2260 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2261 int bit; 2262 int handled = 0; 2263 2264 inc_irq_stat(apic_perf_irqs); 2265 2266 /* 2267 * Ignore a range of extra bits in status that do not indicate 2268 * overflow by themselves. 2269 */ 2270 status &= ~(GLOBAL_STATUS_COND_CHG | 2271 GLOBAL_STATUS_ASIF | 2272 GLOBAL_STATUS_LBRS_FROZEN); 2273 if (!status) 2274 return 0; 2275 /* 2276 * In case multiple PEBS events are sampled at the same time, 2277 * it is possible to have GLOBAL_STATUS bit 62 set indicating 2278 * PEBS buffer overflow and also seeing at most 3 PEBS counters 2279 * having their bits set in the status register. This is a sign 2280 * that there was at least one PEBS record pending at the time 2281 * of the PMU interrupt. PEBS counters must only be processed 2282 * via the drain_pebs() calls and not via the regular sample 2283 * processing loop coming later in this function, otherwise 2284 * phony regular samples may be generated in the sampling buffer 2285 * not marked with the EXACT tag. Another possibility is to have 2286 * one PEBS event and at least one non-PEBS event which overflows 2287 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will 2288 * not be set, yet the overflow status bit for the PEBS counter will 2289 * be set on Skylake. 2290 * 2291 * To avoid this problem, we systematically ignore the PEBS-enabled 2292 * counters from the GLOBAL_STATUS mask and we always process PEBS 2293 * events via drain_pebs().
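 *
 * Worked example (illustrative numbers): with PEBS active on counters 0
 * and 1, cpuc->pebs_enabled has bits 0-1 set; a status word with bit 62
 * plus bits 0-1 set is reduced by the masking below to just bit 62, so
 * both records are consumed by drain_pebs() and the generic loop further
 * down never emits phony, non-EXACT samples for them.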
2294 */ 2295 if (x86_pmu.flags & PMU_FL_PEBS_ALL) 2296 status &= ~cpuc->pebs_enabled; 2297 else 2298 status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK); 2299 2300 /* 2301 * PEBS overflow sets bit 62 in the global status register 2302 */ 2303 if (__test_and_clear_bit(62, (unsigned long *)&status)) { 2304 handled++; 2305 x86_pmu.drain_pebs(regs); 2306 status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI; 2307 } 2308 2309 /* 2310 * Intel PT 2311 */ 2312 if (__test_and_clear_bit(55, (unsigned long *)&status)) { 2313 handled++; 2314 intel_pt_interrupt(); 2315 } 2316 2317 /* 2318 * Checkpointed counters can lead to 'spurious' PMIs because the 2319 * rollback caused by the PMI will have cleared the overflow status 2320 * bit. Therefore always force probe these counters. 2321 */ 2322 status |= cpuc->intel_cp_status; 2323 2324 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { 2325 struct perf_event *event = cpuc->events[bit]; 2326 2327 handled++; 2328 2329 if (!test_bit(bit, cpuc->active_mask)) 2330 continue; 2331 2332 if (!intel_pmu_save_and_restart(event)) 2333 continue; 2334 2335 perf_sample_data_init(&data, 0, event->hw.last_period); 2336 2337 if (has_branch_stack(event)) 2338 data.br_stack = &cpuc->lbr_stack; 2339 2340 if (perf_event_overflow(event, &data, regs)) 2341 x86_pmu_stop(event, 0); 2342 } 2343 2344 return handled; 2345 } 2346 2347 static bool disable_counter_freezing = true; 2348 static int __init intel_perf_counter_freezing_setup(char *s) 2349 { 2350 bool res; 2351 2352 if (kstrtobool(s, &res)) 2353 return -EINVAL; 2354 2355 disable_counter_freezing = !res; 2356 return 1; 2357 } 2358 __setup("perf_v4_pmi=", intel_perf_counter_freezing_setup); 2359 2360 /* 2361 * Simplified handler for Arch Perfmon v4: 2362 * - We rely on counter freezing/unfreezing to enable/disable the PMU. 2363 * This is done automatically on PMU ack. 2364 * - Ack the PMU only after the APIC. 2365 */ 2366 2367 static int intel_pmu_handle_irq_v4(struct pt_regs *regs) 2368 { 2369 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2370 int handled = 0; 2371 bool bts = false; 2372 u64 status; 2373 int pmu_enabled = cpuc->enabled; 2374 int loops = 0; 2375 2376 /* PMU has been disabled because of counter freezing */ 2377 cpuc->enabled = 0; 2378 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { 2379 bts = true; 2380 intel_bts_disable_local(); 2381 handled = intel_pmu_drain_bts_buffer(); 2382 handled += intel_bts_interrupt(); 2383 } 2384 status = intel_pmu_get_status(); 2385 if (!status) 2386 goto done; 2387 again: 2388 intel_pmu_lbr_read(); 2389 if (++loops > 100) { 2390 static bool warned; 2391 2392 if (!warned) { 2393 WARN(1, "perfevents: irq loop stuck!\n"); 2394 perf_event_print_debug(); 2395 warned = true; 2396 } 2397 intel_pmu_reset(); 2398 goto done; 2399 } 2400 2401 2402 handled += handle_pmi_common(regs, status); 2403 done: 2404 /* Ack the PMI in the APIC */ 2405 apic_write(APIC_LVTPC, APIC_DM_NMI); 2406 2407 /* 2408 * The counters start counting immediately while we ack the status. 2409 * Make it as close as possible to IRET. This avoids bogus 2410 * freezing on Skylake CPUs. 2411 */ 2412 if (status) { 2413 intel_pmu_ack_status(status); 2414 } else { 2415 /* 2416 * The CPU may issue two PMIs very close to each other. 2417 * When the PMI handler services the first one, the 2418 * GLOBAL_STATUS is already updated to reflect both. 2419 * When it IRETs, the second PMI is immediately 2420 * handled and it sees clear status.
In the meantime, 2421 * there may be a third PMI, because the freezing bit 2422 * isn't set since the ack in the first PMI handler. 2423 * Double check if there is more work to be done. 2424 */ 2425 status = intel_pmu_get_status(); 2426 if (status) 2427 goto again; 2428 } 2429 2430 if (bts) 2431 intel_bts_enable_local(); 2432 cpuc->enabled = pmu_enabled; 2433 return handled; 2434 } 2435 2436 /* 2437 * This handler is triggered by the local APIC, so the APIC IRQ handling 2438 * rules apply: 2439 */ 2440 static int intel_pmu_handle_irq(struct pt_regs *regs) 2441 { 2442 struct cpu_hw_events *cpuc; 2443 int loops; 2444 u64 status; 2445 int handled; 2446 int pmu_enabled; 2447 2448 cpuc = this_cpu_ptr(&cpu_hw_events); 2449 2450 /* 2451 * Save the PMU state. 2452 * It needs to be restored when leaving the handler. 2453 */ 2454 pmu_enabled = cpuc->enabled; 2455 /* 2456 * No known reason to not always do late ACK, 2457 * but just in case do it opt-in. 2458 */ 2459 if (!x86_pmu.late_ack) 2460 apic_write(APIC_LVTPC, APIC_DM_NMI); 2461 intel_bts_disable_local(); 2462 cpuc->enabled = 0; 2463 __intel_pmu_disable_all(); 2464 handled = intel_pmu_drain_bts_buffer(); 2465 handled += intel_bts_interrupt(); 2466 status = intel_pmu_get_status(); 2467 if (!status) 2468 goto done; 2469 2470 loops = 0; 2471 again: 2472 intel_pmu_lbr_read(); 2473 intel_pmu_ack_status(status); 2474 if (++loops > 100) { 2475 static bool warned; 2476 2477 if (!warned) { 2478 WARN(1, "perfevents: irq loop stuck!\n"); 2479 perf_event_print_debug(); 2480 warned = true; 2481 } 2482 intel_pmu_reset(); 2483 goto done; 2484 } 2485 2486 handled += handle_pmi_common(regs, status); 2487 2488 /* 2489 * Repeat if there is more work to be done: 2490 */ 2491 status = intel_pmu_get_status(); 2492 if (status) 2493 goto again; 2494 2495 done: 2496 /* Only restore PMU state when it's active. See x86_pmu_disable(). */ 2497 cpuc->enabled = pmu_enabled; 2498 if (pmu_enabled) 2499 __intel_pmu_enable_all(0, true); 2500 intel_bts_enable_local(); 2501 2502 /* 2503 * Only unmask the NMI after the overflow counters 2504 * have been reset. This avoids spurious NMIs on 2505 * Haswell CPUs.
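 *
 * (This is the late_ack path: when x86_pmu.late_ack is set, the
 * APIC_LVTPC write below is deliberately the last thing done before
 * returning from the handler.)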
2506 */ 2507 if (x86_pmu.late_ack) 2508 apic_write(APIC_LVTPC, APIC_DM_NMI); 2509 return handled; 2510 } 2511 2512 static struct event_constraint * 2513 intel_bts_constraints(struct perf_event *event) 2514 { 2515 if (unlikely(intel_pmu_has_bts(event))) 2516 return &bts_constraint; 2517 2518 return NULL; 2519 } 2520 2521 static int intel_alt_er(int idx, u64 config) 2522 { 2523 int alt_idx = idx; 2524 2525 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1)) 2526 return idx; 2527 2528 if (idx == EXTRA_REG_RSP_0) 2529 alt_idx = EXTRA_REG_RSP_1; 2530 2531 if (idx == EXTRA_REG_RSP_1) 2532 alt_idx = EXTRA_REG_RSP_0; 2533 2534 if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask) 2535 return idx; 2536 2537 return alt_idx; 2538 } 2539 2540 static void intel_fixup_er(struct perf_event *event, int idx) 2541 { 2542 event->hw.extra_reg.idx = idx; 2543 2544 if (idx == EXTRA_REG_RSP_0) { 2545 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 2546 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event; 2547 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; 2548 } else if (idx == EXTRA_REG_RSP_1) { 2549 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 2550 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event; 2551 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; 2552 } 2553 } 2554 2555 /* 2556 * manage allocation of shared extra msr for certain events 2557 * 2558 * sharing can be: 2559 * per-cpu: to be shared between the various events on a single PMU 2560 * per-core: per-cpu + shared by HT threads 2561 */ 2562 static struct event_constraint * 2563 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, 2564 struct perf_event *event, 2565 struct hw_perf_event_extra *reg) 2566 { 2567 struct event_constraint *c = &emptyconstraint; 2568 struct er_account *era; 2569 unsigned long flags; 2570 int idx = reg->idx; 2571 2572 /* 2573 * reg->alloc can be set due to existing state, so for fake cpuc we 2574 * need to ignore this, otherwise we might fail to allocate proper fake 2575 * state for this extra reg constraint. Also see the comment below. 2576 */ 2577 if (reg->alloc && !cpuc->is_fake) 2578 return NULL; /* call x86_get_event_constraint() */ 2579 2580 again: 2581 era = &cpuc->shared_regs->regs[idx]; 2582 /* 2583 * we use spin_lock_irqsave() to avoid lockdep issues when 2584 * passing a fake cpuc 2585 */ 2586 raw_spin_lock_irqsave(&era->lock, flags); 2587 2588 if (!atomic_read(&era->ref) || era->config == reg->config) { 2589 2590 /* 2591 * If its a fake cpuc -- as per validate_{group,event}() we 2592 * shouldn't touch event state and we can avoid doing so 2593 * since both will only call get_event_constraints() once 2594 * on each event, this avoids the need for reg->alloc. 2595 * 2596 * Not doing the ER fixup will only result in era->reg being 2597 * wrong, but since we won't actually try and program hardware 2598 * this isn't a problem either. 2599 */ 2600 if (!cpuc->is_fake) { 2601 if (idx != reg->idx) 2602 intel_fixup_er(event, idx); 2603 2604 /* 2605 * x86_schedule_events() can call get_event_constraints() 2606 * multiple times on events in the case of incremental 2607 * scheduling(). reg->alloc ensures we only do the ER 2608 * allocation once. 
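 *
 * (The flag is cleared again in __intel_shared_reg_put_constraints(),
 * so the shared MSR can be re-allocated the next time this event is
 * scheduled.)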
2609 */ 2610 reg->alloc = 1; 2611 } 2612 2613 /* lock in msr value */ 2614 era->config = reg->config; 2615 era->reg = reg->reg; 2616 2617 /* one more user */ 2618 atomic_inc(&era->ref); 2619 2620 /* 2621 * need to call x86_get_event_constraint() 2622 * to check if associated event has constraints 2623 */ 2624 c = NULL; 2625 } else { 2626 idx = intel_alt_er(idx, reg->config); 2627 if (idx != reg->idx) { 2628 raw_spin_unlock_irqrestore(&era->lock, flags); 2629 goto again; 2630 } 2631 } 2632 raw_spin_unlock_irqrestore(&era->lock, flags); 2633 2634 return c; 2635 } 2636 2637 static void 2638 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc, 2639 struct hw_perf_event_extra *reg) 2640 { 2641 struct er_account *era; 2642 2643 /* 2644 * Only put constraint if extra reg was actually allocated. Also takes 2645 * care of event which do not use an extra shared reg. 2646 * 2647 * Also, if this is a fake cpuc we shouldn't touch any event state 2648 * (reg->alloc) and we don't care about leaving inconsistent cpuc state 2649 * either since it'll be thrown out. 2650 */ 2651 if (!reg->alloc || cpuc->is_fake) 2652 return; 2653 2654 era = &cpuc->shared_regs->regs[reg->idx]; 2655 2656 /* one fewer user */ 2657 atomic_dec(&era->ref); 2658 2659 /* allocate again next time */ 2660 reg->alloc = 0; 2661 } 2662 2663 static struct event_constraint * 2664 intel_shared_regs_constraints(struct cpu_hw_events *cpuc, 2665 struct perf_event *event) 2666 { 2667 struct event_constraint *c = NULL, *d; 2668 struct hw_perf_event_extra *xreg, *breg; 2669 2670 xreg = &event->hw.extra_reg; 2671 if (xreg->idx != EXTRA_REG_NONE) { 2672 c = __intel_shared_reg_get_constraints(cpuc, event, xreg); 2673 if (c == &emptyconstraint) 2674 return c; 2675 } 2676 breg = &event->hw.branch_reg; 2677 if (breg->idx != EXTRA_REG_NONE) { 2678 d = __intel_shared_reg_get_constraints(cpuc, event, breg); 2679 if (d == &emptyconstraint) { 2680 __intel_shared_reg_put_constraints(cpuc, xreg); 2681 c = d; 2682 } 2683 } 2684 return c; 2685 } 2686 2687 struct event_constraint * 2688 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 2689 struct perf_event *event) 2690 { 2691 struct event_constraint *c; 2692 2693 if (x86_pmu.event_constraints) { 2694 for_each_event_constraint(c, x86_pmu.event_constraints) { 2695 if ((event->hw.config & c->cmask) == c->code) { 2696 event->hw.flags |= c->flags; 2697 return c; 2698 } 2699 } 2700 } 2701 2702 return &unconstrained; 2703 } 2704 2705 static struct event_constraint * 2706 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 2707 struct perf_event *event) 2708 { 2709 struct event_constraint *c; 2710 2711 c = intel_bts_constraints(event); 2712 if (c) 2713 return c; 2714 2715 c = intel_shared_regs_constraints(cpuc, event); 2716 if (c) 2717 return c; 2718 2719 c = intel_pebs_constraints(event); 2720 if (c) 2721 return c; 2722 2723 return x86_get_event_constraints(cpuc, idx, event); 2724 } 2725 2726 static void 2727 intel_start_scheduling(struct cpu_hw_events *cpuc) 2728 { 2729 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 2730 struct intel_excl_states *xl; 2731 int tid = cpuc->excl_thread_id; 2732 2733 /* 2734 * nothing needed if in group validation mode 2735 */ 2736 if (cpuc->is_fake || !is_ht_workaround_enabled()) 2737 return; 2738 2739 /* 2740 * no exclusion needed 2741 */ 2742 if (WARN_ON_ONCE(!excl_cntrs)) 2743 return; 2744 2745 xl = &excl_cntrs->states[tid]; 2746 2747 xl->sched_started = true; 2748 /* 2749 * lock shared state until we are done scheduling 2750 * in 
stop_event_scheduling() 2751 * makes scheduling appear as a transaction 2752 */ 2753 raw_spin_lock(&excl_cntrs->lock); 2754 } 2755 2756 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) 2757 { 2758 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 2759 struct event_constraint *c = cpuc->event_constraint[idx]; 2760 struct intel_excl_states *xl; 2761 int tid = cpuc->excl_thread_id; 2762 2763 if (cpuc->is_fake || !is_ht_workaround_enabled()) 2764 return; 2765 2766 if (WARN_ON_ONCE(!excl_cntrs)) 2767 return; 2768 2769 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) 2770 return; 2771 2772 xl = &excl_cntrs->states[tid]; 2773 2774 lockdep_assert_held(&excl_cntrs->lock); 2775 2776 if (c->flags & PERF_X86_EVENT_EXCL) 2777 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE; 2778 else 2779 xl->state[cntr] = INTEL_EXCL_SHARED; 2780 } 2781 2782 static void 2783 intel_stop_scheduling(struct cpu_hw_events *cpuc) 2784 { 2785 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 2786 struct intel_excl_states *xl; 2787 int tid = cpuc->excl_thread_id; 2788 2789 /* 2790 * nothing needed if in group validation mode 2791 */ 2792 if (cpuc->is_fake || !is_ht_workaround_enabled()) 2793 return; 2794 /* 2795 * no exclusion needed 2796 */ 2797 if (WARN_ON_ONCE(!excl_cntrs)) 2798 return; 2799 2800 xl = &excl_cntrs->states[tid]; 2801 2802 xl->sched_started = false; 2803 /* 2804 * release shared state lock (acquired in intel_start_scheduling()) 2805 */ 2806 raw_spin_unlock(&excl_cntrs->lock); 2807 } 2808 2809 static struct event_constraint * 2810 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx) 2811 { 2812 WARN_ON_ONCE(!cpuc->constraint_list); 2813 2814 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { 2815 struct event_constraint *cx; 2816 2817 /* 2818 * grab pre-allocated constraint entry 2819 */ 2820 cx = &cpuc->constraint_list[idx]; 2821 2822 /* 2823 * initialize dynamic constraint 2824 * with static constraint 2825 */ 2826 *cx = *c; 2827 2828 /* 2829 * mark constraint as dynamic 2830 */ 2831 cx->flags |= PERF_X86_EVENT_DYNAMIC; 2832 c = cx; 2833 } 2834 2835 return c; 2836 } 2837 2838 static struct event_constraint * 2839 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, 2840 int idx, struct event_constraint *c) 2841 { 2842 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 2843 struct intel_excl_states *xlo; 2844 int tid = cpuc->excl_thread_id; 2845 int is_excl, i; 2846 2847 /* 2848 * validating a group does not require 2849 * enforcing cross-thread exclusion 2850 */ 2851 if (cpuc->is_fake || !is_ht_workaround_enabled()) 2852 return c; 2853 2854 /* 2855 * no exclusion needed 2856 */ 2857 if (WARN_ON_ONCE(!excl_cntrs)) 2858 return c; 2859 2860 /* 2861 * because we modify the constraint, we need 2862 * to make a copy. Static constraints come 2863 * from static const tables. 2864 * 2865 * only needed when constraint has not yet 2866 * been cloned (marked dynamic) 2867 */ 2868 c = dyn_constraint(cpuc, c, idx); 2869 2870 /* 2871 * From here on, the constraint is dynamic. 
2872 * Either it was just allocated above, or it 2873 * was allocated during a earlier invocation 2874 * of this function 2875 */ 2876 2877 /* 2878 * state of sibling HT 2879 */ 2880 xlo = &excl_cntrs->states[tid ^ 1]; 2881 2882 /* 2883 * event requires exclusive counter access 2884 * across HT threads 2885 */ 2886 is_excl = c->flags & PERF_X86_EVENT_EXCL; 2887 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) { 2888 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT; 2889 if (!cpuc->n_excl++) 2890 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1); 2891 } 2892 2893 /* 2894 * Modify static constraint with current dynamic 2895 * state of thread 2896 * 2897 * EXCLUSIVE: sibling counter measuring exclusive event 2898 * SHARED : sibling counter measuring non-exclusive event 2899 * UNUSED : sibling counter unused 2900 */ 2901 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) { 2902 /* 2903 * exclusive event in sibling counter 2904 * our corresponding counter cannot be used 2905 * regardless of our event 2906 */ 2907 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) 2908 __clear_bit(i, c->idxmsk); 2909 /* 2910 * if measuring an exclusive event, sibling 2911 * measuring non-exclusive, then counter cannot 2912 * be used 2913 */ 2914 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) 2915 __clear_bit(i, c->idxmsk); 2916 } 2917 2918 /* 2919 * recompute actual bit weight for scheduling algorithm 2920 */ 2921 c->weight = hweight64(c->idxmsk64); 2922 2923 /* 2924 * if we return an empty mask, then switch 2925 * back to static empty constraint to avoid 2926 * the cost of freeing later on 2927 */ 2928 if (c->weight == 0) 2929 c = &emptyconstraint; 2930 2931 return c; 2932 } 2933 2934 static struct event_constraint * 2935 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 2936 struct perf_event *event) 2937 { 2938 struct event_constraint *c1 = NULL; 2939 struct event_constraint *c2; 2940 2941 if (idx >= 0) /* fake does < 0 */ 2942 c1 = cpuc->event_constraint[idx]; 2943 2944 /* 2945 * first time only 2946 * - static constraint: no change across incremental scheduling calls 2947 * - dynamic constraint: handled by intel_get_excl_constraints() 2948 */ 2949 c2 = __intel_get_event_constraints(cpuc, idx, event); 2950 if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) { 2951 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX); 2952 c1->weight = c2->weight; 2953 c2 = c1; 2954 } 2955 2956 if (cpuc->excl_cntrs) 2957 return intel_get_excl_constraints(cpuc, event, idx, c2); 2958 2959 return c2; 2960 } 2961 2962 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc, 2963 struct perf_event *event) 2964 { 2965 struct hw_perf_event *hwc = &event->hw; 2966 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; 2967 int tid = cpuc->excl_thread_id; 2968 struct intel_excl_states *xl; 2969 2970 /* 2971 * nothing needed if in group validation mode 2972 */ 2973 if (cpuc->is_fake) 2974 return; 2975 2976 if (WARN_ON_ONCE(!excl_cntrs)) 2977 return; 2978 2979 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) { 2980 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT; 2981 if (!--cpuc->n_excl) 2982 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0); 2983 } 2984 2985 /* 2986 * If event was actually assigned, then mark the counter state as 2987 * unused now. 2988 */ 2989 if (hwc->idx >= 0) { 2990 xl = &excl_cntrs->states[tid]; 2991 2992 /* 2993 * put_constraint may be called from x86_schedule_events() 2994 * which already has the lock held so here make locking 2995 * conditional. 
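 *
 * (xl->sched_started is only true between intel_start_scheduling() and
 * intel_stop_scheduling(), i.e. while excl_cntrs->lock is already held
 * by this context, hence the !sched_started checks below.)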
2996 */ 2997 if (!xl->sched_started) 2998 raw_spin_lock(&excl_cntrs->lock); 2999 3000 xl->state[hwc->idx] = INTEL_EXCL_UNUSED; 3001 3002 if (!xl->sched_started) 3003 raw_spin_unlock(&excl_cntrs->lock); 3004 } 3005 } 3006 3007 static void 3008 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, 3009 struct perf_event *event) 3010 { 3011 struct hw_perf_event_extra *reg; 3012 3013 reg = &event->hw.extra_reg; 3014 if (reg->idx != EXTRA_REG_NONE) 3015 __intel_shared_reg_put_constraints(cpuc, reg); 3016 3017 reg = &event->hw.branch_reg; 3018 if (reg->idx != EXTRA_REG_NONE) 3019 __intel_shared_reg_put_constraints(cpuc, reg); 3020 } 3021 3022 static void intel_put_event_constraints(struct cpu_hw_events *cpuc, 3023 struct perf_event *event) 3024 { 3025 intel_put_shared_regs_event_constraints(cpuc, event); 3026 3027 /* 3028 * if the PMU has exclusive counter restrictions, then 3029 * all events are subject to them and must call the 3030 * put_excl_constraints() routine 3031 */ 3032 if (cpuc->excl_cntrs) 3033 intel_put_excl_constraints(cpuc, event); 3034 } 3035 3036 static void intel_pebs_aliases_core2(struct perf_event *event) 3037 { 3038 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 3039 /* 3040 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 3041 * (0x003c) so that we can use it with PEBS. 3042 * 3043 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't 3044 * PEBS capable. However we can use INST_RETIRED.ANY_P 3045 * (0x00c0), which is a PEBS capable event, to get the same 3046 * count. 3047 * 3048 * INST_RETIRED.ANY_P counts the number of cycles that retires 3049 * CNTMASK instructions. By setting CNTMASK to a value (16) 3050 * larger than the maximum number of instructions that can be 3051 * retired per cycle (4) and then inverting the condition, we 3052 * count all cycles that retire 16 or fewer instructions, which 3053 * is every cycle. 3054 * 3055 * Thereby we gain a PEBS capable cycle counter. 3056 */ 3057 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); 3058 3059 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 3060 event->hw.config = alt_config; 3061 } 3062 } 3063 3064 static void intel_pebs_aliases_snb(struct perf_event *event) 3065 { 3066 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 3067 /* 3068 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 3069 * (0x003c) so that we can use it with PEBS. 3070 * 3071 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't 3072 * PEBS capable. However we can use UOPS_RETIRED.ALL 3073 * (0x01c2), which is a PEBS capable event, to get the same 3074 * count. 3075 * 3076 * UOPS_RETIRED.ALL counts the number of cycles that retires 3077 * CNTMASK micro-ops. By setting CNTMASK to a value (16) 3078 * larger than the maximum number of micro-ops that can be 3079 * retired per cycle (4) and then inverting the condition, we 3080 * count all cycles that retire 16 or fewer micro-ops, which 3081 * is every cycle. 3082 * 3083 * Thereby we gain a PEBS capable cycle counter. 3084 */ 3085 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16); 3086 3087 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 3088 event->hw.config = alt_config; 3089 } 3090 } 3091 3092 static void intel_pebs_aliases_precdist(struct perf_event *event) 3093 { 3094 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 3095 /* 3096 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 3097 * (0x003c) so that we can use it with PEBS.
3098 * 3099 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't 3100 * PEBS capable. However we can use INST_RETIRED.PREC_DIST 3101 * (0x01c0), which is a PEBS capable event, to get the same 3102 * count. 3103 * 3104 * The PREC_DIST event has special support to minimize sample 3105 * shadowing effects. One drawback is that it can be 3106 * only programmed on counter 1, but that seems like an 3107 * acceptable trade off. 3108 */ 3109 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16); 3110 3111 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 3112 event->hw.config = alt_config; 3113 } 3114 } 3115 3116 static void intel_pebs_aliases_ivb(struct perf_event *event) 3117 { 3118 if (event->attr.precise_ip < 3) 3119 return intel_pebs_aliases_snb(event); 3120 return intel_pebs_aliases_precdist(event); 3121 } 3122 3123 static void intel_pebs_aliases_skl(struct perf_event *event) 3124 { 3125 if (event->attr.precise_ip < 3) 3126 return intel_pebs_aliases_core2(event); 3127 return intel_pebs_aliases_precdist(event); 3128 } 3129 3130 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) 3131 { 3132 unsigned long flags = x86_pmu.large_pebs_flags; 3133 3134 if (event->attr.use_clockid) 3135 flags &= ~PERF_SAMPLE_TIME; 3136 if (!event->attr.exclude_kernel) 3137 flags &= ~PERF_SAMPLE_REGS_USER; 3138 if (event->attr.sample_regs_user & ~PEBS_GP_REGS) 3139 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); 3140 return flags; 3141 } 3142 3143 static int intel_pmu_bts_config(struct perf_event *event) 3144 { 3145 struct perf_event_attr *attr = &event->attr; 3146 3147 if (unlikely(intel_pmu_has_bts(event))) { 3148 /* BTS is not supported by this architecture. */ 3149 if (!x86_pmu.bts_active) 3150 return -EOPNOTSUPP; 3151 3152 /* BTS is currently only allowed for user-mode. */ 3153 if (!attr->exclude_kernel) 3154 return -EOPNOTSUPP; 3155 3156 /* BTS is not allowed for precise events. 
*/ 3157 if (attr->precise_ip) 3158 return -EOPNOTSUPP; 3159 3160 /* disallow bts if conflicting events are present */ 3161 if (x86_add_exclusive(x86_lbr_exclusive_lbr)) 3162 return -EBUSY; 3163 3164 event->destroy = hw_perf_lbr_event_destroy; 3165 } 3166 3167 return 0; 3168 } 3169 3170 static int core_pmu_hw_config(struct perf_event *event) 3171 { 3172 int ret = x86_pmu_hw_config(event); 3173 3174 if (ret) 3175 return ret; 3176 3177 return intel_pmu_bts_config(event); 3178 } 3179 3180 static int intel_pmu_hw_config(struct perf_event *event) 3181 { 3182 int ret = x86_pmu_hw_config(event); 3183 3184 if (ret) 3185 return ret; 3186 3187 ret = intel_pmu_bts_config(event); 3188 if (ret) 3189 return ret; 3190 3191 if (event->attr.precise_ip) { 3192 if (!(event->attr.freq || event->attr.wakeup_events)) { 3193 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; 3194 if (!(event->attr.sample_type & 3195 ~intel_pmu_large_pebs_flags(event))) 3196 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS; 3197 } 3198 if (x86_pmu.pebs_aliases) 3199 x86_pmu.pebs_aliases(event); 3200 3201 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) 3202 event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY; 3203 } 3204 3205 if (needs_branch_stack(event)) { 3206 ret = intel_pmu_setup_lbr_filter(event); 3207 if (ret) 3208 return ret; 3209 3210 /* 3211 * BTS is set up earlier in this path, so don't account twice 3212 */ 3213 if (!unlikely(intel_pmu_has_bts(event))) { 3214 /* disallow lbr if conflicting events are present */ 3215 if (x86_add_exclusive(x86_lbr_exclusive_lbr)) 3216 return -EBUSY; 3217 3218 event->destroy = hw_perf_lbr_event_destroy; 3219 } 3220 } 3221 3222 if (event->attr.type != PERF_TYPE_RAW) 3223 return 0; 3224 3225 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY)) 3226 return 0; 3227 3228 if (x86_pmu.version < 3) 3229 return -EINVAL; 3230 3231 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 3232 return -EACCES; 3233 3234 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY; 3235 3236 return 0; 3237 } 3238 3239 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr) 3240 { 3241 if (x86_pmu.guest_get_msrs) 3242 return x86_pmu.guest_get_msrs(nr); 3243 *nr = 0; 3244 return NULL; 3245 } 3246 EXPORT_SYMBOL_GPL(perf_guest_get_msrs); 3247 3248 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) 3249 { 3250 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 3251 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; 3252 3253 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; 3254 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask; 3255 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask; 3256 if (x86_pmu.flags & PMU_FL_PEBS_ALL) 3257 arr[0].guest &= ~cpuc->pebs_enabled; 3258 else 3259 arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK); 3260 *nr = 1; 3261 3262 if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) { 3263 /* 3264 * If PMU counter has PEBS enabled it is not enough to 3265 * disable counter on a guest entry since PEBS memory 3266 * write can overshoot guest entry and corrupt guest 3267 * memory. Disabling PEBS solves the problem. 3268 * 3269 * Don't do this if the CPU already enforces it. 
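 *
 * The extra entry below tells the guest/host MSR switching code to load
 * 0 into MSR_IA32_PEBS_ENABLE on guest entry and to restore the host's
 * cpuc->pebs_enabled value on the way back.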
3270 */ 3271 arr[1].msr = MSR_IA32_PEBS_ENABLE; 3272 arr[1].host = cpuc->pebs_enabled; 3273 arr[1].guest = 0; 3274 *nr = 2; 3275 } 3276 3277 return arr; 3278 } 3279 3280 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr) 3281 { 3282 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 3283 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; 3284 int idx; 3285 3286 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 3287 struct perf_event *event = cpuc->events[idx]; 3288 3289 arr[idx].msr = x86_pmu_config_addr(idx); 3290 arr[idx].host = arr[idx].guest = 0; 3291 3292 if (!test_bit(idx, cpuc->active_mask)) 3293 continue; 3294 3295 arr[idx].host = arr[idx].guest = 3296 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE; 3297 3298 if (event->attr.exclude_host) 3299 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE; 3300 else if (event->attr.exclude_guest) 3301 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE; 3302 } 3303 3304 *nr = x86_pmu.num_counters; 3305 return arr; 3306 } 3307 3308 static void core_pmu_enable_event(struct perf_event *event) 3309 { 3310 if (!event->attr.exclude_host) 3311 x86_pmu_enable_event(event); 3312 } 3313 3314 static void core_pmu_enable_all(int added) 3315 { 3316 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 3317 int idx; 3318 3319 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 3320 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; 3321 3322 if (!test_bit(idx, cpuc->active_mask) || 3323 cpuc->events[idx]->attr.exclude_host) 3324 continue; 3325 3326 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); 3327 } 3328 } 3329 3330 static int hsw_hw_config(struct perf_event *event) 3331 { 3332 int ret = intel_pmu_hw_config(event); 3333 3334 if (ret) 3335 return ret; 3336 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE)) 3337 return 0; 3338 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); 3339 3340 /* 3341 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with 3342 * PEBS or in ANY thread mode. Since the results are non-sensical forbid 3343 * this combination. 3344 */ 3345 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) && 3346 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) || 3347 event->attr.precise_ip > 0)) 3348 return -EOPNOTSUPP; 3349 3350 if (event_is_checkpointed(event)) { 3351 /* 3352 * Sampling of checkpointed events can cause situations where 3353 * the CPU constantly aborts because of a overflow, which is 3354 * then checkpointed back and ignored. Forbid checkpointing 3355 * for sampling. 3356 * 3357 * But still allow a long sampling period, so that perf stat 3358 * from KVM works. 
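 *
 * E.g. a sample_period of 10000 is rejected by the check below, while a
 * period of 0 (pure counting) or one of at least 0x7fffffff still
 * passes.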
3359 */ 3360 if (event->attr.sample_period > 0 && 3361 event->attr.sample_period < 0x7fffffff) 3362 return -EOPNOTSUPP; 3363 } 3364 return 0; 3365 } 3366 3367 static struct event_constraint counter0_constraint = 3368 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1); 3369 3370 static struct event_constraint counter2_constraint = 3371 EVENT_CONSTRAINT(0, 0x4, 0); 3372 3373 static struct event_constraint * 3374 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 3375 struct perf_event *event) 3376 { 3377 struct event_constraint *c; 3378 3379 c = intel_get_event_constraints(cpuc, idx, event); 3380 3381 /* Handle special quirk on in_tx_checkpointed only in counter 2 */ 3382 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) { 3383 if (c->idxmsk64 & (1U << 2)) 3384 return &counter2_constraint; 3385 return &emptyconstraint; 3386 } 3387 3388 return c; 3389 } 3390 3391 static struct event_constraint * 3392 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 3393 struct perf_event *event) 3394 { 3395 struct event_constraint *c; 3396 3397 /* :ppp means to do reduced skid PEBS which is PMC0 only. */ 3398 if (event->attr.precise_ip == 3) 3399 return &counter0_constraint; 3400 3401 c = intel_get_event_constraints(cpuc, idx, event); 3402 3403 return c; 3404 } 3405 3406 static bool allow_tsx_force_abort = true; 3407 3408 static struct event_constraint * 3409 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx, 3410 struct perf_event *event) 3411 { 3412 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event); 3413 3414 /* 3415 * Without TFA we must not use PMC3. 3416 */ 3417 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) { 3418 c = dyn_constraint(cpuc, c, idx); 3419 c->idxmsk64 &= ~(1ULL << 3); 3420 c->weight--; 3421 } 3422 3423 return c; 3424 } 3425 3426 /* 3427 * Broadwell: 3428 * 3429 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared 3430 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine 3431 * the two to enforce a minimum period of 128 (the smallest value that has bits 3432 * 0-5 cleared and >= 100). 3433 * 3434 * Because of how the code in x86_perf_event_set_period() works, the truncation 3435 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period 3436 * to make up for the 'lost' events due to carrying the 'error' in period_left. 3437 * 3438 * Therefore the effective (average) period matches the requested period, 3439 * despite coarser hardware granularity. 
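 *
 * Worked example: a requested period of 100 is first raised to 128,
 * which already has bits 0-5 clear; a requested period of 1000003
 * (0xf4243) is left alone by the lower bound but has its low 6 bits
 * cleared, giving 1000000 (0xf4240).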
3440 */ 3441 static u64 bdw_limit_period(struct perf_event *event, u64 left) 3442 { 3443 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == 3444 X86_CONFIG(.event=0xc0, .umask=0x01)) { 3445 if (left < 128) 3446 left = 128; 3447 left &= ~0x3fULL; 3448 } 3449 return left; 3450 } 3451 3452 PMU_FORMAT_ATTR(event, "config:0-7" ); 3453 PMU_FORMAT_ATTR(umask, "config:8-15" ); 3454 PMU_FORMAT_ATTR(edge, "config:18" ); 3455 PMU_FORMAT_ATTR(pc, "config:19" ); 3456 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */ 3457 PMU_FORMAT_ATTR(inv, "config:23" ); 3458 PMU_FORMAT_ATTR(cmask, "config:24-31" ); 3459 PMU_FORMAT_ATTR(in_tx, "config:32"); 3460 PMU_FORMAT_ATTR(in_tx_cp, "config:33"); 3461 3462 static struct attribute *intel_arch_formats_attr[] = { 3463 &format_attr_event.attr, 3464 &format_attr_umask.attr, 3465 &format_attr_edge.attr, 3466 &format_attr_pc.attr, 3467 &format_attr_inv.attr, 3468 &format_attr_cmask.attr, 3469 NULL, 3470 }; 3471 3472 ssize_t intel_event_sysfs_show(char *page, u64 config) 3473 { 3474 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT); 3475 3476 return x86_event_sysfs_show(page, config, event); 3477 } 3478 3479 static struct intel_shared_regs *allocate_shared_regs(int cpu) 3480 { 3481 struct intel_shared_regs *regs; 3482 int i; 3483 3484 regs = kzalloc_node(sizeof(struct intel_shared_regs), 3485 GFP_KERNEL, cpu_to_node(cpu)); 3486 if (regs) { 3487 /* 3488 * initialize the locks to keep lockdep happy 3489 */ 3490 for (i = 0; i < EXTRA_REG_MAX; i++) 3491 raw_spin_lock_init(®s->regs[i].lock); 3492 3493 regs->core_id = -1; 3494 } 3495 return regs; 3496 } 3497 3498 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu) 3499 { 3500 struct intel_excl_cntrs *c; 3501 3502 c = kzalloc_node(sizeof(struct intel_excl_cntrs), 3503 GFP_KERNEL, cpu_to_node(cpu)); 3504 if (c) { 3505 raw_spin_lock_init(&c->lock); 3506 c->core_id = -1; 3507 } 3508 return c; 3509 } 3510 3511 3512 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) 3513 { 3514 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { 3515 cpuc->shared_regs = allocate_shared_regs(cpu); 3516 if (!cpuc->shared_regs) 3517 goto err; 3518 } 3519 3520 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) { 3521 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); 3522 3523 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); 3524 if (!cpuc->constraint_list) 3525 goto err_shared_regs; 3526 } 3527 3528 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 3529 cpuc->excl_cntrs = allocate_excl_cntrs(cpu); 3530 if (!cpuc->excl_cntrs) 3531 goto err_constraint_list; 3532 3533 cpuc->excl_thread_id = 0; 3534 } 3535 3536 return 0; 3537 3538 err_constraint_list: 3539 kfree(cpuc->constraint_list); 3540 cpuc->constraint_list = NULL; 3541 3542 err_shared_regs: 3543 kfree(cpuc->shared_regs); 3544 cpuc->shared_regs = NULL; 3545 3546 err: 3547 return -ENOMEM; 3548 } 3549 3550 static int intel_pmu_cpu_prepare(int cpu) 3551 { 3552 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu); 3553 } 3554 3555 static void flip_smm_bit(void *data) 3556 { 3557 unsigned long set = *(unsigned long *)data; 3558 3559 if (set > 0) { 3560 msr_set_bit(MSR_IA32_DEBUGCTLMSR, 3561 DEBUGCTLMSR_FREEZE_IN_SMM_BIT); 3562 } else { 3563 msr_clear_bit(MSR_IA32_DEBUGCTLMSR, 3564 DEBUGCTLMSR_FREEZE_IN_SMM_BIT); 3565 } 3566 } 3567 3568 static void intel_pmu_cpu_starting(int cpu) 3569 { 3570 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 3571 int core_id = topology_core_id(cpu); 3572 int i; 3573 3574 init_debug_store_on_cpu(cpu); 3575 /* 3576 * 
Deal with CPUs that don't clear their LBRs on power-up. 3577 */ 3578 intel_pmu_lbr_reset(); 3579 3580 cpuc->lbr_sel = NULL; 3581 3582 if (x86_pmu.flags & PMU_FL_TFA) { 3583 WARN_ON_ONCE(cpuc->tfa_shadow); 3584 cpuc->tfa_shadow = ~0ULL; 3585 intel_set_tfa(cpuc, false); 3586 } 3587 3588 if (x86_pmu.version > 1) 3589 flip_smm_bit(&x86_pmu.attr_freeze_on_smi); 3590 3591 if (x86_pmu.counter_freezing) 3592 enable_counter_freeze(); 3593 3594 if (!cpuc->shared_regs) 3595 return; 3596 3597 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) { 3598 for_each_cpu(i, topology_sibling_cpumask(cpu)) { 3599 struct intel_shared_regs *pc; 3600 3601 pc = per_cpu(cpu_hw_events, i).shared_regs; 3602 if (pc && pc->core_id == core_id) { 3603 cpuc->kfree_on_online[0] = cpuc->shared_regs; 3604 cpuc->shared_regs = pc; 3605 break; 3606 } 3607 } 3608 cpuc->shared_regs->core_id = core_id; 3609 cpuc->shared_regs->refcnt++; 3610 } 3611 3612 if (x86_pmu.lbr_sel_map) 3613 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; 3614 3615 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 3616 for_each_cpu(i, topology_sibling_cpumask(cpu)) { 3617 struct cpu_hw_events *sibling; 3618 struct intel_excl_cntrs *c; 3619 3620 sibling = &per_cpu(cpu_hw_events, i); 3621 c = sibling->excl_cntrs; 3622 if (c && c->core_id == core_id) { 3623 cpuc->kfree_on_online[1] = cpuc->excl_cntrs; 3624 cpuc->excl_cntrs = c; 3625 if (!sibling->excl_thread_id) 3626 cpuc->excl_thread_id = 1; 3627 break; 3628 } 3629 } 3630 cpuc->excl_cntrs->core_id = core_id; 3631 cpuc->excl_cntrs->refcnt++; 3632 } 3633 } 3634 3635 static void free_excl_cntrs(struct cpu_hw_events *cpuc) 3636 { 3637 struct intel_excl_cntrs *c; 3638 3639 c = cpuc->excl_cntrs; 3640 if (c) { 3641 if (c->core_id == -1 || --c->refcnt == 0) 3642 kfree(c); 3643 cpuc->excl_cntrs = NULL; 3644 } 3645 3646 kfree(cpuc->constraint_list); 3647 cpuc->constraint_list = NULL; 3648 } 3649 3650 static void intel_pmu_cpu_dying(int cpu) 3651 { 3652 fini_debug_store_on_cpu(cpu); 3653 3654 if (x86_pmu.counter_freezing) 3655 disable_counter_freeze(); 3656 } 3657 3658 void intel_cpuc_finish(struct cpu_hw_events *cpuc) 3659 { 3660 struct intel_shared_regs *pc; 3661 3662 pc = cpuc->shared_regs; 3663 if (pc) { 3664 if (pc->core_id == -1 || --pc->refcnt == 0) 3665 kfree(pc); 3666 cpuc->shared_regs = NULL; 3667 } 3668 3669 free_excl_cntrs(cpuc); 3670 } 3671 3672 static void intel_pmu_cpu_dead(int cpu) 3673 { 3674 intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu)); 3675 } 3676 3677 static void intel_pmu_sched_task(struct perf_event_context *ctx, 3678 bool sched_in) 3679 { 3680 intel_pmu_pebs_sched_task(ctx, sched_in); 3681 intel_pmu_lbr_sched_task(ctx, sched_in); 3682 } 3683 3684 static int intel_pmu_check_period(struct perf_event *event, u64 value) 3685 { 3686 return intel_pmu_has_bts_period(event, value) ? 
-EINVAL : 0; 3687 } 3688 3689 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); 3690 3691 PMU_FORMAT_ATTR(ldlat, "config1:0-15"); 3692 3693 PMU_FORMAT_ATTR(frontend, "config1:0-23"); 3694 3695 static struct attribute *intel_arch3_formats_attr[] = { 3696 &format_attr_event.attr, 3697 &format_attr_umask.attr, 3698 &format_attr_edge.attr, 3699 &format_attr_pc.attr, 3700 &format_attr_any.attr, 3701 &format_attr_inv.attr, 3702 &format_attr_cmask.attr, 3703 NULL, 3704 }; 3705 3706 static struct attribute *hsw_format_attr[] = { 3707 &format_attr_in_tx.attr, 3708 &format_attr_in_tx_cp.attr, 3709 &format_attr_offcore_rsp.attr, 3710 &format_attr_ldlat.attr, 3711 NULL 3712 }; 3713 3714 static struct attribute *nhm_format_attr[] = { 3715 &format_attr_offcore_rsp.attr, 3716 &format_attr_ldlat.attr, 3717 NULL 3718 }; 3719 3720 static struct attribute *slm_format_attr[] = { 3721 &format_attr_offcore_rsp.attr, 3722 NULL 3723 }; 3724 3725 static struct attribute *skl_format_attr[] = { 3726 &format_attr_frontend.attr, 3727 NULL, 3728 }; 3729 3730 static __initconst const struct x86_pmu core_pmu = { 3731 .name = "core", 3732 .handle_irq = x86_pmu_handle_irq, 3733 .disable_all = x86_pmu_disable_all, 3734 .enable_all = core_pmu_enable_all, 3735 .enable = core_pmu_enable_event, 3736 .disable = x86_pmu_disable_event, 3737 .hw_config = core_pmu_hw_config, 3738 .schedule_events = x86_schedule_events, 3739 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 3740 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 3741 .event_map = intel_pmu_event_map, 3742 .max_events = ARRAY_SIZE(intel_perfmon_event_map), 3743 .apic = 1, 3744 .large_pebs_flags = LARGE_PEBS_FLAGS, 3745 3746 /* 3747 * Intel PMCs cannot be accessed sanely above 32-bit width, 3748 * so we install an artificial 1<<31 period regardless of 3749 * the generic event period: 3750 */ 3751 .max_period = (1ULL<<31) - 1, 3752 .get_event_constraints = intel_get_event_constraints, 3753 .put_event_constraints = intel_put_event_constraints, 3754 .event_constraints = intel_core_event_constraints, 3755 .guest_get_msrs = core_guest_get_msrs, 3756 .format_attrs = intel_arch_formats_attr, 3757 .events_sysfs_show = intel_event_sysfs_show, 3758 3759 /* 3760 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs 3761 * together with PMU version 1 and thus be using core_pmu with 3762 * shared_regs. We need following callbacks here to allocate 3763 * it properly. 
3764 */ 3765 .cpu_prepare = intel_pmu_cpu_prepare, 3766 .cpu_starting = intel_pmu_cpu_starting, 3767 .cpu_dying = intel_pmu_cpu_dying, 3768 .cpu_dead = intel_pmu_cpu_dead, 3769 3770 .check_period = intel_pmu_check_period, 3771 }; 3772 3773 static struct attribute *intel_pmu_attrs[]; 3774 3775 static __initconst const struct x86_pmu intel_pmu = { 3776 .name = "Intel", 3777 .handle_irq = intel_pmu_handle_irq, 3778 .disable_all = intel_pmu_disable_all, 3779 .enable_all = intel_pmu_enable_all, 3780 .enable = intel_pmu_enable_event, 3781 .disable = intel_pmu_disable_event, 3782 .add = intel_pmu_add_event, 3783 .del = intel_pmu_del_event, 3784 .read = intel_pmu_read_event, 3785 .hw_config = intel_pmu_hw_config, 3786 .schedule_events = x86_schedule_events, 3787 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 3788 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 3789 .event_map = intel_pmu_event_map, 3790 .max_events = ARRAY_SIZE(intel_perfmon_event_map), 3791 .apic = 1, 3792 .large_pebs_flags = LARGE_PEBS_FLAGS, 3793 /* 3794 * Intel PMCs cannot be accessed sanely above 32 bit width, 3795 * so we install an artificial 1<<31 period regardless of 3796 * the generic event period: 3797 */ 3798 .max_period = (1ULL << 31) - 1, 3799 .get_event_constraints = intel_get_event_constraints, 3800 .put_event_constraints = intel_put_event_constraints, 3801 .pebs_aliases = intel_pebs_aliases_core2, 3802 3803 .format_attrs = intel_arch3_formats_attr, 3804 .events_sysfs_show = intel_event_sysfs_show, 3805 3806 .attrs = intel_pmu_attrs, 3807 3808 .cpu_prepare = intel_pmu_cpu_prepare, 3809 .cpu_starting = intel_pmu_cpu_starting, 3810 .cpu_dying = intel_pmu_cpu_dying, 3811 .cpu_dead = intel_pmu_cpu_dead, 3812 3813 .guest_get_msrs = intel_guest_get_msrs, 3814 .sched_task = intel_pmu_sched_task, 3815 3816 .check_period = intel_pmu_check_period, 3817 }; 3818 3819 static __init void intel_clovertown_quirk(void) 3820 { 3821 /* 3822 * PEBS is unreliable due to: 3823 * 3824 * AJ67 - PEBS may experience CPL leaks 3825 * AJ68 - PEBS PMI may be delayed by one event 3826 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12] 3827 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS 3828 * 3829 * AJ67 could be worked around by restricting the OS/USR flags. 3830 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI. 3831 * 3832 * AJ106 could possibly be worked around by not allowing LBR 3833 * usage from PEBS, including the fixup. 3834 * AJ68 could possibly be worked around by always programming 3835 * a pebs_event_reset[0] value and coping with the lost events. 3836 * 3837 * But taken together it might just make sense to not enable PEBS on 3838 * these chips. 
3839 */ 3840 pr_warn("PEBS disabled due to CPU errata\n"); 3841 x86_pmu.pebs = 0; 3842 x86_pmu.pebs_constraints = NULL; 3843 } 3844 3845 static const struct x86_cpu_desc isolation_ucodes[] = { 3846 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_CORE, 3, 0x0000001f), 3847 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_ULT, 1, 0x0000001e), 3848 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E, 1, 0x00000015), 3849 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037), 3850 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a), 3851 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_CORE, 4, 0x00000023), 3852 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_GT3E, 1, 0x00000014), 3853 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 2, 0x00000010), 3854 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 3, 0x07000009), 3855 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 4, 0x0f000009), 3856 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 5, 0x0e000002), 3857 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014), 3858 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021), 3859 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000), 3860 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_MOBILE, 3, 0x0000007c), 3861 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_DESKTOP, 3, 0x0000007c), 3862 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 9, 0x0000004e), 3863 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 9, 0x0000004e), 3864 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 10, 0x0000004e), 3865 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 11, 0x0000004e), 3866 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 12, 0x0000004e), 3867 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 10, 0x0000004e), 3868 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 11, 0x0000004e), 3869 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 12, 0x0000004e), 3870 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 13, 0x0000004e), 3871 {} 3872 }; 3873 3874 static void intel_check_pebs_isolation(void) 3875 { 3876 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes); 3877 } 3878 3879 static __init void intel_pebs_isolation_quirk(void) 3880 { 3881 WARN_ON_ONCE(x86_pmu.check_microcode); 3882 x86_pmu.check_microcode = intel_check_pebs_isolation; 3883 intel_check_pebs_isolation(); 3884 } 3885 3886 static const struct x86_cpu_desc pebs_ucodes[] = { 3887 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028), 3888 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618), 3889 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c), 3890 {} 3891 }; 3892 3893 static bool intel_snb_pebs_broken(void) 3894 { 3895 return !x86_cpu_has_min_microcode_rev(pebs_ucodes); 3896 } 3897 3898 static void intel_snb_check_microcode(void) 3899 { 3900 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken) 3901 return; 3902 3903 /* 3904 * Serialized by the microcode lock.. 3905 */ 3906 if (x86_pmu.pebs_broken) { 3907 pr_info("PEBS enabled due to microcode update\n"); 3908 x86_pmu.pebs_broken = 0; 3909 } else { 3910 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n"); 3911 x86_pmu.pebs_broken = 1; 3912 } 3913 } 3914 3915 static bool is_lbr_from(unsigned long msr) 3916 { 3917 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr; 3918 3919 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr; 3920 } 3921 3922 /* 3923 * Under certain circumstances, access certain MSR may cause #GP. 3924 * The function tests if the input MSR can be safely accessed. 
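 *
 * A rough sketch of the probe below: read the MSR, flip the bits allowed
 * by @mask, write the result back and re-read it. A fault on the write,
 * or a read-back that does not match what was written (e.g. an emulator
 * that silently drops the write and keeps returning 0), means the MSR is
 * treated as not safely accessible.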
3925 */
3926 static bool check_msr(unsigned long msr, u64 mask)
3927 {
3928 u64 val_old, val_new, val_tmp;
3929
3930 /*
3931 * Read the current value, change it and read it back to see if it
3932 * matches, this is needed to detect certain hardware emulators
3933 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
3934 */
3935 if (rdmsrl_safe(msr, &val_old))
3936 return false;
3937
3938 /*
3939 * Only change the bits which can be updated by wrmsrl.
3940 */
3941 val_tmp = val_old ^ mask;
3942
3943 if (is_lbr_from(msr))
3944 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
3945
3946 if (wrmsrl_safe(msr, val_tmp) ||
3947 rdmsrl_safe(msr, &val_new))
3948 return false;
3949
3950 /*
3951 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
3952 * should equal rdmsrl()'s even with the quirk.
3953 */
3954 if (val_new != val_tmp)
3955 return false;
3956
3957 if (is_lbr_from(msr))
3958 val_old = lbr_from_signext_quirk_wr(val_old);
3959
3960 /* At this point the MSR is known to be safe to access.
3961 * Restore the old value and return.
3962 */
3963 wrmsrl(msr, val_old);
3964
3965 return true;
3966 }
3967
3968 static __init void intel_sandybridge_quirk(void)
3969 {
3970 x86_pmu.check_microcode = intel_snb_check_microcode;
3971 cpus_read_lock();
3972 intel_snb_check_microcode();
3973 cpus_read_unlock();
3974 }
3975
3976 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
3977 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
3978 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
3979 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
3980 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
3981 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
3982 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
3983 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
3984 };
3985
3986 static __init void intel_arch_events_quirk(void)
3987 {
3988 int bit;
3989
3990 /* disable events that are reported as not present by cpuid */
3991 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
3992 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
3993 pr_warn("CPUID marked event: \'%s\' unavailable\n",
3994 intel_arch_events_map[bit].name);
3995 }
3996 }
3997
3998 static __init void intel_nehalem_quirk(void)
3999 {
4000 union cpuid10_ebx ebx;
4001
4002 ebx.full = x86_pmu.events_maskl;
4003 if (ebx.split.no_branch_misses_retired) {
4004 /*
4005 * Erratum AAJ80 detected, we work it around by using
4006 * the BR_MISP_EXEC.ANY event. This will over-count
4007 * branch-misses, but it's still much better than the
4008 * architectural event which is often completely bogus:
4009 */
4010 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
4011 ebx.split.no_branch_misses_retired = 0;
4012 x86_pmu.events_maskl = ebx.full;
4013 pr_info("CPU erratum AAJ80 worked around\n");
4014 }
4015 }
4016
4017 static const struct x86_cpu_desc counter_freezing_ucodes[] = {
4018 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 2, 0x0000000e),
4019 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 9, 0x0000002e),
4020 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 10, 0x00000008),
4021 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_X, 1, 0x00000028),
4022 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 1, 0x00000028),
4023 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 8, 0x00000006),
4024 {}
4025 };
4026
4027 static bool intel_counter_freezing_broken(void)
4028 {
4029 return !x86_cpu_has_min_microcode_rev(counter_freezing_ucodes);
4030 }
4031
4032 static __init void intel_counter_freezing_quirk(void)
4033 {
4034 /* Check if it's already disabled */
4035 if (disable_counter_freezing)
4036 return;
4037
4038 /*
4039 * If the system starts with the wrong ucode, leave the
4040 * counter-freezing feature permanently disabled.
4041 */
4042 if (intel_counter_freezing_broken()) {
4043 pr_info("PMU counter freezing disabled due to CPU errata, "
4044 "please upgrade microcode\n");
4045 x86_pmu.counter_freezing = false;
4046 x86_pmu.handle_irq = intel_pmu_handle_irq;
4047 }
4048 }
4049
4050 /*
4051 * enable software workaround for errata:
4052 * SNB: BJ122
4053 * IVB: BV98
4054 * HSW: HSD29
4055 *
4056 * Only needed when HT is enabled. However, detecting
4057 * whether HT is enabled is difficult (model specific). So instead,
4058 * we enable the workaround during early boot and verify whether
4059 * it is needed in a later initcall phase, once we have valid
4060 * topology information to check if HT is actually enabled.
4061 */
4062 static __init void intel_ht_bug(void)
4063 {
4064 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
4065
4066 x86_pmu.start_scheduling = intel_start_scheduling;
4067 x86_pmu.commit_scheduling = intel_commit_scheduling;
4068 x86_pmu.stop_scheduling = intel_stop_scheduling;
4069 }
4070
4071 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
4072 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
4073
4074 /* Haswell special events */
4075 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
4076 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
4077 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
4078 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
4079 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
4080 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
4081 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
4082 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
4083 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
4084 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
4085 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
4086 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
4087
4088 static struct attribute *hsw_events_attrs[] = {
4089 EVENT_PTR(td_slots_issued),
4090 EVENT_PTR(td_slots_retired),
4091 EVENT_PTR(td_fetch_bubbles),
4092 EVENT_PTR(td_total_slots),
4093 EVENT_PTR(td_total_slots_scale),
4094 EVENT_PTR(td_recovery_bubbles),
4095
EVENT_PTR(td_recovery_bubbles_scale), 4096 NULL 4097 }; 4098 4099 static struct attribute *hsw_mem_events_attrs[] = { 4100 EVENT_PTR(mem_ld_hsw), 4101 EVENT_PTR(mem_st_hsw), 4102 NULL, 4103 }; 4104 4105 static struct attribute *hsw_tsx_events_attrs[] = { 4106 EVENT_PTR(tx_start), 4107 EVENT_PTR(tx_commit), 4108 EVENT_PTR(tx_abort), 4109 EVENT_PTR(tx_capacity), 4110 EVENT_PTR(tx_conflict), 4111 EVENT_PTR(el_start), 4112 EVENT_PTR(el_commit), 4113 EVENT_PTR(el_abort), 4114 EVENT_PTR(el_capacity), 4115 EVENT_PTR(el_conflict), 4116 EVENT_PTR(cycles_t), 4117 EVENT_PTR(cycles_ct), 4118 NULL 4119 }; 4120 4121 static ssize_t freeze_on_smi_show(struct device *cdev, 4122 struct device_attribute *attr, 4123 char *buf) 4124 { 4125 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi); 4126 } 4127 4128 static DEFINE_MUTEX(freeze_on_smi_mutex); 4129 4130 static ssize_t freeze_on_smi_store(struct device *cdev, 4131 struct device_attribute *attr, 4132 const char *buf, size_t count) 4133 { 4134 unsigned long val; 4135 ssize_t ret; 4136 4137 ret = kstrtoul(buf, 0, &val); 4138 if (ret) 4139 return ret; 4140 4141 if (val > 1) 4142 return -EINVAL; 4143 4144 mutex_lock(&freeze_on_smi_mutex); 4145 4146 if (x86_pmu.attr_freeze_on_smi == val) 4147 goto done; 4148 4149 x86_pmu.attr_freeze_on_smi = val; 4150 4151 get_online_cpus(); 4152 on_each_cpu(flip_smm_bit, &val, 1); 4153 put_online_cpus(); 4154 done: 4155 mutex_unlock(&freeze_on_smi_mutex); 4156 4157 return count; 4158 } 4159 4160 static DEVICE_ATTR_RW(freeze_on_smi); 4161 4162 static ssize_t branches_show(struct device *cdev, 4163 struct device_attribute *attr, 4164 char *buf) 4165 { 4166 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr); 4167 } 4168 4169 static DEVICE_ATTR_RO(branches); 4170 4171 static struct attribute *lbr_attrs[] = { 4172 &dev_attr_branches.attr, 4173 NULL 4174 }; 4175 4176 static char pmu_name_str[30]; 4177 4178 static ssize_t pmu_name_show(struct device *cdev, 4179 struct device_attribute *attr, 4180 char *buf) 4181 { 4182 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str); 4183 } 4184 4185 static DEVICE_ATTR_RO(pmu_name); 4186 4187 static struct attribute *intel_pmu_caps_attrs[] = { 4188 &dev_attr_pmu_name.attr, 4189 NULL 4190 }; 4191 4192 static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort); 4193 4194 static struct attribute *intel_pmu_attrs[] = { 4195 &dev_attr_freeze_on_smi.attr, 4196 NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */ 4197 NULL, 4198 }; 4199 4200 static __init struct attribute ** 4201 get_events_attrs(struct attribute **base, 4202 struct attribute **mem, 4203 struct attribute **tsx) 4204 { 4205 struct attribute **attrs = base; 4206 struct attribute **old; 4207 4208 if (mem && x86_pmu.pebs) 4209 attrs = merge_attr(attrs, mem); 4210 4211 if (tsx && boot_cpu_has(X86_FEATURE_RTM)) { 4212 old = attrs; 4213 attrs = merge_attr(attrs, tsx); 4214 if (old != base) 4215 kfree(old); 4216 } 4217 4218 return attrs; 4219 } 4220 4221 __init int intel_pmu_init(void) 4222 { 4223 struct attribute **extra_attr = NULL; 4224 struct attribute **mem_attr = NULL; 4225 struct attribute **tsx_attr = NULL; 4226 struct attribute **to_free = NULL; 4227 union cpuid10_edx edx; 4228 union cpuid10_eax eax; 4229 union cpuid10_ebx ebx; 4230 struct event_constraint *c; 4231 unsigned int unused; 4232 struct extra_reg *er; 4233 int version, i; 4234 char *name; 4235 4236 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { 4237 switch (boot_cpu_data.x86) { 4238 case 0x6: 4239 return p6_pmu_init(); 4240 case 0xb: 
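/* family 0xb is Knights Corner, which has no architectural PerfMon */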
4241 return knc_pmu_init(); 4242 case 0xf: 4243 return p4_pmu_init(); 4244 } 4245 return -ENODEV; 4246 } 4247 4248 /* 4249 * Check whether the Architectural PerfMon supports 4250 * Branch Misses Retired hw_event or not. 4251 */ 4252 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full); 4253 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT) 4254 return -ENODEV; 4255 4256 version = eax.split.version_id; 4257 if (version < 2) 4258 x86_pmu = core_pmu; 4259 else 4260 x86_pmu = intel_pmu; 4261 4262 x86_pmu.version = version; 4263 x86_pmu.num_counters = eax.split.num_counters; 4264 x86_pmu.cntval_bits = eax.split.bit_width; 4265 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1; 4266 4267 x86_pmu.events_maskl = ebx.full; 4268 x86_pmu.events_mask_len = eax.split.mask_length; 4269 4270 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters); 4271 4272 /* 4273 * Quirk: v2 perfmon does not report fixed-purpose events, so 4274 * assume at least 3 events, when not running in a hypervisor: 4275 */ 4276 if (version > 1) { 4277 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR); 4278 4279 x86_pmu.num_counters_fixed = 4280 max((int)edx.split.num_counters_fixed, assume); 4281 } 4282 4283 if (version >= 4) 4284 x86_pmu.counter_freezing = !disable_counter_freezing; 4285 4286 if (boot_cpu_has(X86_FEATURE_PDCM)) { 4287 u64 capabilities; 4288 4289 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities); 4290 x86_pmu.intel_cap.capabilities = capabilities; 4291 } 4292 4293 intel_ds_init(); 4294 4295 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */ 4296 4297 /* 4298 * Install the hw-cache-events table: 4299 */ 4300 switch (boot_cpu_data.x86_model) { 4301 case INTEL_FAM6_CORE_YONAH: 4302 pr_cont("Core events, "); 4303 name = "core"; 4304 break; 4305 4306 case INTEL_FAM6_CORE2_MEROM: 4307 x86_add_quirk(intel_clovertown_quirk); 4308 /* fall through */ 4309 4310 case INTEL_FAM6_CORE2_MEROM_L: 4311 case INTEL_FAM6_CORE2_PENRYN: 4312 case INTEL_FAM6_CORE2_DUNNINGTON: 4313 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, 4314 sizeof(hw_cache_event_ids)); 4315 4316 intel_pmu_lbr_init_core(); 4317 4318 x86_pmu.event_constraints = intel_core2_event_constraints; 4319 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints; 4320 pr_cont("Core2 events, "); 4321 name = "core2"; 4322 break; 4323 4324 case INTEL_FAM6_NEHALEM: 4325 case INTEL_FAM6_NEHALEM_EP: 4326 case INTEL_FAM6_NEHALEM_EX: 4327 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, 4328 sizeof(hw_cache_event_ids)); 4329 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, 4330 sizeof(hw_cache_extra_regs)); 4331 4332 intel_pmu_lbr_init_nhm(); 4333 4334 x86_pmu.event_constraints = intel_nehalem_event_constraints; 4335 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; 4336 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 4337 x86_pmu.extra_regs = intel_nehalem_extra_regs; 4338 4339 mem_attr = nhm_mem_events_attrs; 4340 4341 /* UOPS_ISSUED.STALLED_CYCLES */ 4342 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 4343 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 4344 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ 4345 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 4346 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); 4347 4348 intel_pmu_pebs_data_source_nhm(); 4349 x86_add_quirk(intel_nehalem_quirk); 4350 x86_pmu.pebs_no_tlb = 1; 4351 extra_attr = nhm_format_attr; 4352 4353 pr_cont("Nehalem events, "); 4354 name = "nehalem"; 4355 
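/*
 * For reference: X86_CONFIG() above only packs the PERFEVTSEL fields,
 * so the frontend-stall alias is equivalent to raw config 0x0180010e
 * (event=0x0e, umask=0x01, inv=1, cmask=1) and should be reachable as
 * "perf stat -e cpu/event=0xe,umask=0x1,inv=1,cmask=1/" or simply as
 * the generic "stalled-cycles-frontend" event.
 */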
break; 4356 4357 case INTEL_FAM6_ATOM_BONNELL: 4358 case INTEL_FAM6_ATOM_BONNELL_MID: 4359 case INTEL_FAM6_ATOM_SALTWELL: 4360 case INTEL_FAM6_ATOM_SALTWELL_MID: 4361 case INTEL_FAM6_ATOM_SALTWELL_TABLET: 4362 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, 4363 sizeof(hw_cache_event_ids)); 4364 4365 intel_pmu_lbr_init_atom(); 4366 4367 x86_pmu.event_constraints = intel_gen_event_constraints; 4368 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints; 4369 x86_pmu.pebs_aliases = intel_pebs_aliases_core2; 4370 pr_cont("Atom events, "); 4371 name = "bonnell"; 4372 break; 4373 4374 case INTEL_FAM6_ATOM_SILVERMONT: 4375 case INTEL_FAM6_ATOM_SILVERMONT_X: 4376 case INTEL_FAM6_ATOM_SILVERMONT_MID: 4377 case INTEL_FAM6_ATOM_AIRMONT: 4378 case INTEL_FAM6_ATOM_AIRMONT_MID: 4379 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, 4380 sizeof(hw_cache_event_ids)); 4381 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, 4382 sizeof(hw_cache_extra_regs)); 4383 4384 intel_pmu_lbr_init_slm(); 4385 4386 x86_pmu.event_constraints = intel_slm_event_constraints; 4387 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints; 4388 x86_pmu.extra_regs = intel_slm_extra_regs; 4389 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 4390 x86_pmu.cpu_events = slm_events_attrs; 4391 extra_attr = slm_format_attr; 4392 pr_cont("Silvermont events, "); 4393 name = "silvermont"; 4394 break; 4395 4396 case INTEL_FAM6_ATOM_GOLDMONT: 4397 case INTEL_FAM6_ATOM_GOLDMONT_X: 4398 x86_add_quirk(intel_counter_freezing_quirk); 4399 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids, 4400 sizeof(hw_cache_event_ids)); 4401 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs, 4402 sizeof(hw_cache_extra_regs)); 4403 4404 intel_pmu_lbr_init_skl(); 4405 4406 x86_pmu.event_constraints = intel_slm_event_constraints; 4407 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints; 4408 x86_pmu.extra_regs = intel_glm_extra_regs; 4409 /* 4410 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS 4411 * for precise cycles. 4412 * :pp is identical to :ppp 4413 */ 4414 x86_pmu.pebs_aliases = NULL; 4415 x86_pmu.pebs_prec_dist = true; 4416 x86_pmu.lbr_pt_coexist = true; 4417 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 4418 x86_pmu.cpu_events = glm_events_attrs; 4419 extra_attr = slm_format_attr; 4420 pr_cont("Goldmont events, "); 4421 name = "goldmont"; 4422 break; 4423 4424 case INTEL_FAM6_ATOM_GOLDMONT_PLUS: 4425 x86_add_quirk(intel_counter_freezing_quirk); 4426 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, 4427 sizeof(hw_cache_event_ids)); 4428 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs, 4429 sizeof(hw_cache_extra_regs)); 4430 4431 intel_pmu_lbr_init_skl(); 4432 4433 x86_pmu.event_constraints = intel_slm_event_constraints; 4434 x86_pmu.extra_regs = intel_glm_extra_regs; 4435 /* 4436 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS 4437 * for precise cycles. 
4438 */ 4439 x86_pmu.pebs_aliases = NULL; 4440 x86_pmu.pebs_prec_dist = true; 4441 x86_pmu.lbr_pt_coexist = true; 4442 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 4443 x86_pmu.flags |= PMU_FL_PEBS_ALL; 4444 x86_pmu.get_event_constraints = glp_get_event_constraints; 4445 x86_pmu.cpu_events = glm_events_attrs; 4446 /* Goldmont Plus has 4-wide pipeline */ 4447 event_attr_td_total_slots_scale_glm.event_str = "4"; 4448 extra_attr = slm_format_attr; 4449 pr_cont("Goldmont plus events, "); 4450 name = "goldmont_plus"; 4451 break; 4452 4453 case INTEL_FAM6_WESTMERE: 4454 case INTEL_FAM6_WESTMERE_EP: 4455 case INTEL_FAM6_WESTMERE_EX: 4456 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, 4457 sizeof(hw_cache_event_ids)); 4458 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, 4459 sizeof(hw_cache_extra_regs)); 4460 4461 intel_pmu_lbr_init_nhm(); 4462 4463 x86_pmu.event_constraints = intel_westmere_event_constraints; 4464 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 4465 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; 4466 x86_pmu.extra_regs = intel_westmere_extra_regs; 4467 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 4468 4469 mem_attr = nhm_mem_events_attrs; 4470 4471 /* UOPS_ISSUED.STALLED_CYCLES */ 4472 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 4473 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 4474 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ 4475 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 4476 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); 4477 4478 intel_pmu_pebs_data_source_nhm(); 4479 extra_attr = nhm_format_attr; 4480 pr_cont("Westmere events, "); 4481 name = "westmere"; 4482 break; 4483 4484 case INTEL_FAM6_SANDYBRIDGE: 4485 case INTEL_FAM6_SANDYBRIDGE_X: 4486 x86_add_quirk(intel_sandybridge_quirk); 4487 x86_add_quirk(intel_ht_bug); 4488 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 4489 sizeof(hw_cache_event_ids)); 4490 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 4491 sizeof(hw_cache_extra_regs)); 4492 4493 intel_pmu_lbr_init_snb(); 4494 4495 x86_pmu.event_constraints = intel_snb_event_constraints; 4496 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; 4497 x86_pmu.pebs_aliases = intel_pebs_aliases_snb; 4498 if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X) 4499 x86_pmu.extra_regs = intel_snbep_extra_regs; 4500 else 4501 x86_pmu.extra_regs = intel_snb_extra_regs; 4502 4503 4504 /* all extra regs are per-cpu when HT is on */ 4505 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 4506 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 4507 4508 x86_pmu.cpu_events = snb_events_attrs; 4509 mem_attr = snb_mem_events_attrs; 4510 4511 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 4512 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 4513 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 4514 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ 4515 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 4516 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1); 4517 4518 extra_attr = nhm_format_attr; 4519 4520 pr_cont("SandyBridge events, "); 4521 name = "sandybridge"; 4522 break; 4523 4524 case INTEL_FAM6_IVYBRIDGE: 4525 case INTEL_FAM6_IVYBRIDGE_X: 4526 x86_add_quirk(intel_ht_bug); 4527 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 4528 sizeof(hw_cache_event_ids)); 4529 /* dTLB-load-misses on IVB is different than SNB */ 4530 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */ 4531 4532 
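/*
 * Entries in hw_cache_event_ids are the low 16 bits of a raw PERFEVTSEL
 * value, i.e. (umask << 8) | event, so the 0x8108 above selects event
 * 0x08 with umask 0x81 on IVB, while the SNB table keeps a different
 * umask in the same slot.
 */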
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 4533 sizeof(hw_cache_extra_regs)); 4534 4535 intel_pmu_lbr_init_snb(); 4536 4537 x86_pmu.event_constraints = intel_ivb_event_constraints; 4538 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; 4539 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 4540 x86_pmu.pebs_prec_dist = true; 4541 if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X) 4542 x86_pmu.extra_regs = intel_snbep_extra_regs; 4543 else 4544 x86_pmu.extra_regs = intel_snb_extra_regs; 4545 /* all extra regs are per-cpu when HT is on */ 4546 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 4547 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 4548 4549 x86_pmu.cpu_events = snb_events_attrs; 4550 mem_attr = snb_mem_events_attrs; 4551 4552 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 4553 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 4554 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 4555 4556 extra_attr = nhm_format_attr; 4557 4558 pr_cont("IvyBridge events, "); 4559 name = "ivybridge"; 4560 break; 4561 4562 4563 case INTEL_FAM6_HASWELL_CORE: 4564 case INTEL_FAM6_HASWELL_X: 4565 case INTEL_FAM6_HASWELL_ULT: 4566 case INTEL_FAM6_HASWELL_GT3E: 4567 x86_add_quirk(intel_ht_bug); 4568 x86_add_quirk(intel_pebs_isolation_quirk); 4569 x86_pmu.late_ack = true; 4570 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 4571 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 4572 4573 intel_pmu_lbr_init_hsw(); 4574 4575 x86_pmu.event_constraints = intel_hsw_event_constraints; 4576 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; 4577 x86_pmu.extra_regs = intel_snbep_extra_regs; 4578 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 4579 x86_pmu.pebs_prec_dist = true; 4580 /* all extra regs are per-cpu when HT is on */ 4581 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 4582 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 4583 4584 x86_pmu.hw_config = hsw_hw_config; 4585 x86_pmu.get_event_constraints = hsw_get_event_constraints; 4586 x86_pmu.cpu_events = hsw_events_attrs; 4587 x86_pmu.lbr_double_abort = true; 4588 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
4589 hsw_format_attr : nhm_format_attr; 4590 mem_attr = hsw_mem_events_attrs; 4591 tsx_attr = hsw_tsx_events_attrs; 4592 pr_cont("Haswell events, "); 4593 name = "haswell"; 4594 break; 4595 4596 case INTEL_FAM6_BROADWELL_CORE: 4597 case INTEL_FAM6_BROADWELL_XEON_D: 4598 case INTEL_FAM6_BROADWELL_GT3E: 4599 case INTEL_FAM6_BROADWELL_X: 4600 x86_add_quirk(intel_pebs_isolation_quirk); 4601 x86_pmu.late_ack = true; 4602 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 4603 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 4604 4605 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */ 4606 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ | 4607 BDW_L3_MISS|HSW_SNOOP_DRAM; 4608 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS| 4609 HSW_SNOOP_DRAM; 4610 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ| 4611 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM; 4612 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE| 4613 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM; 4614 4615 intel_pmu_lbr_init_hsw(); 4616 4617 x86_pmu.event_constraints = intel_bdw_event_constraints; 4618 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints; 4619 x86_pmu.extra_regs = intel_snbep_extra_regs; 4620 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb; 4621 x86_pmu.pebs_prec_dist = true; 4622 /* all extra regs are per-cpu when HT is on */ 4623 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 4624 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 4625 4626 x86_pmu.hw_config = hsw_hw_config; 4627 x86_pmu.get_event_constraints = hsw_get_event_constraints; 4628 x86_pmu.cpu_events = hsw_events_attrs; 4629 x86_pmu.limit_period = bdw_limit_period; 4630 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
4631 hsw_format_attr : nhm_format_attr; 4632 mem_attr = hsw_mem_events_attrs; 4633 tsx_attr = hsw_tsx_events_attrs; 4634 pr_cont("Broadwell events, "); 4635 name = "broadwell"; 4636 break; 4637 4638 case INTEL_FAM6_XEON_PHI_KNL: 4639 case INTEL_FAM6_XEON_PHI_KNM: 4640 memcpy(hw_cache_event_ids, 4641 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 4642 memcpy(hw_cache_extra_regs, 4643 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 4644 intel_pmu_lbr_init_knl(); 4645 4646 x86_pmu.event_constraints = intel_slm_event_constraints; 4647 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints; 4648 x86_pmu.extra_regs = intel_knl_extra_regs; 4649 4650 /* all extra regs are per-cpu when HT is on */ 4651 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 4652 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 4653 extra_attr = slm_format_attr; 4654 pr_cont("Knights Landing/Mill events, "); 4655 name = "knights-landing"; 4656 break; 4657 4658 case INTEL_FAM6_SKYLAKE_MOBILE: 4659 case INTEL_FAM6_SKYLAKE_DESKTOP: 4660 case INTEL_FAM6_SKYLAKE_X: 4661 case INTEL_FAM6_KABYLAKE_MOBILE: 4662 case INTEL_FAM6_KABYLAKE_DESKTOP: 4663 x86_add_quirk(intel_pebs_isolation_quirk); 4664 x86_pmu.late_ack = true; 4665 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 4666 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 4667 intel_pmu_lbr_init_skl(); 4668 4669 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */ 4670 event_attr_td_recovery_bubbles.event_str_noht = 4671 "event=0xd,umask=0x1,cmask=1"; 4672 event_attr_td_recovery_bubbles.event_str_ht = 4673 "event=0xd,umask=0x1,cmask=1,any=1"; 4674 4675 x86_pmu.event_constraints = intel_skl_event_constraints; 4676 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints; 4677 x86_pmu.extra_regs = intel_skl_extra_regs; 4678 x86_pmu.pebs_aliases = intel_pebs_aliases_skl; 4679 x86_pmu.pebs_prec_dist = true; 4680 /* all extra regs are per-cpu when HT is on */ 4681 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 4682 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 4683 4684 x86_pmu.hw_config = hsw_hw_config; 4685 x86_pmu.get_event_constraints = hsw_get_event_constraints; 4686 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
4687 hsw_format_attr : nhm_format_attr;
4688 extra_attr = merge_attr(extra_attr, skl_format_attr);
4689 to_free = extra_attr;
4690 x86_pmu.cpu_events = hsw_events_attrs;
4691 mem_attr = hsw_mem_events_attrs;
4692 tsx_attr = hsw_tsx_events_attrs;
4693 intel_pmu_pebs_data_source_skl(
4694 boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
4695
4696 if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
4697 x86_pmu.flags |= PMU_FL_TFA;
4698 x86_pmu.get_event_constraints = tfa_get_event_constraints;
4699 x86_pmu.enable_all = intel_tfa_pmu_enable_all;
4700 x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
4701 intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
4702 }
4703
4704 pr_cont("Skylake events, ");
4705 name = "skylake";
4706 break;
4707
4708 default:
4709 switch (x86_pmu.version) {
4710 case 1:
4711 x86_pmu.event_constraints = intel_v1_event_constraints;
4712 pr_cont("generic architected perfmon v1, ");
4713 name = "generic_arch_v1";
4714 break;
4715 default:
4716 /*
4717 * default constraints for v2 and up
4718 */
4719 x86_pmu.event_constraints = intel_gen_event_constraints;
4720 pr_cont("generic architected perfmon, ");
4721 name = "generic_arch_v2+";
4722 break;
4723 }
4724 }
4725
4726 snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
4727
4728 if (version >= 2 && extra_attr) {
4729 x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
4730 extra_attr);
4731 WARN_ON(!x86_pmu.format_attrs);
4732 }
4733
4734 x86_pmu.cpu_events = get_events_attrs(x86_pmu.cpu_events,
4735 mem_attr, tsx_attr);
4736
4737 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
4738 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
4739 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
4740 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
4741 }
4742 x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
4743
4744 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
4745 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
4746 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
4747 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
4748 }
4749
4750 x86_pmu.intel_ctrl |=
4751 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
4752
4753 if (x86_pmu.event_constraints) {
4754 /*
4755 * event on fixed counter2 (REF_CYCLES) only works on this
4756 * counter, so do not extend mask to generic counters
4757 */
4758 for_each_event_constraint(c, x86_pmu.event_constraints) {
4759 if (c->cmask == FIXED_EVENT_FLAGS
4760 && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
4761 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
4762 }
4763 c->idxmsk64 &=
4764 ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
4765 c->weight = hweight64(c->idxmsk64);
4766 }
4767 }
4768
4769 /*
4770 * Accessing LBR MSRs may cause #GP under certain circumstances.
4771 * E.g. KVM doesn't support the LBR MSRs.
4772 * Check all LBR MSRs here.
4773 * Disable LBR access if any LBR MSRs cannot be accessed.
4774 */
4775 if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
4776 x86_pmu.lbr_nr = 0;
4777 for (i = 0; i < x86_pmu.lbr_nr; i++) {
4778 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
4779 check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
4780 x86_pmu.lbr_nr = 0;
4781 }
4782
4783 x86_pmu.caps_attrs = intel_pmu_caps_attrs;
4784
4785 if (x86_pmu.lbr_nr) {
4786 x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs);
4787 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
4788 }
4789
4790 /*
4791 * Accessing extra MSRs may cause #GP under certain circumstances.
4792 * E.g.
KVM doesn't support offcore event 4793 * Check all extra_regs here. 4794 */ 4795 if (x86_pmu.extra_regs) { 4796 for (er = x86_pmu.extra_regs; er->msr; er++) { 4797 er->extra_msr_access = check_msr(er->msr, 0x11UL); 4798 /* Disable LBR select mapping */ 4799 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access) 4800 x86_pmu.lbr_sel_map = NULL; 4801 } 4802 } 4803 4804 /* Support full width counters using alternative MSR range */ 4805 if (x86_pmu.intel_cap.full_width_write) { 4806 x86_pmu.max_period = x86_pmu.cntval_mask >> 1; 4807 x86_pmu.perfctr = MSR_IA32_PMC0; 4808 pr_cont("full-width counters, "); 4809 } 4810 4811 /* 4812 * For arch perfmon 4 use counter freezing to avoid 4813 * several MSR accesses in the PMI. 4814 */ 4815 if (x86_pmu.counter_freezing) 4816 x86_pmu.handle_irq = intel_pmu_handle_irq_v4; 4817 4818 kfree(to_free); 4819 return 0; 4820 } 4821 4822 /* 4823 * HT bug: phase 2 init 4824 * Called once we have valid topology information to check 4825 * whether or not HT is enabled 4826 * If HT is off, then we disable the workaround 4827 */ 4828 static __init int fixup_ht_bug(void) 4829 { 4830 int c; 4831 /* 4832 * problem not present on this CPU model, nothing to do 4833 */ 4834 if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED)) 4835 return 0; 4836 4837 if (topology_max_smt_threads() > 1) { 4838 pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n"); 4839 return 0; 4840 } 4841 4842 cpus_read_lock(); 4843 4844 hardlockup_detector_perf_stop(); 4845 4846 x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED); 4847 4848 x86_pmu.start_scheduling = NULL; 4849 x86_pmu.commit_scheduling = NULL; 4850 x86_pmu.stop_scheduling = NULL; 4851 4852 hardlockup_detector_perf_restart(); 4853 4854 for_each_online_cpu(c) 4855 free_excl_cntrs(&per_cpu(cpu_hw_events, c)); 4856 4857 cpus_read_unlock(); 4858 pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n"); 4859 return 0; 4860 } 4861 subsys_initcall(fixup_ht_bug) 4862
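/*
 * Usage note for the sysfs attributes defined in this file; the paths
 * assume the usual x86 "cpu" PMU device (/sys/devices/cpu/, also visible
 * under /sys/bus/event_source/devices/cpu/):
 *
 *   echo 1 > /sys/devices/cpu/freeze_on_smi    # freeze_on_smi_store()
 *   cat /sys/devices/cpu/caps/pmu_name         # pmu_name_str, e.g. "skylake"
 *   cat /sys/devices/cpu/caps/branches         # LBR depth (x86_pmu.lbr_nr)
 */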