xref: /openbmc/linux/arch/x86/events/intel/ds.c (revision fc7ce9c7)

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/perf_event.h>
#include <asm/insn.h>

#include "../perf_event.h"

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	(PAGE_SIZE << 4)
#define PEBS_FIXUP_SIZE		PAGE_SIZE
/*
 * pebs_record_32 for P4 and Core is not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */

union intel_x86_pebs_dse {
	u64 val;
	struct {
		unsigned int ld_dse:4;
		unsigned int ld_stlb_miss:1;
		unsigned int ld_locked:1;
		unsigned int ld_reserved:26;
	};
	struct {
		unsigned int st_l1d_hit:1;
		unsigned int st_reserved1:3;
		unsigned int st_stlb_miss:1;
		unsigned int st_locked:1;
		unsigned int st_reserved2:26;
	};
};
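
/*
 * Worked example (illustrative value, not from a real trace): a load
 * status of 0x23 decodes through the first struct as
 *
 *	union intel_x86_pebs_dse dse = { .val = 0x23 };
 *	dse.ld_dse       == 0x3   (bits 0-3: L2 hit, per the table below)
 *	dse.ld_stlb_miss == 0     (bit 4)
 *	dse.ld_locked    == 1     (bit 5: locked access)
 */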


/*
 * Map PEBS Load Latency Data Source encodings to generic
 * memory data source information
 */
#define P(a, b) PERF_MEM_S(a, b)
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define LEVEL(x) P(LVLNUM, x)
#define REM P(REMOTE, REMOTE)
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))

/* Version for Sandy Bridge and later */
static u64 pebs_data_source[] = {
	P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),/* 0x00: unknown L3 */
	OP_LH | P(LVL, L1)  | LEVEL(L1) | P(SNOOP, NONE),  /* 0x01: L1 local */
	OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
	OP_LH | P(LVL, L2)  | LEVEL(L2) | P(SNOOP, NONE),  /* 0x03: L2 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, NONE),  /* 0x04: L3 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, MISS),  /* 0x05: L3 hit, snoop miss */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, HIT),   /* 0x06: L3 hit, snoop hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, HITM),  /* 0x07: L3 hit, snoop hitm */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x08: L3 miss snoop hit */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, HIT),       /* 0x0a: L3 miss, shared */
	OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x0b: L3 miss, shared */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | SNOOP_NONE_MISS,     /* 0x0c: L3 miss, excl */
	OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */
	OP_LH | P(LVL, IO)  | LEVEL(NA) | P(SNOOP, NONE), /* 0x0e: I/O */
	OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0f: uncached */
};

/* Patch up minor differences in the bits */
void __init intel_pmu_pebs_data_source_nhm(void)
{
	pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
	pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
	pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
}

void __init intel_pmu_pebs_data_source_skl(bool pmem)
{
	u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);

	pebs_data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
	pebs_data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
	pebs_data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
	pebs_data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
	pebs_data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
}
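
/*
 * Sketch of how a model-init path might apply these quirks (the call
 * sites below are hypothetical; the real ones live in the per-model
 * PMU setup code):
 *
 *	if (nhm_style_pebs)                     <- assumed predicate
 *		intel_pmu_pebs_data_source_nhm();
 *	else if (skl_server_with_pmem)          <- assumed predicate
 *		intel_pmu_pebs_data_source_skl(true);
 */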
94e17dc653SAndi Kleen 
957010d129SBorislav Petkov static u64 precise_store_data(u64 status)
967010d129SBorislav Petkov {
977010d129SBorislav Petkov 	union intel_x86_pebs_dse dse;
987010d129SBorislav Petkov 	u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);
997010d129SBorislav Petkov 
1007010d129SBorislav Petkov 	dse.val = status;
1017010d129SBorislav Petkov 
	/*
	 * bit 4: TLB access
	 * 1 = the store missed the 2nd level TLB,
	 *     so the translation hit the page walker or the OS;
	 * 0 = the store hit the 2nd level TLB
	 */
	if (dse.st_stlb_miss)
		val |= P(TLB, MISS);
	else
		val |= P(TLB, HIT);

	/*
	 * bit 0: hit L1 data cache
	 * if not set, then all we know is that
	 * it missed L1D
	 */
	if (dse.st_l1d_hit)
		val |= P(LVL, HIT);
	else
		val |= P(LVL, MISS);

	/*
	 * bit 5: Locked prefix
	 */
	if (dse.st_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
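
/*
 * Worked example (illustrative): status = 0x1 sets only st_l1d_hit, so
 * precise_store_data() returns
 *	P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(LVL, HIT) |
 *	P(TLB, L2) | P(TLB, HIT)
 * i.e. an unlocked store that hit both L1D and the 2nd level TLB.
 */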

static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{
	union perf_mem_data_src dse;

	dse.val = PERF_MEM_NA;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
		dse.mem_op = PERF_MEM_OP_STORE;
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
		dse.mem_op = PERF_MEM_OP_LOAD;

	/*
	 * L1 info is only valid for the following events:
	 *
	 * MEM_UOPS_RETIRED.STLB_MISS_STORES
	 * MEM_UOPS_RETIRED.LOCK_STORES
	 * MEM_UOPS_RETIRED.SPLIT_STORES
	 * MEM_UOPS_RETIRED.ALL_STORES
	 */
	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
		if (status & 1)
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		else
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
	}
	return dse.val;
}

static u64 load_latency_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val;

	dse.val = status;

	/*
	 * use the mapping table for bits 0-3
	 */
	val = pebs_data_source[dse.ld_dse];

	/*
	 * Nehalem models do not provide TLB or Lock information
	 */
	if (x86_pmu.pebs_no_tlb) {
		val |= P(TLB, NA) | P(LOCK, NA);
		return val;
	}
	/*
	 * bit 4: TLB access
	 * 0 = did not miss 2nd level TLB
	 * 1 = missed 2nd level TLB
	 */
	if (dse.ld_stlb_miss)
		val |= P(TLB, MISS) | P(TLB, L2);
	else
		val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);

	/*
	 * bit 5: locked prefix
	 */
	if (dse.ld_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
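
/*
 * Worked example (illustrative): on a part without pebs_no_tlb,
 * status = 0x14 gives ld_dse = 4 (L3 hit, snoop none) with
 * ld_stlb_miss = 1 and ld_locked = 0, so the result is
 *	OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE) |
 *	P(TLB, MISS) | P(TLB, L2)
 */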

struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

/*
 * Same as pebs_record_nhm, with two additional fields.
 */
struct pebs_record_hsw {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
};

union hsw_tsx_tuning {
	struct {
		u32 cycles_last_block     : 32,
		    hle_abort		  : 1,
		    rtm_abort		  : 1,
		    instruction_abort     : 1,
		    non_instruction_abort : 1,
		    retry		  : 1,
		    data_conflict	  : 1,
		    capacity_writes	  : 1,
		    capacity_reads	  : 1;
	};
	u64	    value;
};

#define PEBS_HSW_TSX_FLAGS	0xff00000000ULL

/* Same as HSW, plus TSC */

struct pebs_record_skl {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
	u64 tsc;
};

void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static DEFINE_PER_CPU(void *, insn_buffer);

static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max;
	void *buffer, *ibuffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	/*
	 * HSW+ already provides us the eventing ip; no need to allocate this
	 * buffer then.
	 */
	if (x86_pmu.intel_cap.pebs_format < 2) {
		ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
		if (!ibuffer) {
			kfree(buffer);
			return -ENOMEM;
		}
		per_cpu(insn_buffer, cpu) = ibuffer;
	}

	max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	return 0;
}
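
/*
 * Sizing example (assuming the default x86_pmu.pebs_buffer_size of
 * PEBS_BUFFER_SIZE = 64KB on 4KB pages): with the 144-byte
 * pebs_record_core format, max = 65536 / 144 = 455 records, so
 * pebs_absolute_maximum lands 65520 bytes past pebs_buffer_base.
 */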

static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	kfree(per_cpu(insn_buffer, cpu));
	per_cpu(insn_buffer, cpu) = NULL;

	kfree((void *)(unsigned long)ds->pebs_buffer_base);
	ds->pebs_buffer_base = 0;
}

static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
	if (unlikely(!buffer)) {
		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
		return -ENOMEM;
	}

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}
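
/*
 * Sizing example (4KB pages): BTS_BUFFER_SIZE = 64KB holds
 * max = 65536 / 24 = 2730 records, and thresh = 2730 / 16 = 170, so the
 * interrupt threshold sits 170 records (~4KB) below the absolute top.
 */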

static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	kfree((void *)(unsigned long)ds->bts_buffer_base);
	ds->bts_buffer_base = 0;
}

static int alloc_ds_buffer(int cpu)
{
	int node = cpu_to_node(cpu);
	struct debug_store *ds;

	ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
	if (unlikely(!ds))
		return -ENOMEM;

	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}

static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	per_cpu(cpu_hw_events, cpu).ds = NULL;
	kfree(ds);
}

void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}

void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}

/*
 * BTS
 */

struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);

void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	if (config & ARCH_PERFMON_EVENTSEL_INT)
		debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}
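
/*
 * Example (illustrative): for a user-space-only BTS config with the
 * interrupt bit set (ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT),
 * the function ORs in DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
 * DEBUGCTLMSR_BTS_OFF_OS: trace branches, store them, interrupt at the
 * threshold, but suppress ring-0 branches.
 */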

void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *base, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	unsigned long skip = 0;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top  = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= base)
		return 0;

	memset(&regs, 0, sizeof(regs));

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * BTS leaks kernel addresses in branches across the cpl boundary,
	 * such as traps or system calls, so unless the user is asking for
	 * kernel tracing (and right now it's not possible), we'd need to
	 * filter them out. But first we need to count how many of those we
	 * have in the current batch. This is an extra O(n) pass, however,
	 * it's much faster than the other one especially considering that
	 * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see the
	 * alloc_bts_buffer()).
	 */
	for (at = base; at < top; at++) {
		/*
		 * Note that right now *this* BTS code only works if
		 * attr::exclude_kernel is set, but let's keep this extra
		 * check here in case that changes.
		 */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			skip++;
	}

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	rcu_read_lock();
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size *
			      (top - base - skip)))
		goto unlock;

	for (at = base; at < top; at++) {
		/* Filter out any records that contain kernel addresses. */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			continue;

		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
unlock:
	rcu_read_unlock();
	return 1;
}

static inline void intel_pmu_drain_pebs_buffer(void)
{
	struct pt_regs regs;

	x86_pmu.drain_pebs(&regs);
}

/*
 * PEBS
 */
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_slm_pebs_event_constraints[] = {
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_glm_pebs_event_constraints[] = {
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_glp_pebs_event_constraints[] = {
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),      /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),      /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_ivb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_hsw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),    /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_bdw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),    /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};


struct event_constraint intel_skl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
	/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),		      /* MEM_TRANS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	return &emptyconstraint;
}
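
/*
 * Example (illustrative): on Sandy Bridge, a precise event with
 * config 0x01cd matches INTEL_PLD_CONSTRAINT(0x01cd, 0x8) above, so the
 * event inherits the load-latency PEBS flag from the constraint and is
 * restricted to counter 3 (counter mask 0x8).
 */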

/*
 * We need the sched_task callback even for per-cpu events when we use
 * the large interrupt threshold, such that we can provide PID and TID
 * to PEBS samples.
 */
static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
{
	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
}

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!sched_in && pebs_needs_sched_cb(cpuc))
		intel_pmu_drain_pebs_buffer();
}

static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
	struct debug_store *ds = cpuc->ds;
	u64 threshold;

	if (cpuc->n_pebs == cpuc->n_large_pebs) {
		threshold = ds->pebs_absolute_maximum -
			x86_pmu.max_pebs_events * x86_pmu.pebs_record_size;
	} else {
		threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
	}

	ds->pebs_interrupt_threshold = threshold;
}
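
/*
 * Example (illustrative, assuming max_pebs_events = 4 and the 144-byte
 * pebs_record_core format): when every PEBS event uses the large
 * threshold, the interrupt threshold is set 4 * 144 = 576 bytes below
 * pebs_absolute_maximum; otherwise the PMI fires after a single record.
 */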

static void
pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct pmu *pmu)
{
	/*
	 * Make sure we get updated with the first PEBS
	 * event. It will trigger also during removal, but
	 * that does not hurt:
	 */
	bool update = cpuc->n_pebs == 1;

	if (needed_cb != pebs_needs_sched_cb(cpuc)) {
		if (!needed_cb)
			perf_sched_cb_inc(pmu);
		else
			perf_sched_cb_dec(pmu);

		update = true;
	}

	if (update)
		pebs_update_threshold(cpuc);
}

void intel_pmu_pebs_add(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs++;
	if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
		cpuc->n_large_pebs++;

	pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
}

void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct debug_store *ds = cpuc->ds;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
		cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled |= 1ULL << 63;

	/*
	 * Use auto-reload if possible to save a MSR write in the PMI.
	 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD
	 * can change the sample period while the event is stopped.
	 */
	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
		ds->pebs_event_reset[hwc->idx] =
			(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
	} else {
		ds->pebs_event_reset[hwc->idx] = 0;
	}
}
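
/*
 * Resulting MSR_IA32_PEBS_ENABLE layout, as programmed above: bit
 * hwc->idx arms PEBS for that counter, bit (hwc->idx + 32) additionally
 * enables load-latency capture, and bit 63 enables precise-store
 * capture. E.g. a load-latency event on counter 3 sets bits 3 and 35.
 */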

void intel_pmu_pebs_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs--;
	if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
		cpuc->n_large_pebs--;

	pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
}

void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (cpuc->n_pebs == cpuc->n_large_pebs)
		intel_pmu_drain_pebs_buffer();

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);

	if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
		cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled &= ~(1ULL << 63);

	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
}

void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}

static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;
	void *kaddr;
	int size;

	/*
	 * We don't need to fixup if the PEBS assist is fault-like
	 */
9787010d129SBorislav Petkov 	if (!x86_pmu.intel_cap.pebs_trap)
9797010d129SBorislav Petkov 		return 1;
9807010d129SBorislav Petkov 
9817010d129SBorislav Petkov 	/*
9827010d129SBorislav Petkov 	 * No LBR entry, no basic block, no rewinding
9837010d129SBorislav Petkov 	 */
9847010d129SBorislav Petkov 	if (!cpuc->lbr_stack.nr || !from || !to)
9857010d129SBorislav Petkov 		return 0;
9867010d129SBorislav Petkov 
9877010d129SBorislav Petkov 	/*
9887010d129SBorislav Petkov 	 * Basic blocks should never cross user/kernel boundaries
9897010d129SBorislav Petkov 	 */
9907010d129SBorislav Petkov 	if (kernel_ip(ip) != kernel_ip(to))
9917010d129SBorislav Petkov 		return 0;
9927010d129SBorislav Petkov 
9937010d129SBorislav Petkov 	/*
9947010d129SBorislav Petkov 	 * unsigned math, either ip is before the start (impossible) or
9957010d129SBorislav Petkov 	 * the basic block is larger than 1 page (sanity)
9967010d129SBorislav Petkov 	 */
9977010d129SBorislav Petkov 	if ((ip - to) > PEBS_FIXUP_SIZE)
9987010d129SBorislav Petkov 		return 0;
9997010d129SBorislav Petkov 
10007010d129SBorislav Petkov 	/*
10017010d129SBorislav Petkov 	 * We sampled a branch insn, rewind using the LBR stack
10027010d129SBorislav Petkov 	 */
10037010d129SBorislav Petkov 	if (ip == to) {
10047010d129SBorislav Petkov 		set_linear_ip(regs, from);
10057010d129SBorislav Petkov 		return 1;
10067010d129SBorislav Petkov 	}
10077010d129SBorislav Petkov 
10087010d129SBorislav Petkov 	size = ip - to;
10097010d129SBorislav Petkov 	if (!kernel_ip(ip)) {
10107010d129SBorislav Petkov 		int bytes;
10117010d129SBorislav Petkov 		u8 *buf = this_cpu_read(insn_buffer);
10127010d129SBorislav Petkov 
10137010d129SBorislav Petkov 		/* 'size' must fit our buffer, see above */
10147010d129SBorislav Petkov 		bytes = copy_from_user_nmi(buf, (void __user *)to, size);
10157010d129SBorislav Petkov 		if (bytes != 0)
10167010d129SBorislav Petkov 			return 0;
10177010d129SBorislav Petkov 
10187010d129SBorislav Petkov 		kaddr = buf;
10197010d129SBorislav Petkov 	} else {
10207010d129SBorislav Petkov 		kaddr = (void *)to;
10217010d129SBorislav Petkov 	}
10227010d129SBorislav Petkov 
10237010d129SBorislav Petkov 	do {
10247010d129SBorislav Petkov 		struct insn insn;
10257010d129SBorislav Petkov 
10267010d129SBorislav Petkov 		old_to = to;
10277010d129SBorislav Petkov 
10287010d129SBorislav Petkov #ifdef CONFIG_X86_64
10297010d129SBorislav Petkov 		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
10307010d129SBorislav Petkov #endif
10317010d129SBorislav Petkov 		insn_init(&insn, kaddr, size, is_64bit);
10327010d129SBorislav Petkov 		insn_get_length(&insn);
10337010d129SBorislav Petkov 		/*
10347010d129SBorislav Petkov 		 * Make sure there was not a problem decoding the
10357010d129SBorislav Petkov 		 * instruction and getting the length.  This is
10367010d129SBorislav Petkov 		 * doubly important because we have an infinite
10377010d129SBorislav Petkov 		 * loop if insn.length=0.
10387010d129SBorislav Petkov 		 */
10397010d129SBorislav Petkov 		if (!insn.length)
10407010d129SBorislav Petkov 			break;
10417010d129SBorislav Petkov 
10427010d129SBorislav Petkov 		to += insn.length;
10437010d129SBorislav Petkov 		kaddr += insn.length;
10447010d129SBorislav Petkov 		size -= insn.length;
10457010d129SBorislav Petkov 	} while (to < ip);
10467010d129SBorislav Petkov 
10477010d129SBorislav Petkov 	if (to == ip) {
10487010d129SBorislav Petkov 		set_linear_ip(regs, old_to);
10497010d129SBorislav Petkov 		return 1;
10507010d129SBorislav Petkov 	}
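	/*
	 * Editorial example (hypothetical instruction stream): if the
	 * block decodes as
	 *
	 *	to     : mov	(3 bytes)
	 *	to + 3 : add	(4 bytes)
	 *	to + 7 : jne	(2 bytes)	<- old_to on the last pass
	 *	to + 9 == ip
	 *
	 * the loop above exits with to == ip, and old_to names the
	 * instruction just before the trap IP -- the one that caused
	 * the event.
	 */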
10517010d129SBorislav Petkov 
10527010d129SBorislav Petkov 	/*
10537010d129SBorislav Petkov 	 * Even though we decoded the basic block, the instruction stream
10547010d129SBorislav Petkov 	 * never matched the given IP; either the TO or the IP got corrupted.
10557010d129SBorislav Petkov 	 */
10567010d129SBorislav Petkov 	return 0;
10577010d129SBorislav Petkov }
10587010d129SBorislav Petkov 
10597010d129SBorislav Petkov static inline u64 intel_hsw_weight(struct pebs_record_skl *pebs)
10607010d129SBorislav Petkov {
10617010d129SBorislav Petkov 	if (pebs->tsx_tuning) {
10627010d129SBorislav Petkov 		union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning };
10637010d129SBorislav Petkov 		return tsx.cycles_last_block;
10647010d129SBorislav Petkov 	}
10657010d129SBorislav Petkov 	return 0;
10667010d129SBorislav Petkov }
10677010d129SBorislav Petkov 
10687010d129SBorislav Petkov static inline u64 intel_hsw_transaction(struct pebs_record_skl *pebs)
10697010d129SBorislav Petkov {
10707010d129SBorislav Petkov 	u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
10717010d129SBorislav Petkov 
10727010d129SBorislav Petkov 	/* For RTM XABORTs also log the abort code from AX */
10737010d129SBorislav Petkov 	if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1))
10747010d129SBorislav Petkov 		txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
10757010d129SBorislav Petkov 	return txn;
10767010d129SBorislav Petkov }
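/*
 * Editorial example (hypothetical values): for an RTM abort triggered
 * by XABORT(0x05) the CPU sets bit 0 of EAX and stores the imm8 code
 * in EAX[31:24], so pebs->ax == 0x05000001 and
 *
 *	((pebs->ax >> 24) & 0xff) == 0x05
 *
 * ends up shifted into the PERF_TXN_ABORT field of the returned txn.
 */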
10777010d129SBorislav Petkov 
10787010d129SBorislav Petkov static void setup_pebs_sample_data(struct perf_event *event,
10797010d129SBorislav Petkov 				   struct pt_regs *iregs, void *__pebs,
10807010d129SBorislav Petkov 				   struct perf_sample_data *data,
10817010d129SBorislav Petkov 				   struct pt_regs *regs)
10827010d129SBorislav Petkov {
10837010d129SBorislav Petkov #define PERF_X86_EVENT_PEBS_HSW_PREC \
10847010d129SBorislav Petkov 		(PERF_X86_EVENT_PEBS_ST_HSW | \
10857010d129SBorislav Petkov 		 PERF_X86_EVENT_PEBS_LD_HSW | \
10867010d129SBorislav Petkov 		 PERF_X86_EVENT_PEBS_NA_HSW)
10877010d129SBorislav Petkov 	/*
10887010d129SBorislav Petkov 	 * We cast to the biggest pebs_record but are careful not to
10897010d129SBorislav Petkov 	 * unconditionally access the 'extra' entries.
10907010d129SBorislav Petkov 	 */
10917010d129SBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
10927010d129SBorislav Petkov 	struct pebs_record_skl *pebs = __pebs;
10937010d129SBorislav Petkov 	u64 sample_type;
10947010d129SBorislav Petkov 	int fll, fst, dsrc;
10957010d129SBorislav Petkov 	int fl = event->hw.flags;
10967010d129SBorislav Petkov 
10977010d129SBorislav Petkov 	if (pebs == NULL)
10987010d129SBorislav Petkov 		return;
10997010d129SBorislav Petkov 
11007010d129SBorislav Petkov 	sample_type = event->attr.sample_type;
11017010d129SBorislav Petkov 	dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
11027010d129SBorislav Petkov 
11037010d129SBorislav Petkov 	fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
11047010d129SBorislav Petkov 	fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
11057010d129SBorislav Petkov 
11067010d129SBorislav Petkov 	perf_sample_data_init(data, 0, event->hw.last_period);
11077010d129SBorislav Petkov 
11087010d129SBorislav Petkov 	data->period = event->hw.last_period;
11097010d129SBorislav Petkov 
11107010d129SBorislav Petkov 	/*
11117010d129SBorislav Petkov 	 * Use latency for weight (only avail with PEBS-LL)
11127010d129SBorislav Petkov 	 */
11137010d129SBorislav Petkov 	if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
11147010d129SBorislav Petkov 		data->weight = pebs->lat;
11157010d129SBorislav Petkov 
11167010d129SBorislav Petkov 	/*
11177010d129SBorislav Petkov 	 * data.data_src encodes the data source
11187010d129SBorislav Petkov 	 */
11197010d129SBorislav Petkov 	if (dsrc) {
11207010d129SBorislav Petkov 		u64 val = PERF_MEM_NA;
11217010d129SBorislav Petkov 		if (fll)
11227010d129SBorislav Petkov 			val = load_latency_data(pebs->dse);
11237010d129SBorislav Petkov 		else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
11247010d129SBorislav Petkov 			val = precise_datala_hsw(event, pebs->dse);
11257010d129SBorislav Petkov 		else if (fst)
11267010d129SBorislav Petkov 			val = precise_store_data(pebs->dse);
11277010d129SBorislav Petkov 		data->data_src.val = val;
11287010d129SBorislav Petkov 	}
11297010d129SBorislav Petkov 
11307010d129SBorislav Petkov 	/*
1131b8000586SPeter Zijlstra 	 * We use the interrupt regs as a base because the PEBS record does not
1132b8000586SPeter Zijlstra 	 * contain a full regs set, specifically it seems to lack segment
1133b8000586SPeter Zijlstra 	 * descriptors, which get used by things like user_mode().
11347010d129SBorislav Petkov 	 *
1135b8000586SPeter Zijlstra 	 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
1136b8000586SPeter Zijlstra 	 *
1137b8000586SPeter Zijlstra 	 * We must however always use BP,SP from iregs for the unwinder to stay
1138b8000586SPeter Zijlstra 	 * sane; the record BP,SP can point into thin air when the record is
1139b8000586SPeter Zijlstra 	 * from a previous PMI context or an (I)RET happened between the record
1140b8000586SPeter Zijlstra 	 * and PMI.
11417010d129SBorislav Petkov 	 */
11427010d129SBorislav Petkov 	*regs = *iregs;
11437010d129SBorislav Petkov 	regs->flags = pebs->flags;
11447010d129SBorislav Petkov 	set_linear_ip(regs, pebs->ip);
11457010d129SBorislav Petkov 
11467010d129SBorislav Petkov 	if (sample_type & PERF_SAMPLE_REGS_INTR) {
11477010d129SBorislav Petkov 		regs->ax = pebs->ax;
11487010d129SBorislav Petkov 		regs->bx = pebs->bx;
11497010d129SBorislav Petkov 		regs->cx = pebs->cx;
11507010d129SBorislav Petkov 		regs->dx = pebs->dx;
11517010d129SBorislav Petkov 		regs->si = pebs->si;
11527010d129SBorislav Petkov 		regs->di = pebs->di;
1153b8000586SPeter Zijlstra 
1154b8000586SPeter Zijlstra 		/*
1155b8000586SPeter Zijlstra 		 * Per the above, only set BP,SP if we don't need callchains.
1156b8000586SPeter Zijlstra 		 *
1157b8000586SPeter Zijlstra 		 * XXX: does this make sense?
1158b8000586SPeter Zijlstra 		 */
1159b8000586SPeter Zijlstra 		if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
11607010d129SBorislav Petkov 			regs->bp = pebs->bp;
11617010d129SBorislav Petkov 			regs->sp = pebs->sp;
1162b8000586SPeter Zijlstra 		}
11637010d129SBorislav Petkov 
1164b8000586SPeter Zijlstra 		/*
1165b8000586SPeter Zijlstra 		 * Preserve PERF_EFLAGS_VM from set_linear_ip().
1166b8000586SPeter Zijlstra 		 */
1167b8000586SPeter Zijlstra 		regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
11687010d129SBorislav Petkov #ifndef CONFIG_X86_32
11697010d129SBorislav Petkov 		regs->r8 = pebs->r8;
11707010d129SBorislav Petkov 		regs->r9 = pebs->r9;
11717010d129SBorislav Petkov 		regs->r10 = pebs->r10;
11727010d129SBorislav Petkov 		regs->r11 = pebs->r11;
11737010d129SBorislav Petkov 		regs->r12 = pebs->r12;
11747010d129SBorislav Petkov 		regs->r13 = pebs->r13;
11757010d129SBorislav Petkov 		regs->r14 = pebs->r14;
11767010d129SBorislav Petkov 		regs->r15 = pebs->r15;
11777010d129SBorislav Petkov #endif
11787010d129SBorislav Petkov 	}
11797010d129SBorislav Petkov 
11807010d129SBorislav Petkov 	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
11817010d129SBorislav Petkov 		regs->ip = pebs->real_ip;
11827010d129SBorislav Petkov 		regs->flags |= PERF_EFLAGS_EXACT;
11837010d129SBorislav Petkov 	} else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs))
11847010d129SBorislav Petkov 		regs->flags |= PERF_EFLAGS_EXACT;
11857010d129SBorislav Petkov 	else
11867010d129SBorislav Petkov 		regs->flags &= ~PERF_EFLAGS_EXACT;
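	/*
	 * Editorial note on the three outcomes above: fmt >= 2 records
	 * carry the eventing IP directly (real_ip); older formats fall
	 * back to the LBR rewind in intel_pmu_pebs_fixup_ip(); failing
	 * both, the sample keeps the skid IP and PERF_EFLAGS_EXACT stays
	 * clear so userspace can tell the difference.
	 */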
11877010d129SBorislav Petkov 
1188*fc7ce9c7SKan Liang 	if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) &&
11897010d129SBorislav Petkov 	    x86_pmu.intel_cap.pebs_format >= 1)
11907010d129SBorislav Petkov 		data->addr = pebs->dla;
11917010d129SBorislav Petkov 
11927010d129SBorislav Petkov 	if (x86_pmu.intel_cap.pebs_format >= 2) {
11937010d129SBorislav Petkov 		/* Only set the TSX weight when there is no memory weight. */
11947010d129SBorislav Petkov 		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
11957010d129SBorislav Petkov 			data->weight = intel_hsw_weight(pebs);
11967010d129SBorislav Petkov 
11977010d129SBorislav Petkov 		if (sample_type & PERF_SAMPLE_TRANSACTION)
11987010d129SBorislav Petkov 			data->txn = intel_hsw_transaction(pebs);
11997010d129SBorislav Petkov 	}
12007010d129SBorislav Petkov 
12017010d129SBorislav Petkov 	/*
12027010d129SBorislav Petkov 	 * v3 supplies an accurate time stamp, so we use it for
12037010d129SBorislav Petkov 	 * the sample's timestamp.
12047010d129SBorislav Petkov 	 *
12057010d129SBorislav Petkov 	 * We can only do this for the default trace clock.
12067010d129SBorislav Petkov 	 */
12077010d129SBorislav Petkov 	if (x86_pmu.intel_cap.pebs_format >= 3 &&
12087010d129SBorislav Petkov 		event->attr.use_clockid == 0)
12097010d129SBorislav Petkov 		data->time = native_sched_clock_from_tsc(pebs->tsc);
12107010d129SBorislav Petkov 
12117010d129SBorislav Petkov 	if (has_branch_stack(event))
12127010d129SBorislav Petkov 		data->br_stack = &cpuc->lbr_stack;
12137010d129SBorislav Petkov }
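/*
 * Editorial usage sketch (userspace, not part of this file): the fields
 * filled in above surface through perf_event_open() samples.  A minimal
 * consumer of the PEBS-provided data might request:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_RAW,
 *		.config		= 0x1cd, // assumed ldlat event encoding
 *		.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_ADDR |
 *				  PERF_SAMPLE_WEIGHT | PERF_SAMPLE_DATA_SRC,
 *		.precise_ip	= 2,	 // ask for PEBS
 *	};
 *
 * The 0x1cd encoding is model-specific and given for illustration only.
 */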
12147010d129SBorislav Petkov 
12157010d129SBorislav Petkov static inline void *
12167010d129SBorislav Petkov get_next_pebs_record_by_bit(void *base, void *top, int bit)
12177010d129SBorislav Petkov {
12187010d129SBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
12197010d129SBorislav Petkov 	void *at;
12207010d129SBorislav Petkov 	u64 pebs_status;
12217010d129SBorislav Petkov 
12227010d129SBorislav Petkov 	/*
12237010d129SBorislav Petkov 	 * fmt0 does not have a status bitfield (does not use
12247010d129SBorislav Petkov 	 * pebs_record_nhm format)
12257010d129SBorislav Petkov 	 */
12267010d129SBorislav Petkov 	if (x86_pmu.intel_cap.pebs_format < 1)
12277010d129SBorislav Petkov 		return base;
12287010d129SBorislav Petkov 
12297010d129SBorislav Petkov 	if (base == NULL)
12307010d129SBorislav Petkov 		return NULL;
12317010d129SBorislav Petkov 
12327010d129SBorislav Petkov 	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
12337010d129SBorislav Petkov 		struct pebs_record_nhm *p = at;
12347010d129SBorislav Petkov 
12357010d129SBorislav Petkov 		if (test_bit(bit, (unsigned long *)&p->status)) {
12367010d129SBorislav Petkov 			/* PEBS v3 has accurate status bits */
12377010d129SBorislav Petkov 			if (x86_pmu.intel_cap.pebs_format >= 3)
12387010d129SBorislav Petkov 				return at;
12397010d129SBorislav Petkov 
12407010d129SBorislav Petkov 			if (p->status == (1 << bit))
12417010d129SBorislav Petkov 				return at;
12427010d129SBorislav Petkov 
12437010d129SBorislav Petkov 			/* clear non-PEBS bit and re-check */
12447010d129SBorislav Petkov 			pebs_status = p->status & cpuc->pebs_enabled;
1245fd583ad1SKan Liang 			pebs_status &= PEBS_COUNTER_MASK;
12467010d129SBorislav Petkov 			if (pebs_status == (1 << bit))
12477010d129SBorislav Petkov 				return at;
12487010d129SBorislav Petkov 		}
12497010d129SBorislav Petkov 	}
12507010d129SBorislav Petkov 	return NULL;
12517010d129SBorislav Petkov }
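/*
 * Editorial example (hypothetical values): searching for bit 0 with
 * cpuc->pebs_enabled == 0x1 and a record whose p->status == 0x5: the
 * raw status carries a stray non-PEBS bit (bit 2), but masking with
 * pebs_enabled leaves 0x1 == (1 << 0), so the record is still
 * attributed to bit 0 instead of being skipped.
 */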
12527010d129SBorislav Petkov 
12537010d129SBorislav Petkov static void __intel_pmu_pebs_event(struct perf_event *event,
12547010d129SBorislav Petkov 				   struct pt_regs *iregs,
12557010d129SBorislav Petkov 				   void *base, void *top,
12567010d129SBorislav Petkov 				   int bit, int count)
12577010d129SBorislav Petkov {
12587010d129SBorislav Petkov 	struct perf_sample_data data;
12597010d129SBorislav Petkov 	struct pt_regs regs;
12607010d129SBorislav Petkov 	void *at = get_next_pebs_record_by_bit(base, top, bit);
12617010d129SBorislav Petkov 
12627010d129SBorislav Petkov 	if (!intel_pmu_save_and_restart(event) &&
12637010d129SBorislav Petkov 	    !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD))
12647010d129SBorislav Petkov 		return;
12657010d129SBorislav Petkov 
12667010d129SBorislav Petkov 	while (count > 1) {
12677010d129SBorislav Petkov 		setup_pebs_sample_data(event, iregs, at, &data, &regs);
12687010d129SBorislav Petkov 		perf_event_output(event, &data, &regs);
12697010d129SBorislav Petkov 		at += x86_pmu.pebs_record_size;
12707010d129SBorislav Petkov 		at = get_next_pebs_record_by_bit(at, top, bit);
12717010d129SBorislav Petkov 		count--;
12727010d129SBorislav Petkov 	}
12737010d129SBorislav Petkov 
12747010d129SBorislav Petkov 	setup_pebs_sample_data(event, iregs, at, &data, &regs);
12757010d129SBorislav Petkov 
12767010d129SBorislav Petkov 	/*
12777010d129SBorislav Petkov 	 * All but the last records are processed.
12787010d129SBorislav Petkov 	 * The last one is left to be able to call the overflow handler.
12797010d129SBorislav Petkov 	 */
12807010d129SBorislav Petkov 	if (perf_event_overflow(event, &data, &regs)) {
12817010d129SBorislav Petkov 		x86_pmu_stop(event, 0);
12827010d129SBorislav Petkov 		return;
12837010d129SBorislav Petkov 	}
12847010d129SBorislav Petkov 
12857010d129SBorislav Petkov }
12867010d129SBorislav Petkov 
12877010d129SBorislav Petkov static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
12887010d129SBorislav Petkov {
12897010d129SBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
12907010d129SBorislav Petkov 	struct debug_store *ds = cpuc->ds;
12917010d129SBorislav Petkov 	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
12927010d129SBorislav Petkov 	struct pebs_record_core *at, *top;
12937010d129SBorislav Petkov 	int n;
12947010d129SBorislav Petkov 
12957010d129SBorislav Petkov 	if (!x86_pmu.pebs_active)
12967010d129SBorislav Petkov 		return;
12977010d129SBorislav Petkov 
12987010d129SBorislav Petkov 	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
12997010d129SBorislav Petkov 	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
13007010d129SBorislav Petkov 
13017010d129SBorislav Petkov 	/*
13027010d129SBorislav Petkov 	 * Whatever else happens, drain the thing
13037010d129SBorislav Petkov 	 */
13047010d129SBorislav Petkov 	ds->pebs_index = ds->pebs_buffer_base;
13057010d129SBorislav Petkov 
13067010d129SBorislav Petkov 	if (!test_bit(0, cpuc->active_mask))
13077010d129SBorislav Petkov 		return;
13087010d129SBorislav Petkov 
13097010d129SBorislav Petkov 	WARN_ON_ONCE(!event);
13107010d129SBorislav Petkov 
13117010d129SBorislav Petkov 	if (!event->attr.precise_ip)
13127010d129SBorislav Petkov 		return;
13137010d129SBorislav Petkov 
13147010d129SBorislav Petkov 	n = top - at;
13157010d129SBorislav Petkov 	if (n <= 0)
13167010d129SBorislav Petkov 		return;
13177010d129SBorislav Petkov 
13187010d129SBorislav Petkov 	__intel_pmu_pebs_event(event, iregs, at, top, 0, n);
13197010d129SBorislav Petkov }
13207010d129SBorislav Petkov 
13217010d129SBorislav Petkov static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
13227010d129SBorislav Petkov {
13237010d129SBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
13247010d129SBorislav Petkov 	struct debug_store *ds = cpuc->ds;
13257010d129SBorislav Petkov 	struct perf_event *event;
13267010d129SBorislav Petkov 	void *base, *at, *top;
13277010d129SBorislav Petkov 	short counts[MAX_PEBS_EVENTS] = {};
13287010d129SBorislav Petkov 	short error[MAX_PEBS_EVENTS] = {};
13297010d129SBorislav Petkov 	int bit, i;
13307010d129SBorislav Petkov 
13317010d129SBorislav Petkov 	if (!x86_pmu.pebs_active)
13327010d129SBorislav Petkov 		return;
13337010d129SBorislav Petkov 
13347010d129SBorislav Petkov 	base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
13357010d129SBorislav Petkov 	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
13367010d129SBorislav Petkov 
13377010d129SBorislav Petkov 	ds->pebs_index = ds->pebs_buffer_base;
13387010d129SBorislav Petkov 
13397010d129SBorislav Petkov 	if (unlikely(base >= top))
13407010d129SBorislav Petkov 		return;
13417010d129SBorislav Petkov 
13427010d129SBorislav Petkov 	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
13437010d129SBorislav Petkov 		struct pebs_record_nhm *p = at;
13447010d129SBorislav Petkov 		u64 pebs_status;
13457010d129SBorislav Petkov 
13468ef9b845SPeter Zijlstra 		pebs_status = p->status & cpuc->pebs_enabled;
13478ef9b845SPeter Zijlstra 		pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
13488ef9b845SPeter Zijlstra 
13498ef9b845SPeter Zijlstra 		/* PEBS v3 has more accurate status bits */
13507010d129SBorislav Petkov 		if (x86_pmu.intel_cap.pebs_format >= 3) {
13518ef9b845SPeter Zijlstra 			for_each_set_bit(bit, (unsigned long *)&pebs_status,
13528ef9b845SPeter Zijlstra 					 x86_pmu.max_pebs_events)
13537010d129SBorislav Petkov 				counts[bit]++;
13547010d129SBorislav Petkov 
13557010d129SBorislav Petkov 			continue;
13567010d129SBorislav Petkov 		}
13577010d129SBorislav Petkov 
13587010d129SBorislav Petkov 		/*
13597010d129SBorislav Petkov 		 * On some CPUs the PEBS status can be zero when PEBS is
13607010d129SBorislav Petkov 		 * racing with clearing of GLOBAL_STATUS.
13617010d129SBorislav Petkov 		 *
13627010d129SBorislav Petkov 		 * Normally we would drop that record, but in the
13637010d129SBorislav Petkov 		 * case when there is only a single active PEBS event
13647010d129SBorislav Petkov 		 * we can assume it's for that event.
13657010d129SBorislav Petkov 		 */
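		/*
		 * Editorial note: the '& (x - 1)' below is the usual
		 * single-bit test -- e.g. pebs_enabled == 0x2 gives
		 * 0x2 & 0x1 == 0 (exactly one event), while 0x6 gives
		 * 0x6 & 0x5 != 0 (more than one).
		 */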
13667010d129SBorislav Petkov 		if (!pebs_status && cpuc->pebs_enabled &&
13677010d129SBorislav Petkov 			!(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
13687010d129SBorislav Petkov 			pebs_status = cpuc->pebs_enabled;
13697010d129SBorislav Petkov 
13707010d129SBorislav Petkov 		bit = find_first_bit((unsigned long *)&pebs_status,
13717010d129SBorislav Petkov 					x86_pmu.max_pebs_events);
13727010d129SBorislav Petkov 		if (bit >= x86_pmu.max_pebs_events)
13737010d129SBorislav Petkov 			continue;
13747010d129SBorislav Petkov 
13757010d129SBorislav Petkov 		/*
13767010d129SBorislav Petkov 		 * The PEBS hardware does not deal well with events that
13777010d129SBorislav Petkov 		 * happen close to each other and set multiple status
13787010d129SBorislav Petkov 		 * bits, but this should be rare.
13797010d129SBorislav Petkov 		 *
13807010d129SBorislav Petkov 		 * If the set bits cover one PEBS and several non-PEBS
13817010d129SBorislav Petkov 		 * events, the PEBS record is unaffected and is handled
13827010d129SBorislav Petkov 		 * normally. (slow path)
13837010d129SBorislav Petkov 		 *
13847010d129SBorislav Petkov 		 * If two or more PEBS events are involved, their records
13857010d129SBorislav Petkov 		 * can be collapsed into a single one, and it is not
13867010d129SBorislav Petkov 		 * possible to reconstruct all the events that caused the
13877010d129SBorislav Petkov 		 * record. This is called a collision; when it happens,
13887010d129SBorislav Petkov 		 * the record is dropped.
13897010d129SBorislav Petkov 		 */
13907010d129SBorislav Petkov 		if (p->status != (1ULL << bit)) {
13917010d129SBorislav Petkov 			for_each_set_bit(i, (unsigned long *)&pebs_status,
13927010d129SBorislav Petkov 					 x86_pmu.max_pebs_events)
13937010d129SBorislav Petkov 				error[i]++;
13947010d129SBorislav Petkov 			continue;
13957010d129SBorislav Petkov 		}
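		/*
		 * Editorial example (hypothetical values): with PEBS events
		 * on counters 0 and 1 and a collapsed record showing
		 * p->status == 0x3, neither owner can be identified, so the
		 * branch above bumps error[0] and error[1] and skips the
		 * record.
		 */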
13967010d129SBorislav Petkov 
13977010d129SBorislav Petkov 		counts[bit]++;
13987010d129SBorislav Petkov 	}
13997010d129SBorislav Petkov 
14007010d129SBorislav Petkov 	for (bit = 0; bit < x86_pmu.max_pebs_events; bit++) {
14017010d129SBorislav Petkov 		if ((counts[bit] == 0) && (error[bit] == 0))
14027010d129SBorislav Petkov 			continue;
14037010d129SBorislav Petkov 
14047010d129SBorislav Petkov 		event = cpuc->events[bit];
14058ef9b845SPeter Zijlstra 		if (WARN_ON_ONCE(!event))
14068ef9b845SPeter Zijlstra 			continue;
14078ef9b845SPeter Zijlstra 
14088ef9b845SPeter Zijlstra 		if (WARN_ON_ONCE(!event->attr.precise_ip))
14098ef9b845SPeter Zijlstra 			continue;
14107010d129SBorislav Petkov 
14117010d129SBorislav Petkov 		/* log the number of dropped samples */
1412475113d9SJiri Olsa 		if (error[bit]) {
14137010d129SBorislav Petkov 			perf_log_lost_samples(event, error[bit]);
14147010d129SBorislav Petkov 
1415475113d9SJiri Olsa 			if (perf_event_account_interrupt(event))
1416475113d9SJiri Olsa 				x86_pmu_stop(event, 0);
1417475113d9SJiri Olsa 		}
1418475113d9SJiri Olsa 
14197010d129SBorislav Petkov 		if (counts[bit]) {
14207010d129SBorislav Petkov 			__intel_pmu_pebs_event(event, iregs, base,
14217010d129SBorislav Petkov 					       top, bit, counts[bit]);
14227010d129SBorislav Petkov 		}
14237010d129SBorislav Petkov 	}
14247010d129SBorislav Petkov }
14257010d129SBorislav Petkov 
14267010d129SBorislav Petkov /*
14277010d129SBorislav Petkov  * BTS, PEBS probe and setup
14287010d129SBorislav Petkov  */
14297010d129SBorislav Petkov 
14307010d129SBorislav Petkov void __init intel_ds_init(void)
14317010d129SBorislav Petkov {
14327010d129SBorislav Petkov 	/*
14337010d129SBorislav Petkov 	 * No support for 32-bit formats
14347010d129SBorislav Petkov 	 */
14357010d129SBorislav Petkov 	if (!boot_cpu_has(X86_FEATURE_DTES64))
14367010d129SBorislav Petkov 		return;
14377010d129SBorislav Petkov 
14387010d129SBorislav Petkov 	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
14397010d129SBorislav Petkov 	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
1440e72daf3fSJiri Olsa 	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
14417010d129SBorislav Petkov 	if (x86_pmu.pebs) {
14427010d129SBorislav Petkov 		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
14437010d129SBorislav Petkov 		int format = x86_pmu.intel_cap.pebs_format;
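		/*
		 * Editorial note: intel_cap is read from
		 * MSR_IA32_PERF_CAPABILITIES at PMU init; per the SDM,
		 * pebs_format lives in bits 11:8 and pebs_trap in bit 6
		 * of that MSR (treat the exact positions as an
		 * assumption).
		 */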
14447010d129SBorislav Petkov 
14457010d129SBorislav Petkov 		switch (format) {
14467010d129SBorislav Petkov 		case 0:
14477010d129SBorislav Petkov 			pr_cont("PEBS fmt0%c, ", pebs_type);
14487010d129SBorislav Petkov 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
1449e72daf3fSJiri Olsa 			/*
1450e72daf3fSJiri Olsa 			 * Using >PAGE_SIZE buffers makes the WRMSR to
1451e72daf3fSJiri Olsa 			 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
1452e72daf3fSJiri Olsa 			 * mysteriously hang on Core2.
1453e72daf3fSJiri Olsa 			 *
1454e72daf3fSJiri Olsa 			 * As a workaround, we don't do this.
1455e72daf3fSJiri Olsa 			 */
1456e72daf3fSJiri Olsa 			x86_pmu.pebs_buffer_size = PAGE_SIZE;
14577010d129SBorislav Petkov 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
14587010d129SBorislav Petkov 			break;
14597010d129SBorislav Petkov 
14607010d129SBorislav Petkov 		case 1:
14617010d129SBorislav Petkov 			pr_cont("PEBS fmt1%c, ", pebs_type);
14627010d129SBorislav Petkov 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
14637010d129SBorislav Petkov 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
14647010d129SBorislav Petkov 			break;
14657010d129SBorislav Petkov 
14667010d129SBorislav Petkov 		case 2:
14677010d129SBorislav Petkov 			pr_cont("PEBS fmt2%c, ", pebs_type);
14687010d129SBorislav Petkov 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
14697010d129SBorislav Petkov 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
14707010d129SBorislav Petkov 			break;
14717010d129SBorislav Petkov 
14727010d129SBorislav Petkov 		case 3:
14737010d129SBorislav Petkov 			pr_cont("PEBS fmt3%c, ", pebs_type);
14747010d129SBorislav Petkov 			x86_pmu.pebs_record_size =
14757010d129SBorislav Petkov 						sizeof(struct pebs_record_skl);
14767010d129SBorislav Petkov 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
14777010d129SBorislav Petkov 			x86_pmu.free_running_flags |= PERF_SAMPLE_TIME;
14787010d129SBorislav Petkov 			break;
14797010d129SBorislav Petkov 
14807010d129SBorislav Petkov 		default:
14817010d129SBorislav Petkov 			pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
14827010d129SBorislav Petkov 			x86_pmu.pebs = 0;
14837010d129SBorislav Petkov 		}
14847010d129SBorislav Petkov 	}
14857010d129SBorislav Petkov }
14867010d129SBorislav Petkov 
14877010d129SBorislav Petkov void perf_restore_debug_store(void)
14887010d129SBorislav Petkov {
14897010d129SBorislav Petkov 	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
14907010d129SBorislav Petkov 
14917010d129SBorislav Petkov 	if (!x86_pmu.bts && !x86_pmu.pebs)
14927010d129SBorislav Petkov 		return;
14937010d129SBorislav Petkov 
14947010d129SBorislav Petkov 	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
14957010d129SBorislav Petkov }
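/*
 * Editorial note: this is expected to run on resume (e.g. from
 * restore_processor_state()) to re-point MSR_IA32_DS_AREA at the
 * per-CPU debug store after the MSR was lost across suspend; the
 * caller named here is an assumption based on the usual kernel flow.
 */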
1496