// SPDX-License-Identifier: GPL-2.0
/* arch/x86/events/intel/ds.c (revision cd6b984f) */
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/cpu_entry_area.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>

#include "../perf_event.h"

/* Waste a full page so it can be mapped into the cpu_entry_area */
DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define PEBS_FIXUP_SIZE		PAGE_SIZE

/*
 * pebs_record_32 for P4 and Core is not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */

union intel_x86_pebs_dse {
	u64 val;
	struct {
		unsigned int ld_dse:4;
		unsigned int ld_stlb_miss:1;
		unsigned int ld_locked:1;
		unsigned int ld_reserved:26;
	};
	struct {
		unsigned int st_l1d_hit:1;
		unsigned int st_reserved1:3;
		unsigned int st_stlb_miss:1;
		unsigned int st_locked:1;
		unsigned int st_reserved2:26;
	};
};
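
/*
 * The two anonymous structs above are alternate decodings of the same
 * low bits of the PEBS data-source word: loads carry a 4-bit source
 * encoding (ld_dse, used as an index into pebs_data_source[] below)
 * plus STLB-miss and lock bits; stores carry an L1-hit bit plus the
 * same STLB-miss and lock bits in bits 4 and 5.
 */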


/*
 * Map PEBS Load Latency Data Source encodings to generic
 * memory data source information
 */
#define P(a, b) PERF_MEM_S(a, b)
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define LEVEL(x) P(LVLNUM, x)
#define REM P(REMOTE, REMOTE)
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))

/* Version for Sandy Bridge and later */
static u64 pebs_data_source[] = {
	P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),/* 0x00: unknown L3 */
	OP_LH | P(LVL, L1)  | LEVEL(L1) | P(SNOOP, NONE),  /* 0x01: L1 local */
	OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
	OP_LH | P(LVL, L2)  | LEVEL(L2) | P(SNOOP, NONE),  /* 0x03: L2 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, NONE),  /* 0x04: L3 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, MISS),  /* 0x05: L3 hit, snoop miss */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, HIT),   /* 0x06: L3 hit, snoop hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, HITM),  /* 0x07: L3 hit, snoop hitm */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x08: L3 miss, snoop hit */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss, snoop hitm */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, HIT),       /* 0x0a: L3 miss, shared */
	OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x0b: L3 miss, shared */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | SNOOP_NONE_MISS,     /* 0x0c: L3 miss, excl */
	OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */
	OP_LH | P(LVL, IO)  | LEVEL(NA) | P(SNOOP, NONE), /* 0x0e: I/O */
	OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0f: uncached */
};
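
/*
 * load_latency_data() below indexes this table with the low four dse
 * bits; the __init fixups that follow patch individual entries for
 * generations (Nehalem, Skylake) that encode a few sources differently.
 */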

/* Patch up minor differences in the bits */
void __init intel_pmu_pebs_data_source_nhm(void)
{
	pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
	pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
	pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
}

void __init intel_pmu_pebs_data_source_skl(bool pmem)
{
	u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);

	pebs_data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
	pebs_data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
	pebs_data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
	pebs_data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
	pebs_data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
}

static u64 precise_store_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);

	dse.val = status;

	/*
	 * bit 4: TLB access
	 * 1 = store missed the 2nd level TLB
	 *
	 * so the address either hit the page walker or needed an OS assist;
	 * otherwise it hit the 2nd level TLB
	 */
	if (dse.st_stlb_miss)
		val |= P(TLB, MISS);
	else
		val |= P(TLB, HIT);

	/*
	 * bit 0: hit L1 data cache
	 * if not set, then all we know is that
	 * it missed L1D
	 */
	if (dse.st_l1d_hit)
		val |= P(LVL, HIT);
	else
		val |= P(LVL, MISS);

	/*
	 * bit 5: Locked prefix
	 */
	if (dse.st_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
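
/*
 * A rough worked example: status 0x21 has st_l1d_hit (bit 0) and
 * st_locked (bit 5) set with st_stlb_miss (bit 4) clear, so the
 * function above returns the base bits plus P(TLB, HIT), P(LVL, HIT)
 * and P(LOCK, LOCKED), i.e. a locked store that hit the STLB and L1D.
 */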

static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{
	union perf_mem_data_src dse;

	dse.val = PERF_MEM_NA;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
		dse.mem_op = PERF_MEM_OP_STORE;
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
		dse.mem_op = PERF_MEM_OP_LOAD;

	/*
	 * L1 info is only valid for the following events:
	 *
	 * MEM_UOPS_RETIRED.STLB_MISS_STORES
	 * MEM_UOPS_RETIRED.LOCK_STORES
	 * MEM_UOPS_RETIRED.SPLIT_STORES
	 * MEM_UOPS_RETIRED.ALL_STORES
	 */
	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
		if (status & 1)
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		else
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
	}
	return dse.val;
}

static u64 load_latency_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val;

	dse.val = status;

	/*
	 * use the mapping table for bits 0-3
	 */
	val = pebs_data_source[dse.ld_dse];

	/*
	 * Nehalem models do not support TLB or Lock info
	 */
	if (x86_pmu.pebs_no_tlb) {
		val |= P(TLB, NA) | P(LOCK, NA);
		return val;
	}
	/*
	 * bit 4: TLB access
	 * 0 = did not miss 2nd level TLB
	 * 1 = missed 2nd level TLB
	 */
	if (dse.ld_stlb_miss)
		val |= P(TLB, MISS) | P(TLB, L2);
	else
		val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);

	/*
	 * bit 5: locked prefix
	 */
	if (dse.ld_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
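
/*
 * E.g. status 0x13 decodes as ld_dse = 0x3 (the L2-hit entry of
 * pebs_data_source[]) with ld_stlb_miss set, so on parts with TLB info
 * the function above returns that entry plus P(TLB, MISS) | P(TLB, L2).
 */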

struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

/*
 * Same as pebs_record_nhm, with two additional fields.
 */
struct pebs_record_hsw {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
};

union hsw_tsx_tuning {
	struct {
		u32 cycles_last_block     : 32,
		    hle_abort		  : 1,
		    rtm_abort		  : 1,
		    instruction_abort     : 1,
		    non_instruction_abort : 1,
		    retry		  : 1,
		    data_conflict	  : 1,
		    capacity_writes	  : 1,
		    capacity_reads	  : 1;
	};
	u64	    value;
};

#define PEBS_HSW_TSX_FLAGS	0xff00000000ULL
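
/*
 * The low 32 bits of tsx_tuning hold the abort cycle count; the eight
 * 1-bit abort-reason fields land in bits 32-39, which is exactly the
 * range the PEBS_HSW_TSX_FLAGS mask (0xff00000000) selects.
 */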

/* Same as HSW, plus TSC */

struct pebs_record_skl {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
	u64 tsc;
};

void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static DEFINE_PER_CPU(void *, insn_buffer);

static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long)cea;
	phys_addr_t pa;
	size_t msz = 0;

	pa = virt_to_phys(addr);

	preempt_disable();
	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, pa, prot);

	/*
	 * This is a cross-CPU update of the cpu_entry_area; we must shoot
	 * down all TLB entries for it.
	 */
	flush_tlb_kernel_range(start, start + size);
	preempt_enable();
}

static void ds_clear_cea(void *cea, size_t size)
{
	unsigned long start = (unsigned long)cea;
	size_t msz = 0;

	preempt_disable();
	for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);

	flush_tlb_kernel_range(start, start + size);
	preempt_enable();
}

static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
{
	unsigned int order = get_order(size);
	int node = cpu_to_node(cpu);
	struct page *page;

	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
	return page ? page_address(page) : NULL;
}

static void dsfree_pages(const void *buffer, size_t size)
{
	if (buffer)
		free_pages((unsigned long)buffer, get_order(size));
}

static int alloc_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	size_t bsiz = x86_pmu.pebs_buffer_size;
	int max, node = cpu_to_node(cpu);
	void *buffer, *ibuffer, *cea;

	if (!x86_pmu.pebs)
		return 0;

	buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
	if (unlikely(!buffer))
		return -ENOMEM;

	/*
	 * HSW+ already provides us the eventing ip; no need to allocate this
	 * buffer then.
	 */
	if (x86_pmu.intel_cap.pebs_format < 2) {
		ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
		if (!ibuffer) {
			dsfree_pages(buffer, bsiz);
			return -ENOMEM;
		}
		per_cpu(insn_buffer, cpu) = ibuffer;
	}
	hwev->ds_pebs_vaddr = buffer;
	/* Update the cpu entry area mapping */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds->pebs_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
	ds->pebs_index = ds->pebs_buffer_base;
	max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
	ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
	return 0;
}

static void release_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	void *cea;

	if (!x86_pmu.pebs)
		return;

	kfree(per_cpu(insn_buffer, cpu));
	per_cpu(insn_buffer, cpu) = NULL;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
	dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
	hwev->ds_pebs_vaddr = NULL;
}

static int alloc_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	void *buffer, *cea;
	int max;

	if (!x86_pmu.bts)
		return 0;

	buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
	if (unlikely(!buffer)) {
		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
		return -ENOMEM;
	}
	hwev->ds_bts_vaddr = buffer;
	/* Update the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds->bts_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
	ds->bts_index = ds->bts_buffer_base;
	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
					max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
					(max / 16) * BTS_RECORD_SIZE;
	return 0;
}
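
/*
 * A sanity check on the threshold math, assuming BTS_BUFFER_SIZE is
 * 64 KiB (it is defined elsewhere): max = 65536 / 24 = 2730 records,
 * the threshold sits 2730 / 16 = 170 records before the end of the
 * buffer, and a PMI therefore fires after roughly 2560 records (15/16
 * of the buffer), matching the n <= 2560 bound quoted in
 * intel_pmu_drain_bts_buffer().
 */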

static void release_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	void *cea;

	if (!x86_pmu.bts)
		return;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds_clear_cea(cea, BTS_BUFFER_SIZE);
	dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
	hwev->ds_bts_vaddr = NULL;
}

static int alloc_ds_buffer(int cpu)
{
	struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;

	memset(ds, 0, sizeof(*ds));
	per_cpu(cpu_hw_events, cpu).ds = ds;
	return 0;
}

static void release_ds_buffer(int cpu)
{
	per_cpu(cpu_hw_events, cpu).ds = NULL;
}

void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	for_each_possible_cpu(cpu)
		release_ds_buffer(cpu);

	for_each_possible_cpu(cpu) {
		/*
		 * Again, ignore errors from offline CPUs: they will no longer
		 * observe cpu_hw_events.ds and will not program the DS_AREA
		 * when they come up.
		 */
		fini_debug_store_on_cpu(cpu);
	}

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
	}
}

void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_possible_cpu(cpu) {
			/*
			 * Ignore wrmsr_on_cpu() errors for offline CPUs; they
			 * will get this call through intel_pmu_cpu_starting().
			 */
			init_debug_store_on_cpu(cpu);
		}
	}
}

/*
 * BTS
 */

struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);

void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	if (config & ARCH_PERFMON_EVENTSEL_INT)
		debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *base, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	unsigned long skip = 0;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top  = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= base)
		return 0;

	memset(&regs, 0, sizeof(regs));

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * BTS leaks kernel addresses in branches across the cpl boundary,
	 * such as traps or system calls, so unless the user is asking for
	 * kernel tracing (and right now it's not possible), we'd need to
	 * filter them out. But first we need to count how many of those we
	 * have in the current batch. This is an extra O(n) pass; however,
	 * it's much faster than the other one, especially considering that
	 * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see
	 * alloc_bts_buffer()).
	 */
	for (at = base; at < top; at++) {
		/*
		 * Note that right now *this* BTS code only works if
		 * attr::exclude_kernel is set, but let's keep this extra
		 * check here in case that changes.
		 */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			skip++;
	}

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	rcu_read_lock();
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size *
			      (top - base - skip)))
		goto unlock;

	for (at = base; at < top; at++) {
		/* Filter out any records that contain kernel addresses. */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			continue;

		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
unlock:
	rcu_read_unlock();
	return 1;
}

static inline void intel_pmu_drain_pebs_buffer(void)
{
	struct pt_regs regs;

	x86_pmu.drain_pebs(&regs);
}

/*
 * PEBS
 */
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_slm_pebs_event_constraints[] = {
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_glm_pebs_event_constraints[] = {
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),      /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),      /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_ivb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_hsw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),    /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_bdw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),    /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};


struct event_constraint intel_skl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),		      /* MEM_TRANS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_icl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL),	/* INST_RETIRED.PREC_DIST */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x400000000ULL),	/* SLOTS */

	INTEL_PLD_CONSTRAINT(0x1cd, 0xff),			/* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf),	/* MEM_INST_RETIRED.LOAD */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf),	/* MEM_INST_RETIRED.STORE */

	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */

	INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),		/* MEM_INST_RETIRED.* */

	/*
	 * Everything else is handled by PMU_FL_PEBS_ALL, because we
	 * need the full constraints from the main table.
	 */

	EVENT_CONSTRAINT_END
};

struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if (constraint_match(c, event->hw.config)) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	/*
	 * Extended PEBS support
	 * Makes the PEBS code search the normal constraints.
	 */
	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
		return NULL;

	return &emptyconstraint;
}

/*
 * We need the sched_task callback even for per-cpu events when we use
 * the large interrupt threshold, such that we can provide PID and TID
 * to PEBS samples.
 */
static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
{
	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
}

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!sched_in && pebs_needs_sched_cb(cpuc))
		intel_pmu_drain_pebs_buffer();
}

static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
	struct debug_store *ds = cpuc->ds;
	u64 threshold;
	int reserved;

	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
		reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
	else
		reserved = x86_pmu.max_pebs_events;

	if (cpuc->n_pebs == cpuc->n_large_pebs) {
		threshold = ds->pebs_absolute_maximum -
			reserved * cpuc->pebs_record_size;
	} else {
		threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
	}

	ds->pebs_interrupt_threshold = threshold;
}
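
/*
 * In other words: when every PEBS event uses the large threshold, the
 * PMI is deferred until the buffer is nearly full, keeping 'reserved'
 * record slots of headroom so each armed counter can still land one
 * final record; otherwise the threshold is one record past the base,
 * so every record raises a PMI immediately.
 */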

static void adaptive_pebs_record_size_update(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 pebs_data_cfg = cpuc->pebs_data_cfg;
	int sz = sizeof(struct pebs_basic);

	if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
		sz += sizeof(struct pebs_meminfo);
	if (pebs_data_cfg & PEBS_DATACFG_GP)
		sz += sizeof(struct pebs_gprs);
	if (pebs_data_cfg & PEBS_DATACFG_XMMS)
		sz += sizeof(struct pebs_xmm);
	if (pebs_data_cfg & PEBS_DATACFG_LBRS)
		sz += x86_pmu.lbr_nr * sizeof(struct pebs_lbr_entry);

	cpuc->pebs_record_size = sz;
}
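
/*
 * E.g. a pebs_data_cfg with only PEBS_DATACFG_MEMINFO and
 * PEBS_DATACFG_GP set yields records of sizeof(struct pebs_basic) +
 * sizeof(struct pebs_meminfo) + sizeof(struct pebs_gprs) bytes; only
 * the LBR block scales with the model's LBR depth (x86_pmu.lbr_nr).
 */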

#define PERF_PEBS_MEMINFO_TYPE	(PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC |   \
				PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_WEIGHT | \
				PERF_SAMPLE_TRANSACTION)

static u64 pebs_update_adaptive_cfg(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u64 sample_type = attr->sample_type;
	u64 pebs_data_cfg = 0;
	bool gprs, tsx_weight;

	if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) &&
	    attr->precise_ip > 1)
		return pebs_data_cfg;

	if (sample_type & PERF_PEBS_MEMINFO_TYPE)
		pebs_data_cfg |= PEBS_DATACFG_MEMINFO;

	/*
	 * We need GPRs when:
	 * + the user requested them
	 * + precise_ip < 2, for the non-event IP
	 * + for RTM TSX weight, where we need GPRs for the abort code
	 */
979c22497f5SKan Liang 	gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
980c22497f5SKan Liang 	       (attr->sample_regs_intr & PEBS_GP_REGS);
981c22497f5SKan Liang 
982c22497f5SKan Liang 	tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT) &&
983c22497f5SKan Liang 		     ((attr->config & INTEL_ARCH_EVENT_MASK) ==
984c22497f5SKan Liang 		      x86_pmu.rtm_abort_event);
985c22497f5SKan Liang 
986c22497f5SKan Liang 	if (gprs || (attr->precise_ip < 2) || tsx_weight)
987c22497f5SKan Liang 		pebs_data_cfg |= PEBS_DATACFG_GP;
988c22497f5SKan Liang 
989c22497f5SKan Liang 	if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
990dce86ac7SKan Liang 	    (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK))
991c22497f5SKan Liang 		pebs_data_cfg |= PEBS_DATACFG_XMMS;
992c22497f5SKan Liang 
993c22497f5SKan Liang 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
994c22497f5SKan Liang 		/*
995c22497f5SKan Liang 		 * For now always log all LBRs. Could configure this
996c22497f5SKan Liang 		 * later.
997c22497f5SKan Liang 		 */
998c22497f5SKan Liang 		pebs_data_cfg |= PEBS_DATACFG_LBRS |
999c22497f5SKan Liang 			((x86_pmu.lbr_nr - 1) << PEBS_DATACFG_LBR_SHIFT);
1000c22497f5SKan Liang 	}
1001c22497f5SKan Liang 
1002c22497f5SKan Liang 	return pebs_data_cfg;
1003c22497f5SKan Liang }
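
/*
 * Illustrative example (not from the original source): an event with
 *
 *   sample_type      = PERF_SAMPLE_IP | PERF_SAMPLE_DATA_SRC |
 *                      PERF_SAMPLE_REGS_INTR
 *   sample_regs_intr = some of the general-purpose registers
 *
 * yields PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP, which
 * intel_pmu_pebs_enable() later programs into MSR_PEBS_DATA_CFG.
 */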
1004c22497f5SKan Liang 
1005c22497f5SKan Liang static void
1006c22497f5SKan Liang pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
1007c22497f5SKan Liang 		  struct perf_event *event, bool add)
1008c22497f5SKan Liang {
1009c22497f5SKan Liang 	struct pmu *pmu = event->ctx->pmu;
1010b6a32f02SJiri Olsa 	/*
1011b6a32f02SJiri Olsa 	 * Make sure we get updated with the first PEBS
1012b6a32f02SJiri Olsa 	 * event. It will also trigger during removal, but
1013b6a32f02SJiri Olsa 	 * that does not hurt:
1014b6a32f02SJiri Olsa 	 */
1015b6a32f02SJiri Olsa 	bool update = cpuc->n_pebs == 1;
1016b6a32f02SJiri Olsa 
101709e61b4fSPeter Zijlstra 	if (needed_cb != pebs_needs_sched_cb(cpuc)) {
101809e61b4fSPeter Zijlstra 		if (!needed_cb)
101909e61b4fSPeter Zijlstra 			perf_sched_cb_inc(pmu);
102009e61b4fSPeter Zijlstra 		else
102109e61b4fSPeter Zijlstra 			perf_sched_cb_dec(pmu);
102209e61b4fSPeter Zijlstra 
1023b6a32f02SJiri Olsa 		update = true;
102409e61b4fSPeter Zijlstra 	}
1025b6a32f02SJiri Olsa 
1026c22497f5SKan Liang 	/*
1027c22497f5SKan Liang 	 * The PEBS record doesn't shrink on pmu::del(). Doing so would require
1028c22497f5SKan Liang 	 * iterating all remaining PEBS events to reconstruct the config.
1029c22497f5SKan Liang 	 */
1030c22497f5SKan Liang 	if (x86_pmu.intel_cap.pebs_baseline && add) {
1031c22497f5SKan Liang 		u64 pebs_data_cfg;
1032c22497f5SKan Liang 
1033c22497f5SKan Liang 		/* Clear pebs_data_cfg and pebs_record_size for the first PEBS event. */
1034c22497f5SKan Liang 		if (cpuc->n_pebs == 1) {
1035c22497f5SKan Liang 			cpuc->pebs_data_cfg = 0;
1036c22497f5SKan Liang 			cpuc->pebs_record_size = sizeof(struct pebs_basic);
1037c22497f5SKan Liang 		}
1038c22497f5SKan Liang 
1039c22497f5SKan Liang 		pebs_data_cfg = pebs_update_adaptive_cfg(event);
1040c22497f5SKan Liang 
1041c22497f5SKan Liang 		/* Update pebs_data_cfg and pebs_record_size if the new event requires more data. */
1042c22497f5SKan Liang 		if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
1043c22497f5SKan Liang 			cpuc->pebs_data_cfg |= pebs_data_cfg;
1044c22497f5SKan Liang 			adaptive_pebs_record_size_update();
1045c22497f5SKan Liang 			update = true;
1046c22497f5SKan Liang 		}
1047c22497f5SKan Liang 	}
1048c22497f5SKan Liang 
1049b6a32f02SJiri Olsa 	if (update)
1050b6a32f02SJiri Olsa 		pebs_update_threshold(cpuc);
105109e61b4fSPeter Zijlstra }
105209e61b4fSPeter Zijlstra 
105368f7082fSPeter Zijlstra void intel_pmu_pebs_add(struct perf_event *event)
105409e61b4fSPeter Zijlstra {
105509e61b4fSPeter Zijlstra 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
105609e61b4fSPeter Zijlstra 	struct hw_perf_event *hwc = &event->hw;
105709e61b4fSPeter Zijlstra 	bool needed_cb = pebs_needs_sched_cb(cpuc);
105809e61b4fSPeter Zijlstra 
105909e61b4fSPeter Zijlstra 	cpuc->n_pebs++;
1060174afc3eSKan Liang 	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
106109e61b4fSPeter Zijlstra 		cpuc->n_large_pebs++;
106209e61b4fSPeter Zijlstra 
1063c22497f5SKan Liang 	pebs_update_state(needed_cb, cpuc, event, true);
10647010d129SBorislav Petkov }
10657010d129SBorislav Petkov 
10667010d129SBorislav Petkov void intel_pmu_pebs_enable(struct perf_event *event)
10677010d129SBorislav Petkov {
10687010d129SBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
10697010d129SBorislav Petkov 	struct hw_perf_event *hwc = &event->hw;
10707010d129SBorislav Petkov 	struct debug_store *ds = cpuc->ds;
107109e61b4fSPeter Zijlstra 
10727010d129SBorislav Petkov 	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
10737010d129SBorislav Petkov 
10747010d129SBorislav Petkov 	cpuc->pebs_enabled |= 1ULL << hwc->idx;
10757010d129SBorislav Petkov 
107660176089SKan Liang 	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
10777010d129SBorislav Petkov 		cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
10787010d129SBorislav Petkov 	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
10797010d129SBorislav Petkov 		cpuc->pebs_enabled |= 1ULL << 63;
10807010d129SBorislav Petkov 
1081c22497f5SKan Liang 	if (x86_pmu.intel_cap.pebs_baseline) {
1082c22497f5SKan Liang 		hwc->config |= ICL_EVENTSEL_ADAPTIVE;
1083c22497f5SKan Liang 		if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
1084c22497f5SKan Liang 			wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
1085c22497f5SKan Liang 			cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
1086c22497f5SKan Liang 		}
1087c22497f5SKan Liang 	}
1088c22497f5SKan Liang 
10897010d129SBorislav Petkov 	/*
109009e61b4fSPeter Zijlstra 	 * Use auto-reload if possible to save an MSR write in the PMI.
109109e61b4fSPeter Zijlstra 	 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD
	 * can change the sample period at any time.
10927010d129SBorislav Petkov 	 */
10937010d129SBorislav Petkov 	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
1094ec71a398SKan Liang 		unsigned int idx = hwc->idx;
1095ec71a398SKan Liang 
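		/*
		 * Fixed counters use the reset slots that follow the
		 * MAX_PEBS_EVENTS general-purpose slots in the DS area.
		 */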
1096ec71a398SKan Liang 		if (idx >= INTEL_PMC_IDX_FIXED)
1097ec71a398SKan Liang 			idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
1098ec71a398SKan Liang 		ds->pebs_event_reset[idx] =
10997010d129SBorislav Petkov 			(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
1100dc853e26SJiri Olsa 	} else {
1101dc853e26SJiri Olsa 		ds->pebs_event_reset[hwc->idx] = 0;
11027010d129SBorislav Petkov 	}
110309e61b4fSPeter Zijlstra }
11047010d129SBorislav Petkov 
110568f7082fSPeter Zijlstra void intel_pmu_pebs_del(struct perf_event *event)
110609e61b4fSPeter Zijlstra {
110709e61b4fSPeter Zijlstra 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
110809e61b4fSPeter Zijlstra 	struct hw_perf_event *hwc = &event->hw;
110909e61b4fSPeter Zijlstra 	bool needed_cb = pebs_needs_sched_cb(cpuc);
111009e61b4fSPeter Zijlstra 
111109e61b4fSPeter Zijlstra 	cpuc->n_pebs--;
1112174afc3eSKan Liang 	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
111309e61b4fSPeter Zijlstra 		cpuc->n_large_pebs--;
111409e61b4fSPeter Zijlstra 
1115c22497f5SKan Liang 	pebs_update_state(needed_cb, cpuc, event, false);
11167010d129SBorislav Petkov }
11177010d129SBorislav Petkov 
11187010d129SBorislav Petkov void intel_pmu_pebs_disable(struct perf_event *event)
11197010d129SBorislav Petkov {
11207010d129SBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
11217010d129SBorislav Petkov 	struct hw_perf_event *hwc = &event->hw;
11227010d129SBorislav Petkov 
112309e61b4fSPeter Zijlstra 	if (cpuc->n_pebs == cpuc->n_large_pebs)
11247010d129SBorislav Petkov 		intel_pmu_drain_pebs_buffer();
11257010d129SBorislav Petkov 
11267010d129SBorislav Petkov 	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
11277010d129SBorislav Petkov 
112860176089SKan Liang 	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
112960176089SKan Liang 	    (x86_pmu.version < 5))
11307010d129SBorislav Petkov 		cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
11317010d129SBorislav Petkov 	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
11327010d129SBorislav Petkov 		cpuc->pebs_enabled &= ~(1ULL << 63);
11337010d129SBorislav Petkov 
11347010d129SBorislav Petkov 	if (cpuc->enabled)
11357010d129SBorislav Petkov 		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
11367010d129SBorislav Petkov 
11377010d129SBorislav Petkov 	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
11387010d129SBorislav Petkov }
11397010d129SBorislav Petkov 
11407010d129SBorislav Petkov void intel_pmu_pebs_enable_all(void)
11417010d129SBorislav Petkov {
11427010d129SBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
11437010d129SBorislav Petkov 
11447010d129SBorislav Petkov 	if (cpuc->pebs_enabled)
11457010d129SBorislav Petkov 		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
11467010d129SBorislav Petkov }
11477010d129SBorislav Petkov 
11487010d129SBorislav Petkov void intel_pmu_pebs_disable_all(void)
11497010d129SBorislav Petkov {
11507010d129SBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
11517010d129SBorislav Petkov 
11527010d129SBorislav Petkov 	if (cpuc->pebs_enabled)
11537010d129SBorislav Petkov 		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
11547010d129SBorislav Petkov }
11557010d129SBorislav Petkov 
11567010d129SBorislav Petkov static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
11577010d129SBorislav Petkov {
11587010d129SBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
11597010d129SBorislav Petkov 	unsigned long from = cpuc->lbr_entries[0].from;
11607010d129SBorislav Petkov 	unsigned long old_to, to = cpuc->lbr_entries[0].to;
11617010d129SBorislav Petkov 	unsigned long ip = regs->ip;
11627010d129SBorislav Petkov 	int is_64bit = 0;
11637010d129SBorislav Petkov 	void *kaddr;
11647010d129SBorislav Petkov 	int size;
11657010d129SBorislav Petkov 
11667010d129SBorislav Petkov 	/*
11677010d129SBorislav Petkov 	 * We don't need to fix up anything if the PEBS assist is fault-like
11687010d129SBorislav Petkov 	 */
11697010d129SBorislav Petkov 	if (!x86_pmu.intel_cap.pebs_trap)
11707010d129SBorislav Petkov 		return 1;
11717010d129SBorislav Petkov 
11727010d129SBorislav Petkov 	/*
11737010d129SBorislav Petkov 	 * No LBR entry, no basic block, no rewinding
11747010d129SBorislav Petkov 	 */
11757010d129SBorislav Petkov 	if (!cpuc->lbr_stack.nr || !from || !to)
11767010d129SBorislav Petkov 		return 0;
11777010d129SBorislav Petkov 
11787010d129SBorislav Petkov 	/*
11797010d129SBorislav Petkov 	 * Basic blocks should never cross user/kernel boundaries
11807010d129SBorislav Petkov 	 */
11817010d129SBorislav Petkov 	if (kernel_ip(ip) != kernel_ip(to))
11827010d129SBorislav Petkov 		return 0;
11837010d129SBorislav Petkov 
11847010d129SBorislav Petkov 	/*
11857010d129SBorislav Petkov 	 * unsigned math, either ip is before the start (impossible) or
11867010d129SBorislav Petkov 	 * the basic block is larger than 1 page (sanity)
11877010d129SBorislav Petkov 	 */
11887010d129SBorislav Petkov 	if ((ip - to) > PEBS_FIXUP_SIZE)
11897010d129SBorislav Petkov 		return 0;
11907010d129SBorislav Petkov 
11917010d129SBorislav Petkov 	/*
11927010d129SBorislav Petkov 	 * We sampled a branch insn, rewind using the LBR stack
11937010d129SBorislav Petkov 	 */
11947010d129SBorislav Petkov 	if (ip == to) {
11957010d129SBorislav Petkov 		set_linear_ip(regs, from);
11967010d129SBorislav Petkov 		return 1;
11977010d129SBorislav Petkov 	}
11987010d129SBorislav Petkov 
11997010d129SBorislav Petkov 	size = ip - to;
12007010d129SBorislav Petkov 	if (!kernel_ip(ip)) {
12017010d129SBorislav Petkov 		int bytes;
12027010d129SBorislav Petkov 		u8 *buf = this_cpu_read(insn_buffer);
12037010d129SBorislav Petkov 
12047010d129SBorislav Petkov 		/* 'size' must fit our buffer, see above */
12057010d129SBorislav Petkov 		bytes = copy_from_user_nmi(buf, (void __user *)to, size);
12067010d129SBorislav Petkov 		if (bytes != 0)
12077010d129SBorislav Petkov 			return 0;
12087010d129SBorislav Petkov 
12097010d129SBorislav Petkov 		kaddr = buf;
12107010d129SBorislav Petkov 	} else {
12117010d129SBorislav Petkov 		kaddr = (void *)to;
12127010d129SBorislav Petkov 	}
12137010d129SBorislav Petkov 
12147010d129SBorislav Petkov 	do {
12157010d129SBorislav Petkov 		struct insn insn;
12167010d129SBorislav Petkov 
12177010d129SBorislav Petkov 		old_to = to;
12187010d129SBorislav Petkov 
12197010d129SBorislav Petkov #ifdef CONFIG_X86_64
12207010d129SBorislav Petkov 		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
12217010d129SBorislav Petkov #endif
12227010d129SBorislav Petkov 		insn_init(&insn, kaddr, size, is_64bit);
12237010d129SBorislav Petkov 		insn_get_length(&insn);
12247010d129SBorislav Petkov 		/*
12257010d129SBorislav Petkov 		 * Make sure there was not a problem decoding the
12267010d129SBorislav Petkov 		 * instruction and getting the length.  This is
12277010d129SBorislav Petkov 		 * doubly important because we have an infinite
12287010d129SBorislav Petkov 		 * loop if insn.length=0.
12297010d129SBorislav Petkov 		 */
12307010d129SBorislav Petkov 		if (!insn.length)
12317010d129SBorislav Petkov 			break;
12327010d129SBorislav Petkov 
12337010d129SBorislav Petkov 		to += insn.length;
12347010d129SBorislav Petkov 		kaddr += insn.length;
12357010d129SBorislav Petkov 		size -= insn.length;
12367010d129SBorislav Petkov 	} while (to < ip);
12377010d129SBorislav Petkov 
12387010d129SBorislav Petkov 	if (to == ip) {
12397010d129SBorislav Petkov 		set_linear_ip(regs, old_to);
12407010d129SBorislav Petkov 		return 1;
12417010d129SBorislav Petkov 	}
12427010d129SBorislav Petkov 
12437010d129SBorislav Petkov 	/*
12447010d129SBorislav Petkov 	 * Even though we decoded the basic block, the instruction stream
12457010d129SBorislav Petkov 	 * never matched the given IP, either the TO or the IP got corrupted.
12467010d129SBorislav Petkov 	 */
12477010d129SBorislav Petkov 	return 0;
12487010d129SBorislav Petkov }
12497010d129SBorislav Petkov 
125048f38aa4SAndi Kleen static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
12517010d129SBorislav Petkov {
125248f38aa4SAndi Kleen 	if (tsx_tuning) {
125348f38aa4SAndi Kleen 		union hsw_tsx_tuning tsx = { .value = tsx_tuning };
12547010d129SBorislav Petkov 		return tsx.cycles_last_block;
12557010d129SBorislav Petkov 	}
12567010d129SBorislav Petkov 	return 0;
12577010d129SBorislav Petkov }
12587010d129SBorislav Petkov 
125948f38aa4SAndi Kleen static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
12607010d129SBorislav Petkov {
126148f38aa4SAndi Kleen 	u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
12627010d129SBorislav Petkov 
12637010d129SBorislav Petkov 	/* For RTM XABORTs also log the abort code from AX */
126448f38aa4SAndi Kleen 	if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
126548f38aa4SAndi Kleen 		txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
12667010d129SBorislav Petkov 	return txn;
12677010d129SBorislav Petkov }
12687010d129SBorislav Petkov 
1269c22497f5SKan Liang static inline u64 get_pebs_status(void *n)
1270c22497f5SKan Liang {
1271c22497f5SKan Liang 	if (x86_pmu.intel_cap.pebs_format < 4)
1272c22497f5SKan Liang 		return ((struct pebs_record_nhm *)n)->status;
1273c22497f5SKan Liang 	return ((struct pebs_basic *)n)->applicable_counters;
1274c22497f5SKan Liang }
1275c22497f5SKan Liang 
127648f38aa4SAndi Kleen #define PERF_X86_EVENT_PEBS_HSW_PREC \
127748f38aa4SAndi Kleen 		(PERF_X86_EVENT_PEBS_ST_HSW | \
127848f38aa4SAndi Kleen 		 PERF_X86_EVENT_PEBS_LD_HSW | \
127948f38aa4SAndi Kleen 		 PERF_X86_EVENT_PEBS_NA_HSW)
128048f38aa4SAndi Kleen 
128148f38aa4SAndi Kleen static u64 get_data_src(struct perf_event *event, u64 aux)
128248f38aa4SAndi Kleen {
128348f38aa4SAndi Kleen 	u64 val = PERF_MEM_NA;
128448f38aa4SAndi Kleen 	int fl = event->hw.flags;
128548f38aa4SAndi Kleen 	bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
128648f38aa4SAndi Kleen 
128748f38aa4SAndi Kleen 	if (fl & PERF_X86_EVENT_PEBS_LDLAT)
128848f38aa4SAndi Kleen 		val = load_latency_data(aux);
128948f38aa4SAndi Kleen 	else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
129048f38aa4SAndi Kleen 		val = precise_datala_hsw(event, aux);
129148f38aa4SAndi Kleen 	else if (fst)
129248f38aa4SAndi Kleen 		val = precise_store_data(aux);
129348f38aa4SAndi Kleen 	return val;
129448f38aa4SAndi Kleen }
129548f38aa4SAndi Kleen 
1296c22497f5SKan Liang static void setup_pebs_fixed_sample_data(struct perf_event *event,
12977010d129SBorislav Petkov 					 struct pt_regs *iregs, void *__pebs,
12987010d129SBorislav Petkov 					 struct perf_sample_data *data,
12997010d129SBorislav Petkov 					 struct pt_regs *regs)
13007010d129SBorislav Petkov {
13017010d129SBorislav Petkov 	/*
13027010d129SBorislav Petkov 	 * We cast to the biggest pebs_record but are careful not to
13037010d129SBorislav Petkov 	 * unconditionally access the 'extra' entries.
13047010d129SBorislav Petkov 	 */
13057010d129SBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
13067010d129SBorislav Petkov 	struct pebs_record_skl *pebs = __pebs;
13077010d129SBorislav Petkov 	u64 sample_type;
130848f38aa4SAndi Kleen 	int fll;
13097010d129SBorislav Petkov 
13107010d129SBorislav Petkov 	if (pebs == NULL)
13117010d129SBorislav Petkov 		return;
13127010d129SBorislav Petkov 
13137010d129SBorislav Petkov 	sample_type = event->attr.sample_type;
131448f38aa4SAndi Kleen 	fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
13157010d129SBorislav Petkov 
13167010d129SBorislav Petkov 	perf_sample_data_init(data, 0, event->hw.last_period);
13177010d129SBorislav Petkov 
13187010d129SBorislav Petkov 	data->period = event->hw.last_period;
13197010d129SBorislav Petkov 
13207010d129SBorislav Petkov 	/*
13217010d129SBorislav Petkov 	 * Use latency for weight (only avail with PEBS-LL)
13227010d129SBorislav Petkov 	 */
13237010d129SBorislav Petkov 	if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
13247010d129SBorislav Petkov 		data->weight = pebs->lat;
13257010d129SBorislav Petkov 
13267010d129SBorislav Petkov 	/*
13277010d129SBorislav Petkov 	 * data.data_src encodes the data source
13287010d129SBorislav Petkov 	 */
132948f38aa4SAndi Kleen 	if (sample_type & PERF_SAMPLE_DATA_SRC)
133048f38aa4SAndi Kleen 		data->data_src.val = get_data_src(event, pebs->dse);
13317010d129SBorislav Petkov 
13327010d129SBorislav Petkov 	/*
13336cbc304fSPeter Zijlstra 	 * We must, however, always use iregs for the unwinder to stay sane;
13346cbc304fSPeter Zijlstra 	 * the record's BP, SP and IP can point into thin air when the record
1335a97673a1SIngo Molnar 	 * is from a previous PMI context, or when an (I)RET happened between
13366cbc304fSPeter Zijlstra 	 * the record and the PMI.
13376cbc304fSPeter Zijlstra 	 */
13386cbc304fSPeter Zijlstra 	if (sample_type & PERF_SAMPLE_CALLCHAIN)
13396cbc304fSPeter Zijlstra 		data->callchain = perf_callchain(event, iregs);
13406cbc304fSPeter Zijlstra 
13416cbc304fSPeter Zijlstra 	/*
1342b8000586SPeter Zijlstra 	 * We use the interrupt regs as a base because the PEBS record does not
1343b8000586SPeter Zijlstra 	 * contain a full regs set, specifically it seems to lack segment
1344b8000586SPeter Zijlstra 	 * descriptors, which get used by things like user_mode().
13457010d129SBorislav Petkov 	 *
1346b8000586SPeter Zijlstra 	 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
13477010d129SBorislav Petkov 	 */
13487010d129SBorislav Petkov 	*regs = *iregs;
1349d1e7e602SStephane Eranian 
1350d1e7e602SStephane Eranian 	/*
1351d1e7e602SStephane Eranian 	 * Initialize regs->flags from the PEBS record and clear the exact
1352d1e7e602SStephane Eranian 	 * bit (which uses x86 EFLAGS reserved bit 3), i.e., do not rely
1353d1e7e602SStephane Eranian 	 * on it being zero:
1354d1e7e602SStephane Eranian 	 */
1355d1e7e602SStephane Eranian 	regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT;
13567010d129SBorislav Petkov 
13577010d129SBorislav Petkov 	if (sample_type & PERF_SAMPLE_REGS_INTR) {
13587010d129SBorislav Petkov 		regs->ax = pebs->ax;
13597010d129SBorislav Petkov 		regs->bx = pebs->bx;
13607010d129SBorislav Petkov 		regs->cx = pebs->cx;
13617010d129SBorislav Petkov 		regs->dx = pebs->dx;
13627010d129SBorislav Petkov 		regs->si = pebs->si;
13637010d129SBorislav Petkov 		regs->di = pebs->di;
1364b8000586SPeter Zijlstra 
13657010d129SBorislav Petkov 		regs->bp = pebs->bp;
13667010d129SBorislav Petkov 		regs->sp = pebs->sp;
13677010d129SBorislav Petkov 
13687010d129SBorislav Petkov #ifndef CONFIG_X86_32
13697010d129SBorislav Petkov 		regs->r8 = pebs->r8;
13707010d129SBorislav Petkov 		regs->r9 = pebs->r9;
13717010d129SBorislav Petkov 		regs->r10 = pebs->r10;
13727010d129SBorislav Petkov 		regs->r11 = pebs->r11;
13737010d129SBorislav Petkov 		regs->r12 = pebs->r12;
13747010d129SBorislav Petkov 		regs->r13 = pebs->r13;
13757010d129SBorislav Petkov 		regs->r14 = pebs->r14;
13767010d129SBorislav Petkov 		regs->r15 = pebs->r15;
13777010d129SBorislav Petkov #endif
13787010d129SBorislav Petkov 	}
13797010d129SBorislav Petkov 
138071eb9ee9SStephane Eranian 	if (event->attr.precise_ip > 1) {
1381d1e7e602SStephane Eranian 		/*
1382d1e7e602SStephane Eranian 		 * Haswell and later processors have an 'eventing IP'
1383d1e7e602SStephane Eranian 		 * (real IP) which fixes the off-by-1 skid in hardware.
1384d1e7e602SStephane Eranian 		 * Use it when precise_ip >= 2 :
1385d1e7e602SStephane Eranian 		 */
138671eb9ee9SStephane Eranian 		if (x86_pmu.intel_cap.pebs_format >= 2) {
138771eb9ee9SStephane Eranian 			set_linear_ip(regs, pebs->real_ip);
13887010d129SBorislav Petkov 			regs->flags |= PERF_EFLAGS_EXACT;
138971eb9ee9SStephane Eranian 		} else {
1390d1e7e602SStephane Eranian 			/* Otherwise, use PEBS off-by-1 IP: */
139171eb9ee9SStephane Eranian 			set_linear_ip(regs, pebs->ip);
139271eb9ee9SStephane Eranian 
1393d1e7e602SStephane Eranian 			/*
1394d1e7e602SStephane Eranian 			 * With precise_ip >= 2, try to fix up the off-by-1 IP
1395d1e7e602SStephane Eranian 			 * using the LBR. If successful, the fixup function
1396d1e7e602SStephane Eranian 			 * corrects regs->ip and calls set_linear_ip() on regs:
1397d1e7e602SStephane Eranian 			 */
139871eb9ee9SStephane Eranian 			if (intel_pmu_pebs_fixup_ip(regs))
13997010d129SBorislav Petkov 				regs->flags |= PERF_EFLAGS_EXACT;
140071eb9ee9SStephane Eranian 		}
1401d1e7e602SStephane Eranian 	} else {
1402d1e7e602SStephane Eranian 		/*
1403d1e7e602SStephane Eranian 		 * When precise_ip == 1, return the PEBS off-by-1 IP,
1404d1e7e602SStephane Eranian 		 * no fixup attempted:
1405d1e7e602SStephane Eranian 		 */
140671eb9ee9SStephane Eranian 		set_linear_ip(regs, pebs->ip);
1407d1e7e602SStephane Eranian 	}
140871eb9ee9SStephane Eranian 
1410fc7ce9c7SKan Liang 	if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) &&
14117010d129SBorislav Petkov 	    x86_pmu.intel_cap.pebs_format >= 1)
14127010d129SBorislav Petkov 		data->addr = pebs->dla;
14137010d129SBorislav Petkov 
14147010d129SBorislav Petkov 	if (x86_pmu.intel_cap.pebs_format >= 2) {
14157010d129SBorislav Petkov 		/* Only set the TSX weight when there is no memory weight. */
14167010d129SBorislav Petkov 		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
141748f38aa4SAndi Kleen 			data->weight = intel_get_tsx_weight(pebs->tsx_tuning);
14187010d129SBorislav Petkov 
14197010d129SBorislav Petkov 		if (sample_type & PERF_SAMPLE_TRANSACTION)
142048f38aa4SAndi Kleen 			data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
142148f38aa4SAndi Kleen 							      pebs->ax);
14227010d129SBorislav Petkov 	}
14237010d129SBorislav Petkov 
14247010d129SBorislav Petkov 	/*
14257010d129SBorislav Petkov 	 * v3 supplies an accurate time stamp, so we use it
14267010d129SBorislav Petkov 	 * for the sample's time stamp.
14277010d129SBorislav Petkov 	 *
14287010d129SBorislav Petkov 	 * We can only do this for the default trace clock.
14297010d129SBorislav Petkov 	 */
14307010d129SBorislav Petkov 	if (x86_pmu.intel_cap.pebs_format >= 3 &&
14317010d129SBorislav Petkov 		event->attr.use_clockid == 0)
14327010d129SBorislav Petkov 		data->time = native_sched_clock_from_tsc(pebs->tsc);
14337010d129SBorislav Petkov 
14347010d129SBorislav Petkov 	if (has_branch_stack(event))
14357010d129SBorislav Petkov 		data->br_stack = &cpuc->lbr_stack;
14367010d129SBorislav Petkov }
14377010d129SBorislav Petkov 
1438c22497f5SKan Liang static void adaptive_pebs_save_regs(struct pt_regs *regs,
1439c22497f5SKan Liang 				    struct pebs_gprs *gprs)
1440c22497f5SKan Liang {
1441c22497f5SKan Liang 	regs->ax = gprs->ax;
1442c22497f5SKan Liang 	regs->bx = gprs->bx;
1443c22497f5SKan Liang 	regs->cx = gprs->cx;
1444c22497f5SKan Liang 	regs->dx = gprs->dx;
1445c22497f5SKan Liang 	regs->si = gprs->si;
1446c22497f5SKan Liang 	regs->di = gprs->di;
1447c22497f5SKan Liang 	regs->bp = gprs->bp;
1448c22497f5SKan Liang 	regs->sp = gprs->sp;
1449c22497f5SKan Liang #ifndef CONFIG_X86_32
1450c22497f5SKan Liang 	regs->r8 = gprs->r8;
1451c22497f5SKan Liang 	regs->r9 = gprs->r9;
1452c22497f5SKan Liang 	regs->r10 = gprs->r10;
1453c22497f5SKan Liang 	regs->r11 = gprs->r11;
1454c22497f5SKan Liang 	regs->r12 = gprs->r12;
1455c22497f5SKan Liang 	regs->r13 = gprs->r13;
1456c22497f5SKan Liang 	regs->r14 = gprs->r14;
1457c22497f5SKan Liang 	regs->r15 = gprs->r15;
1458c22497f5SKan Liang #endif
1459c22497f5SKan Liang }
1460c22497f5SKan Liang 
1461c22497f5SKan Liang /*
1462c22497f5SKan Liang  * With adaptive PEBS the layout depends on what fields are configured.
1463c22497f5SKan Liang  */
1464c22497f5SKan Liang 
1465c22497f5SKan Liang static void setup_pebs_adaptive_sample_data(struct perf_event *event,
1466c22497f5SKan Liang 					    struct pt_regs *iregs, void *__pebs,
1467c22497f5SKan Liang 					    struct perf_sample_data *data,
1468c22497f5SKan Liang 					    struct pt_regs *regs)
1469c22497f5SKan Liang {
1470c22497f5SKan Liang 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1471c22497f5SKan Liang 	struct pebs_basic *basic = __pebs;
1472c22497f5SKan Liang 	void *next_record = basic + 1;
1473c22497f5SKan Liang 	u64 sample_type;
1474c22497f5SKan Liang 	u64 format_size;
1475c22497f5SKan Liang 	struct pebs_meminfo *meminfo = NULL;
1476c22497f5SKan Liang 	struct pebs_gprs *gprs = NULL;
1477c22497f5SKan Liang 	struct x86_perf_regs *perf_regs;
1478c22497f5SKan Liang 
1479c22497f5SKan Liang 	if (basic == NULL)
1480c22497f5SKan Liang 		return;
1481c22497f5SKan Liang 
1482c22497f5SKan Liang 	perf_regs = container_of(regs, struct x86_perf_regs, regs);
1483c22497f5SKan Liang 	perf_regs->xmm_regs = NULL;
1484c22497f5SKan Liang 
1485c22497f5SKan Liang 	sample_type = event->attr.sample_type;
1486c22497f5SKan Liang 	format_size = basic->format_size;
1487c22497f5SKan Liang 	perf_sample_data_init(data, 0, event->hw.last_period);
1488c22497f5SKan Liang 	data->period = event->hw.last_period;
1489c22497f5SKan Liang 
1490c22497f5SKan Liang 	if (event->attr.use_clockid == 0)
1491c22497f5SKan Liang 		data->time = native_sched_clock_from_tsc(basic->tsc);
1492c22497f5SKan Liang 
1493c22497f5SKan Liang 	/*
1494c22497f5SKan Liang 	 * We must, however, always use iregs for the unwinder to stay sane;
1495c22497f5SKan Liang 	 * the record's BP, SP and IP can point into thin air when the record
1496c22497f5SKan Liang 	 * is from a previous PMI context, or when an (I)RET happened between
1497c22497f5SKan Liang 	 * the record and the PMI.
1498c22497f5SKan Liang 	 */
1499c22497f5SKan Liang 	if (sample_type & PERF_SAMPLE_CALLCHAIN)
1500c22497f5SKan Liang 		data->callchain = perf_callchain(event, iregs);
1501c22497f5SKan Liang 
1502c22497f5SKan Liang 	*regs = *iregs;
1503c22497f5SKan Liang 	/* The IP in the basic record is the eventing IP */
1504c22497f5SKan Liang 	set_linear_ip(regs, basic->ip);
1505c22497f5SKan Liang 	regs->flags = PERF_EFLAGS_EXACT;
1506c22497f5SKan Liang 
1507c22497f5SKan Liang 	/*
1508c22497f5SKan Liang 	 * The MEMINFO record comes before the GP record,
1509c22497f5SKan Liang 	 * but PERF_SAMPLE_TRANSACTION needs gprs->ax.
1510c22497f5SKan Liang 	 * Save the pointer here and process it later.
1511c22497f5SKan Liang 	 */
1512c22497f5SKan Liang 	if (format_size & PEBS_DATACFG_MEMINFO) {
1513c22497f5SKan Liang 		meminfo = next_record;
1514c22497f5SKan Liang 		next_record = meminfo + 1;
1515c22497f5SKan Liang 	}
1516c22497f5SKan Liang 
1517c22497f5SKan Liang 	if (format_size & PEBS_DATACFG_GP) {
1518c22497f5SKan Liang 		gprs = next_record;
1519c22497f5SKan Liang 		next_record = gprs + 1;
1520c22497f5SKan Liang 
1521c22497f5SKan Liang 		if (event->attr.precise_ip < 2) {
1522c22497f5SKan Liang 			set_linear_ip(regs, gprs->ip);
1523c22497f5SKan Liang 			regs->flags &= ~PERF_EFLAGS_EXACT;
1524c22497f5SKan Liang 		}
1525c22497f5SKan Liang 
1526c22497f5SKan Liang 		if (sample_type & PERF_SAMPLE_REGS_INTR)
1527c22497f5SKan Liang 			adaptive_pebs_save_regs(regs, gprs);
1528c22497f5SKan Liang 	}
1529c22497f5SKan Liang 
1530c22497f5SKan Liang 	if (format_size & PEBS_DATACFG_MEMINFO) {
1531c22497f5SKan Liang 		if (sample_type & PERF_SAMPLE_WEIGHT)
1532c22497f5SKan Liang 			data->weight = meminfo->latency ?:
1533c22497f5SKan Liang 				intel_get_tsx_weight(meminfo->tsx_tuning);
1534c22497f5SKan Liang 
1535c22497f5SKan Liang 		if (sample_type & PERF_SAMPLE_DATA_SRC)
1536c22497f5SKan Liang 			data->data_src.val = get_data_src(event, meminfo->aux);
1537c22497f5SKan Liang 
1538c22497f5SKan Liang 		if (sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
1539c22497f5SKan Liang 			data->addr = meminfo->address;
1540c22497f5SKan Liang 
1541c22497f5SKan Liang 		if (sample_type & PERF_SAMPLE_TRANSACTION)
1542c22497f5SKan Liang 			data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning,
1543c22497f5SKan Liang 							  gprs ? gprs->ax : 0);
1544c22497f5SKan Liang 	}
1545c22497f5SKan Liang 
1546c22497f5SKan Liang 	if (format_size & PEBS_DATACFG_XMMS) {
1547c22497f5SKan Liang 		struct pebs_xmm *xmm = next_record;
1548c22497f5SKan Liang 
1549c22497f5SKan Liang 		next_record = xmm + 1;
1550c22497f5SKan Liang 		perf_regs->xmm_regs = xmm->xmm;
1551c22497f5SKan Liang 	}
1552c22497f5SKan Liang 
1553c22497f5SKan Liang 	if (format_size & PEBS_DATACFG_LBRS) {
1554c22497f5SKan Liang 		struct pebs_lbr *lbr = next_record;
1555c22497f5SKan Liang 		int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
1556c22497f5SKan Liang 					& 0xff) + 1;
1557c22497f5SKan Liang 		next_record += num_lbr * sizeof(struct pebs_lbr_entry);
1558c22497f5SKan Liang 
1559c22497f5SKan Liang 		if (has_branch_stack(event)) {
1560c22497f5SKan Liang 			intel_pmu_store_pebs_lbrs(lbr);
1561c22497f5SKan Liang 			data->br_stack = &cpuc->lbr_stack;
1562c22497f5SKan Liang 		}
1563c22497f5SKan Liang 	}
1564c22497f5SKan Liang 
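	/*
	 * The hardware reports the record size in the upper 16 bits of
	 * format_size; sanity-check it against what was just parsed.
	 */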
1565c22497f5SKan Liang 	WARN_ONCE(next_record != __pebs + (format_size >> 48),
1566c22497f5SKan Liang 			"PEBS record size %llu, expected %llu, config %llx\n",
1567c22497f5SKan Liang 			format_size >> 48,
1568c22497f5SKan Liang 			(u64)(next_record - __pebs),
1569c22497f5SKan Liang 			basic->format_size);
1570c22497f5SKan Liang }
1571c22497f5SKan Liang 
15727010d129SBorislav Petkov static inline void *
15737010d129SBorislav Petkov get_next_pebs_record_by_bit(void *base, void *top, int bit)
15747010d129SBorislav Petkov {
15757010d129SBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
15767010d129SBorislav Petkov 	void *at;
15777010d129SBorislav Petkov 	u64 pebs_status;
15787010d129SBorislav Petkov 
15797010d129SBorislav Petkov 	/*
15807010d129SBorislav Petkov 	 * fmt0 does not have a status bitfield (does not use
15817010d129SBorislav Petkov 	 * the pebs_record_nhm format)
15827010d129SBorislav Petkov 	 */
15837010d129SBorislav Petkov 	if (x86_pmu.intel_cap.pebs_format < 1)
15847010d129SBorislav Petkov 		return base;
15857010d129SBorislav Petkov 
15867010d129SBorislav Petkov 	if (base == NULL)
15877010d129SBorislav Petkov 		return NULL;
15887010d129SBorislav Petkov 
1589c22497f5SKan Liang 	for (at = base; at < top; at += cpuc->pebs_record_size) {
1590c22497f5SKan Liang 		unsigned long status = get_pebs_status(at);
15917010d129SBorislav Petkov 
1592c22497f5SKan Liang 		if (test_bit(bit, (unsigned long *)&status)) {
15937010d129SBorislav Petkov 			/* PEBS v3 has accurate status bits */
15947010d129SBorislav Petkov 			if (x86_pmu.intel_cap.pebs_format >= 3)
15957010d129SBorislav Petkov 				return at;
15967010d129SBorislav Petkov 
1597c22497f5SKan Liang 			if (status == (1 << bit))
15987010d129SBorislav Petkov 				return at;
15997010d129SBorislav Petkov 
16007010d129SBorislav Petkov 			/* clear non-PEBS bit and re-check */
1601c22497f5SKan Liang 			pebs_status = status & cpuc->pebs_enabled;
1602fd583ad1SKan Liang 			pebs_status &= PEBS_COUNTER_MASK;
16037010d129SBorislav Petkov 			if (pebs_status == (1 << bit))
16047010d129SBorislav Petkov 				return at;
16057010d129SBorislav Petkov 		}
16067010d129SBorislav Petkov 	}
16077010d129SBorislav Petkov 	return NULL;
16087010d129SBorislav Petkov }
16097010d129SBorislav Petkov 
16105bee2cc6SKan Liang void intel_pmu_auto_reload_read(struct perf_event *event)
16115bee2cc6SKan Liang {
16125bee2cc6SKan Liang 	WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
16135bee2cc6SKan Liang 
16145bee2cc6SKan Liang 	perf_pmu_disable(event->pmu);
16155bee2cc6SKan Liang 	intel_pmu_drain_pebs_buffer();
16165bee2cc6SKan Liang 	perf_pmu_enable(event->pmu);
16175bee2cc6SKan Liang }
16185bee2cc6SKan Liang 
1619d31fc13fSKan Liang /*
1620d31fc13fSKan Liang  * Special variant of intel_pmu_save_and_restart() for auto-reload.
1621d31fc13fSKan Liang  */
1622d31fc13fSKan Liang static int
1623d31fc13fSKan Liang intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
1624d31fc13fSKan Liang {
1625d31fc13fSKan Liang 	struct hw_perf_event *hwc = &event->hw;
1626d31fc13fSKan Liang 	int shift = 64 - x86_pmu.cntval_bits;
1627d31fc13fSKan Liang 	u64 period = hwc->sample_period;
1628d31fc13fSKan Liang 	u64 prev_raw_count, new_raw_count;
1629d31fc13fSKan Liang 	s64 new, old;
1630d31fc13fSKan Liang 
1631d31fc13fSKan Liang 	WARN_ON(!period);
1632d31fc13fSKan Liang 
1633d31fc13fSKan Liang 	/*
1634d31fc13fSKan Liang 	 * drain_pebs() only happens when the PMU is disabled.
1635d31fc13fSKan Liang 	 */
1636d31fc13fSKan Liang 	WARN_ON(this_cpu_read(cpu_hw_events.enabled));
1637d31fc13fSKan Liang 
1638d31fc13fSKan Liang 	prev_raw_count = local64_read(&hwc->prev_count);
1639d31fc13fSKan Liang 	rdpmcl(hwc->event_base_rdpmc, new_raw_count);
1640d31fc13fSKan Liang 	local64_set(&hwc->prev_count, new_raw_count);
1641d31fc13fSKan Liang 
1642d31fc13fSKan Liang 	/*
1643d31fc13fSKan Liang 	 * Since the counter increments a negative counter value and
1644d31fc13fSKan Liang 	 * overflows on the sign switch, giving the interval:
1645d31fc13fSKan Liang 	 *
1646d31fc13fSKan Liang 	 *   [-period, 0]
1647d31fc13fSKan Liang 	 *
1648d31fc13fSKan Liang 	 * the difference between two consecutive reads is:
1649d31fc13fSKan Liang 	 *
1650d31fc13fSKan Liang 	 *   A) value2 - value1;
1651d31fc13fSKan Liang 	 *      when no overflows have happened in between,
1652d31fc13fSKan Liang 	 *
1653d31fc13fSKan Liang 	 *   B) (0 - value1) + (value2 - (-period));
1654d31fc13fSKan Liang 	 *      when one overflow happened in between,
1655d31fc13fSKan Liang 	 *
1656d31fc13fSKan Liang 	 *   C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
1657d31fc13fSKan Liang 	 *      when @n overflows happened in between.
1658d31fc13fSKan Liang 	 *
1659d31fc13fSKan Liang 	 * Here A) is the obvious difference, B) is the extension to the
1660d31fc13fSKan Liang 	 * discrete interval, where the first term is to the top of the
1661d31fc13fSKan Liang 	 * interval and the second term is from the bottom of the next
1662d31fc13fSKan Liang 	 * interval and C) the extension to multiple intervals, where the
1663d31fc13fSKan Liang 	 * middle term is the whole intervals covered.
1664d31fc13fSKan Liang 	 *
1665d31fc13fSKan Liang 	 * An equivalent of C, by reduction, is:
1666d31fc13fSKan Liang 	 *
1667d31fc13fSKan Liang 	 *   value2 - value1 + n * period
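	 *
	 * Worked example (illustrative numbers only): with period = 100 the
	 * counter runs from -100 to 0. If value1 = -40 was read, two
	 * overflows occurred (n = 2) and value2 = -70 is read now, then
	 * C) gives (0 - (-40)) + 1 * 100 + (-70 - (-100)) = 170 and the
	 * reduction gives -70 - (-40) + 2 * 100 = 170 as well.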
1668d31fc13fSKan Liang 	 */
1669d31fc13fSKan Liang 	new = ((s64)(new_raw_count << shift) >> shift);
1670d31fc13fSKan Liang 	old = ((s64)(prev_raw_count << shift) >> shift);
1671d31fc13fSKan Liang 	local64_add(new - old + count * period, &event->count);
1672d31fc13fSKan Liang 
1673d31fc13fSKan Liang 	perf_event_update_userpage(event);
1674d31fc13fSKan Liang 
1675d31fc13fSKan Liang 	return 0;
1676d31fc13fSKan Liang }
1677d31fc13fSKan Liang 
16787010d129SBorislav Petkov static void __intel_pmu_pebs_event(struct perf_event *event,
16797010d129SBorislav Petkov 				   struct pt_regs *iregs,
16807010d129SBorislav Petkov 				   void *base, void *top,
1681c22497f5SKan Liang 				   int bit, int count,
1682c22497f5SKan Liang 				   void (*setup_sample)(struct perf_event *,
1683c22497f5SKan Liang 						struct pt_regs *,
1684c22497f5SKan Liang 						void *,
1685c22497f5SKan Liang 						struct perf_sample_data *,
1686c22497f5SKan Liang 						struct pt_regs *))
16877010d129SBorislav Petkov {
1688c22497f5SKan Liang 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1689d31fc13fSKan Liang 	struct hw_perf_event *hwc = &event->hw;
16907010d129SBorislav Petkov 	struct perf_sample_data data;
1691c22497f5SKan Liang 	struct x86_perf_regs perf_regs;
1692c22497f5SKan Liang 	struct pt_regs *regs = &perf_regs.regs;
16937010d129SBorislav Petkov 	void *at = get_next_pebs_record_by_bit(base, top, bit);
16947010d129SBorislav Petkov 
1695d31fc13fSKan Liang 	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
1696d31fc13fSKan Liang 		/*
1697d31fc13fSKan Liang 		 * Auto-reload is currently only enabled in fixed-period
1698d31fc13fSKan Liang 		 * mode, so the reload value is always hwc->sample_period.
1699d31fc13fSKan Liang 		 * This may need to change if auto-reload is ever enabled
1700d31fc13fSKan Liang 		 * in freq mode.
1701d31fc13fSKan Liang 		 */
1702d31fc13fSKan Liang 		intel_pmu_save_and_restart_reload(event, count);
1703d31fc13fSKan Liang 	} else if (!intel_pmu_save_and_restart(event))
17047010d129SBorislav Petkov 		return;
17057010d129SBorislav Petkov 
17067010d129SBorislav Petkov 	while (count > 1) {
1707c22497f5SKan Liang 		setup_sample(event, iregs, at, &data, regs);
1708c22497f5SKan Liang 		perf_event_output(event, &data, regs);
1709c22497f5SKan Liang 		at += cpuc->pebs_record_size;
17107010d129SBorislav Petkov 		at = get_next_pebs_record_by_bit(at, top, bit);
17117010d129SBorislav Petkov 		count--;
17127010d129SBorislav Petkov 	}
17137010d129SBorislav Petkov 
1714c22497f5SKan Liang 	setup_sample(event, iregs, at, &data, regs);
17157010d129SBorislav Petkov 
17167010d129SBorislav Petkov 	/*
17177010d129SBorislav Petkov 	 * All but the last record are processed.
17187010d129SBorislav Petkov 	 * The last one is left so that the overflow handler can be called.
17197010d129SBorislav Petkov 	 */
1720c22497f5SKan Liang 	if (perf_event_overflow(event, &data, regs)) {
17217010d129SBorislav Petkov 		x86_pmu_stop(event, 0);
17227010d129SBorislav Petkov 		return;
17237010d129SBorislav Petkov 	}
17257010d129SBorislav Petkov }
17267010d129SBorislav Petkov 
17277010d129SBorislav Petkov static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
17287010d129SBorislav Petkov {
17297010d129SBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
17307010d129SBorislav Petkov 	struct debug_store *ds = cpuc->ds;
17317010d129SBorislav Petkov 	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
17327010d129SBorislav Petkov 	struct pebs_record_core *at, *top;
17337010d129SBorislav Petkov 	int n;
17347010d129SBorislav Petkov 
17357010d129SBorislav Petkov 	if (!x86_pmu.pebs_active)
17367010d129SBorislav Petkov 		return;
17377010d129SBorislav Petkov 
17387010d129SBorislav Petkov 	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
17397010d129SBorislav Petkov 	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
17407010d129SBorislav Petkov 
17417010d129SBorislav Petkov 	/*
17427010d129SBorislav Petkov 	 * Whatever else happens, drain the thing
17437010d129SBorislav Petkov 	 */
17447010d129SBorislav Petkov 	ds->pebs_index = ds->pebs_buffer_base;
17457010d129SBorislav Petkov 
17467010d129SBorislav Petkov 	if (!test_bit(0, cpuc->active_mask))
17477010d129SBorislav Petkov 		return;
17487010d129SBorislav Petkov 
17497010d129SBorislav Petkov 	WARN_ON_ONCE(!event);
17507010d129SBorislav Petkov 
17517010d129SBorislav Petkov 	if (!event->attr.precise_ip)
17527010d129SBorislav Petkov 		return;
17537010d129SBorislav Petkov 
17547010d129SBorislav Petkov 	n = top - at;
1755d31fc13fSKan Liang 	if (n <= 0) {
1756d31fc13fSKan Liang 		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
1757d31fc13fSKan Liang 			intel_pmu_save_and_restart_reload(event, 0);
17587010d129SBorislav Petkov 		return;
1759d31fc13fSKan Liang 	}
17607010d129SBorislav Petkov 
1761c22497f5SKan Liang 	__intel_pmu_pebs_event(event, iregs, at, top, 0, n,
1762c22497f5SKan Liang 			       setup_pebs_fixed_sample_data);
17637010d129SBorislav Petkov }
17647010d129SBorislav Petkov 
1765477f00f9SKan Liang static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
1766477f00f9SKan Liang {
1767477f00f9SKan Liang 	struct perf_event *event;
1768477f00f9SKan Liang 	int bit;
1769477f00f9SKan Liang 
1770477f00f9SKan Liang 	/*
1771477f00f9SKan Liang 	 * drain_pebs() can be called twice in a short period for an
1772477f00f9SKan Liang 	 * auto-reload event in pmu::read(), with no overflow having
1773477f00f9SKan Liang 	 * happened in between. In that case we still need to call
1774477f00f9SKan Liang 	 * intel_pmu_save_and_restart_reload() to update event->count.
1776477f00f9SKan Liang 	 */
1777477f00f9SKan Liang 	for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
1778477f00f9SKan Liang 		event = cpuc->events[bit];
1779477f00f9SKan Liang 		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
1780477f00f9SKan Liang 			intel_pmu_save_and_restart_reload(event, 0);
1781477f00f9SKan Liang 	}
1782477f00f9SKan Liang }
1783477f00f9SKan Liang 
17847010d129SBorislav Petkov static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
17857010d129SBorislav Petkov {
17867010d129SBorislav Petkov 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
17877010d129SBorislav Petkov 	struct debug_store *ds = cpuc->ds;
17887010d129SBorislav Petkov 	struct perf_event *event;
17897010d129SBorislav Petkov 	void *base, *at, *top;
1790ec71a398SKan Liang 	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
1791ec71a398SKan Liang 	short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
1792ec71a398SKan Liang 	int bit, i, size;
1793ec71a398SKan Liang 	u64 mask;
17947010d129SBorislav Petkov 
17957010d129SBorislav Petkov 	if (!x86_pmu.pebs_active)
17967010d129SBorislav Petkov 		return;
17977010d129SBorislav Petkov 
17987010d129SBorislav Petkov 	base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
17997010d129SBorislav Petkov 	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
18007010d129SBorislav Petkov 
18017010d129SBorislav Petkov 	ds->pebs_index = ds->pebs_buffer_base;
18027010d129SBorislav Petkov 
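	/*
	 * The status mask covers the general-purpose counters; with
	 * PMU_FL_PEBS_ALL the fixed counters (starting at bit
	 * INTEL_PMC_IDX_FIXED) can generate PEBS records as well.
	 */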
1803ec71a398SKan Liang 	mask = (1ULL << x86_pmu.max_pebs_events) - 1;
1804ec71a398SKan Liang 	size = x86_pmu.max_pebs_events;
1805ec71a398SKan Liang 	if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
1806ec71a398SKan Liang 		mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
1807ec71a398SKan Liang 		size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
1808ec71a398SKan Liang 	}
1809ec71a398SKan Liang 
1810d31fc13fSKan Liang 	if (unlikely(base >= top)) {
1811477f00f9SKan Liang 		intel_pmu_pebs_event_update_no_drain(cpuc, size);
18127010d129SBorislav Petkov 		return;
1813d31fc13fSKan Liang 	}
18147010d129SBorislav Petkov 
18157010d129SBorislav Petkov 	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
18167010d129SBorislav Petkov 		struct pebs_record_nhm *p = at;
18177010d129SBorislav Petkov 		u64 pebs_status;
18187010d129SBorislav Petkov 
18198ef9b845SPeter Zijlstra 		pebs_status = p->status & cpuc->pebs_enabled;
1820ec71a398SKan Liang 		pebs_status &= mask;
18218ef9b845SPeter Zijlstra 
18228ef9b845SPeter Zijlstra 		/* PEBS v3 has more accurate status bits */
18237010d129SBorislav Petkov 		if (x86_pmu.intel_cap.pebs_format >= 3) {
1824c22497f5SKan Liang 			for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
18257010d129SBorislav Petkov 				counts[bit]++;
18267010d129SBorislav Petkov 
18277010d129SBorislav Petkov 			continue;
18287010d129SBorislav Petkov 		}
18297010d129SBorislav Petkov 
18307010d129SBorislav Petkov 		/*
18317010d129SBorislav Petkov 		 * On some CPUs the PEBS status can be zero when PEBS is
18327010d129SBorislav Petkov 		 * racing with clearing of GLOBAL_STATUS.
18337010d129SBorislav Petkov 		 *
18347010d129SBorislav Petkov 		 * Normally we would drop that record, but in the
18357010d129SBorislav Petkov 		 * case when there is only a single active PEBS event
18367010d129SBorislav Petkov 		 * we can assume it's for that event.
18377010d129SBorislav Petkov 		 */
18387010d129SBorislav Petkov 		if (!pebs_status && cpuc->pebs_enabled &&
18397010d129SBorislav Petkov 			!(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
18407010d129SBorislav Petkov 			pebs_status = cpuc->pebs_enabled;
18417010d129SBorislav Petkov 
18427010d129SBorislav Petkov 		bit = find_first_bit((unsigned long *)&pebs_status,
18437010d129SBorislav Petkov 					x86_pmu.max_pebs_events);
18447010d129SBorislav Petkov 		if (bit >= x86_pmu.max_pebs_events)
18457010d129SBorislav Petkov 			continue;
18467010d129SBorislav Petkov 
18477010d129SBorislav Petkov 		/*
18487010d129SBorislav Petkov 		 * The PEBS hardware does not deal well with events that
18497010d129SBorislav Petkov 		 * happen close to each other and set multiple status bits,
18507010d129SBorislav Petkov 		 * but this should happen rarely.
18517010d129SBorislav Petkov 		 *
18527010d129SBorislav Petkov 		 * If these events include one PEBS and multiple non-PEBS
18537010d129SBorislav Petkov 		 * events, the PEBS record is unaffected and will be
18547010d129SBorislav Petkov 		 * handled normally. (slow path)
18557010d129SBorislav Petkov 		 *
18567010d129SBorislav Petkov 		 * If these events include two or more PEBS events, the
18577010d129SBorislav Petkov 		 * records for the events can be collapsed into a single
18587010d129SBorislav Petkov 		 * one, and it's not possible to reconstruct all events
18597010d129SBorislav Petkov 		 * that caused the PEBS record. This is called a collision.
18607010d129SBorislav Petkov 		 * If a collision happens, the record is dropped.
18617010d129SBorislav Petkov 		 */
18627010d129SBorislav Petkov 		if (p->status != (1ULL << bit)) {
1863c22497f5SKan Liang 			for_each_set_bit(i, (unsigned long *)&pebs_status, size)
18647010d129SBorislav Petkov 				error[i]++;
18657010d129SBorislav Petkov 			continue;
18667010d129SBorislav Petkov 		}
18677010d129SBorislav Petkov 
18687010d129SBorislav Petkov 		counts[bit]++;
18697010d129SBorislav Petkov 	}
18707010d129SBorislav Petkov 
1871c22497f5SKan Liang 	for_each_set_bit(bit, (unsigned long *)&mask, size) {
18727010d129SBorislav Petkov 		if ((counts[bit] == 0) && (error[bit] == 0))
18737010d129SBorislav Petkov 			continue;
18747010d129SBorislav Petkov 
18757010d129SBorislav Petkov 		event = cpuc->events[bit];
18768ef9b845SPeter Zijlstra 		if (WARN_ON_ONCE(!event))
18778ef9b845SPeter Zijlstra 			continue;
18788ef9b845SPeter Zijlstra 
18798ef9b845SPeter Zijlstra 		if (WARN_ON_ONCE(!event->attr.precise_ip))
18808ef9b845SPeter Zijlstra 			continue;
18817010d129SBorislav Petkov 
18827010d129SBorislav Petkov 		/* log the number of dropped samples */
1883475113d9SJiri Olsa 		if (error[bit]) {
18847010d129SBorislav Petkov 			perf_log_lost_samples(event, error[bit]);
18857010d129SBorislav Petkov 
1886475113d9SJiri Olsa 			if (perf_event_account_interrupt(event))
1887475113d9SJiri Olsa 				x86_pmu_stop(event, 0);
1888475113d9SJiri Olsa 		}
1889475113d9SJiri Olsa 
18907010d129SBorislav Petkov 		if (counts[bit]) {
18917010d129SBorislav Petkov 			__intel_pmu_pebs_event(event, iregs, base,
1892c22497f5SKan Liang 					       top, bit, counts[bit],
1893c22497f5SKan Liang 					       setup_pebs_fixed_sample_data);
18947010d129SBorislav Petkov 		}
18957010d129SBorislav Petkov 	}
18967010d129SBorislav Petkov }
18977010d129SBorislav Petkov 
1898c22497f5SKan Liang static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
1899c22497f5SKan Liang {
1900c22497f5SKan Liang 	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
1901c22497f5SKan Liang 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1902c22497f5SKan Liang 	struct debug_store *ds = cpuc->ds;
1903c22497f5SKan Liang 	struct perf_event *event;
1904c22497f5SKan Liang 	void *base, *at, *top;
1905c22497f5SKan Liang 	int bit, size;
1906c22497f5SKan Liang 	u64 mask;
1907c22497f5SKan Liang 
1908c22497f5SKan Liang 	if (!x86_pmu.pebs_active)
1909c22497f5SKan Liang 		return;
1910c22497f5SKan Liang 
1911c22497f5SKan Liang 	base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
1912c22497f5SKan Liang 	top = (struct pebs_basic *)(unsigned long)ds->pebs_index;
1913c22497f5SKan Liang 
1914c22497f5SKan Liang 	ds->pebs_index = ds->pebs_buffer_base;
1915c22497f5SKan Liang 
1916c22497f5SKan Liang 	mask = ((1ULL << x86_pmu.max_pebs_events) - 1) |
1917c22497f5SKan Liang 	       (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
1918c22497f5SKan Liang 	size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
1919c22497f5SKan Liang 
1920c22497f5SKan Liang 	if (unlikely(base >= top)) {
1921c22497f5SKan Liang 		intel_pmu_pebs_event_update_no_drain(cpuc, size);
1922c22497f5SKan Liang 		return;
1923c22497f5SKan Liang 	}
1924c22497f5SKan Liang 
1925c22497f5SKan Liang 	for (at = base; at < top; at += cpuc->pebs_record_size) {
1926c22497f5SKan Liang 		u64 pebs_status;
1927c22497f5SKan Liang 
1928c22497f5SKan Liang 		pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
1929c22497f5SKan Liang 		pebs_status &= mask;
1930c22497f5SKan Liang 
1931c22497f5SKan Liang 		for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
1932c22497f5SKan Liang 			counts[bit]++;
1933c22497f5SKan Liang 	}
1934c22497f5SKan Liang 
1935c22497f5SKan Liang 	for_each_set_bit(bit, (unsigned long *)&mask, size) {
1936c22497f5SKan Liang 		if (counts[bit] == 0)
1937c22497f5SKan Liang 			continue;
1938c22497f5SKan Liang 
1939c22497f5SKan Liang 		event = cpuc->events[bit];
1940c22497f5SKan Liang 		if (WARN_ON_ONCE(!event))
1941c22497f5SKan Liang 			continue;
1942c22497f5SKan Liang 
1943c22497f5SKan Liang 		if (WARN_ON_ONCE(!event->attr.precise_ip))
1944c22497f5SKan Liang 			continue;
1945c22497f5SKan Liang 
1946c22497f5SKan Liang 		__intel_pmu_pebs_event(event, iregs, base,
1947c22497f5SKan Liang 				       top, bit, counts[bit],
1948c22497f5SKan Liang 				       setup_pebs_adaptive_sample_data);
1949c22497f5SKan Liang 	}
1950c22497f5SKan Liang }
1951c22497f5SKan Liang 
19527010d129SBorislav Petkov /*
19537010d129SBorislav Petkov  * BTS, PEBS probe and setup
19547010d129SBorislav Petkov  */
19557010d129SBorislav Petkov 
19567010d129SBorislav Petkov void __init intel_ds_init(void)
19577010d129SBorislav Petkov {
19587010d129SBorislav Petkov 	/*
19597010d129SBorislav Petkov 	 * No support for 32-bit formats
19607010d129SBorislav Petkov 	 */
19617010d129SBorislav Petkov 	if (!boot_cpu_has(X86_FEATURE_DTES64))
19627010d129SBorislav Petkov 		return;
19637010d129SBorislav Petkov 
19647010d129SBorislav Petkov 	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
19657010d129SBorislav Petkov 	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
1966e72daf3fSJiri Olsa 	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
1967*cd6b984fSKan Liang 	if (x86_pmu.version <= 4)
19689b545c04SAndi Kleen 		x86_pmu.pebs_no_isolation = 1;
1969*cd6b984fSKan Liang 
19707010d129SBorislav Petkov 	if (x86_pmu.pebs) {
19717010d129SBorislav Petkov 		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
1972c22497f5SKan Liang 		char *pebs_qual = "";
19737010d129SBorislav Petkov 		int format = x86_pmu.intel_cap.pebs_format;
19747010d129SBorislav Petkov 
1975c22497f5SKan Liang 		if (format < 4)
1976c22497f5SKan Liang 			x86_pmu.intel_cap.pebs_baseline = 0;
1977c22497f5SKan Liang 
19787010d129SBorislav Petkov 		switch (format) {
19797010d129SBorislav Petkov 		case 0:
19807010d129SBorislav Petkov 			pr_cont("PEBS fmt0%c, ", pebs_type);
19817010d129SBorislav Petkov 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
1982e72daf3fSJiri Olsa 			/*
1983e72daf3fSJiri Olsa 			 * Using >PAGE_SIZE buffers makes the WRMSR to
1984e72daf3fSJiri Olsa 			 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
1985e72daf3fSJiri Olsa 			 * mysteriously hang on Core2.
1986e72daf3fSJiri Olsa 			 *
1987e72daf3fSJiri Olsa 			 * As a workaround, we don't do this.
1988e72daf3fSJiri Olsa 			 */
1989e72daf3fSJiri Olsa 			x86_pmu.pebs_buffer_size = PAGE_SIZE;
19907010d129SBorislav Petkov 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
19917010d129SBorislav Petkov 			break;
19927010d129SBorislav Petkov 
19937010d129SBorislav Petkov 		case 1:
19947010d129SBorislav Petkov 			pr_cont("PEBS fmt1%c, ", pebs_type);
19957010d129SBorislav Petkov 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
19967010d129SBorislav Petkov 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
19977010d129SBorislav Petkov 			break;
19987010d129SBorislav Petkov 
19997010d129SBorislav Petkov 		case 2:
20007010d129SBorislav Petkov 			pr_cont("PEBS fmt2%c, ", pebs_type);
20017010d129SBorislav Petkov 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
20027010d129SBorislav Petkov 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
20037010d129SBorislav Petkov 			break;
20047010d129SBorislav Petkov 
20057010d129SBorislav Petkov 		case 3:
20067010d129SBorislav Petkov 			pr_cont("PEBS fmt3%c, ", pebs_type);
20077010d129SBorislav Petkov 			x86_pmu.pebs_record_size =
20087010d129SBorislav Petkov 						sizeof(struct pebs_record_skl);
20097010d129SBorislav Petkov 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
2010174afc3eSKan Liang 			x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
20117010d129SBorislav Petkov 			break;
20127010d129SBorislav Petkov 
2013c22497f5SKan Liang 		case 4:
2014c22497f5SKan Liang 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
2015c22497f5SKan Liang 			x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
2016c22497f5SKan Liang 			if (x86_pmu.intel_cap.pebs_baseline) {
2017c22497f5SKan Liang 				x86_pmu.large_pebs_flags |=
2018c22497f5SKan Liang 					PERF_SAMPLE_BRANCH_STACK |
2019c22497f5SKan Liang 					PERF_SAMPLE_TIME;
2020c22497f5SKan Liang 				x86_pmu.flags |= PMU_FL_PEBS_ALL;
2021c22497f5SKan Liang 				pebs_qual = "-baseline";
2022e321d02dSKan Liang 				x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
2023c22497f5SKan Liang 			} else {
2024c22497f5SKan Liang 				/* Only basic record supported */
2025c22497f5SKan Liang 				x86_pmu.large_pebs_flags &=
2026c22497f5SKan Liang 					~(PERF_SAMPLE_ADDR |
2027c22497f5SKan Liang 					  PERF_SAMPLE_TIME |
2028c22497f5SKan Liang 					  PERF_SAMPLE_DATA_SRC |
2029c22497f5SKan Liang 					  PERF_SAMPLE_TRANSACTION |
2030c22497f5SKan Liang 					  PERF_SAMPLE_REGS_USER |
2031c22497f5SKan Liang 					  PERF_SAMPLE_REGS_INTR);
2032c22497f5SKan Liang 			}
2033c22497f5SKan Liang 			pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
2034c22497f5SKan Liang 			break;
2035c22497f5SKan Liang 
20367010d129SBorislav Petkov 		default:
20377010d129SBorislav Petkov 			pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
20387010d129SBorislav Petkov 			x86_pmu.pebs = 0;
20397010d129SBorislav Petkov 		}
20407010d129SBorislav Petkov 	}
20417010d129SBorislav Petkov }
20427010d129SBorislav Petkov 
20437010d129SBorislav Petkov void perf_restore_debug_store(void)
20447010d129SBorislav Petkov {
20457010d129SBorislav Petkov 	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
20467010d129SBorislav Petkov 
20477010d129SBorislav Petkov 	if (!x86_pmu.bts && !x86_pmu.pebs)
20487010d129SBorislav Petkov 		return;
20497010d129SBorislav Petkov 
20507010d129SBorislav Petkov 	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
20517010d129SBorislav Petkov }
2052