// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>

#include <asm/cpu_entry_area.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "../perf_event.h"

/* Waste a full page so it can be mapped into the cpu_entry_area */
DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
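/*
 * The debug store lives in the cpu_entry_area (together with the PEBS/BTS
 * buffers mapped via ds_update_cea() below), i.e. at a per-CPU virtual
 * address that is present in every set of page tables. The CPU writes
 * records through these linear addresses, so -- presumably the reason for
 * this placement -- they must stay resolvable no matter which page tables
 * are active (including the user half under PTI).
 */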

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24
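/*
 * A hardware BTS record is three u64s -- from, to, flags -- hence 24 bytes;
 * see the local struct bts_record in intel_pmu_drain_bts_buffer().
 */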

#define PEBS_FIXUP_SIZE		PAGE_SIZE

/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */

union intel_x86_pebs_dse {
	u64 val;
	struct {
		unsigned int ld_dse:4;
		unsigned int ld_stlb_miss:1;
		unsigned int ld_locked:1;
		unsigned int ld_data_blk:1;
		unsigned int ld_addr_blk:1;
		unsigned int ld_reserved:24;
	};
	struct {
		unsigned int st_l1d_hit:1;
		unsigned int st_reserved1:3;
		unsigned int st_stlb_miss:1;
		unsigned int st_locked:1;
		unsigned int st_reserved2:26;
	};
	struct {
		unsigned int st_lat_dse:4;
		unsigned int st_lat_stlb_miss:1;
		unsigned int st_lat_locked:1;
		unsigned int ld_reserved3:26;
	};
	struct {
		unsigned int mtl_dse:5;
		unsigned int mtl_locked:1;
		unsigned int mtl_stlb_miss:1;
		unsigned int mtl_fwd_blk:1;
		unsigned int ld_reserved4:24;
	};
};
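/*
 * Example (load form): a data-source value of 0x31 decodes as ld_dse = 1
 * (an L1 hit in the mapping table below), ld_stlb_miss = 1 and
 * ld_locked = 1: a locked load that hit the L1 but missed the 2nd level TLB.
 */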


/*
 * Map PEBS Load Latency Data Source encodings to generic
 * memory data source information
 */
#define P(a, b) PERF_MEM_S(a, b)
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define LEVEL(x) P(LVLNUM, x)
#define REM P(REMOTE, REMOTE)
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
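/*
 * P() wraps PERF_MEM_S() from <uapi/linux/perf_event.h>, which shifts a
 * named field value into its slot of a perf_mem_data_src word, e.g.:
 *
 *	OP_LH | P(LVL, L2)
 *	  == PERF_MEM_S(OP, LOAD) | PERF_MEM_S(LVL, HIT) | PERF_MEM_S(LVL, L2)
 *
 * so the table entries below are pre-composed perf_mem_data_src values.
 */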

/* Version for Sandy Bridge and later */
static u64 pebs_data_source[] = {
	P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
	OP_LH | P(LVL, L1)  | LEVEL(L1) | P(SNOOP, NONE),  /* 0x01: L1 local */
	OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
	OP_LH | P(LVL, L2)  | LEVEL(L2) | P(SNOOP, NONE),  /* 0x03: L2 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, NONE),  /* 0x04: L3 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, MISS),  /* 0x05: L3 hit, snoop miss */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, HIT),   /* 0x06: L3 hit, snoop hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, HITM),  /* 0x07: L3 hit, snoop hitm */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x08: L3 miss snoop hit */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, HIT),       /* 0x0a: L3 miss, shared */
	OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x0b: L3 miss, shared */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | SNOOP_NONE_MISS,     /* 0x0c: L3 miss, excl */
	OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */
	OP_LH | P(LVL, IO)  | LEVEL(NA) | P(SNOOP, NONE), /* 0x0e: I/O */
	OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0f: uncached */
};

/* Patch up minor differences in the bits */
void __init intel_pmu_pebs_data_source_nhm(void)
{
	pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
	pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
	pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
}

static void __init __intel_pmu_pebs_data_source_skl(bool pmem, u64 *data_source)
{
	u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);

	data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
	data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
	data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
	data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
	data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
}

void __init intel_pmu_pebs_data_source_skl(bool pmem)
{
	__intel_pmu_pebs_data_source_skl(pmem, pebs_data_source);
}

static void __init __intel_pmu_pebs_data_source_grt(u64 *data_source)
{
	data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
	data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
	data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD);
}

void __init intel_pmu_pebs_data_source_grt(void)
{
	__intel_pmu_pebs_data_source_grt(pebs_data_source);
}

void __init intel_pmu_pebs_data_source_adl(void)
{
	u64 *data_source;

	data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source;
	memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
	__intel_pmu_pebs_data_source_skl(false, data_source);

	data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;
	memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
	__intel_pmu_pebs_data_source_grt(data_source);
}

static void __init __intel_pmu_pebs_data_source_cmt(u64 *data_source)
{
	data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD);
	data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
	data_source[0x0a] = OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, NONE);
	data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
	data_source[0x0c] = OP_LH | LEVEL(RAM) | REM | P(SNOOPX, FWD);
	data_source[0x0d] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, HITM);
}

void __init intel_pmu_pebs_data_source_mtl(void)
{
	u64 *data_source;

	data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source;
	memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
	__intel_pmu_pebs_data_source_skl(false, data_source);

	data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;
	memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
	__intel_pmu_pebs_data_source_cmt(data_source);
}

void __init intel_pmu_pebs_data_source_cmt(void)
{
	__intel_pmu_pebs_data_source_cmt(pebs_data_source);
}

static u64 precise_store_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);

	dse.val = status;

	/*
	 * bit 4: TLB access
	 * 1 = store missed 2nd level TLB
	 *
	 * so on a miss it either hit the page walker or faulted to the OS;
	 * otherwise it hit the 2nd level TLB
	 */
	if (dse.st_stlb_miss)
		val |= P(TLB, MISS);
	else
		val |= P(TLB, HIT);

	/*
	 * bit 0: hit L1 data cache
	 * if not set, then all we know is that
	 * it missed L1D
	 */
	if (dse.st_l1d_hit)
		val |= P(LVL, HIT);
	else
		val |= P(LVL, MISS);

	/*
	 * bit 5: Locked prefix
	 */
	if (dse.st_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
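/*
 * Example: status 0x1 (only st_l1d_hit set) produces a store that hit both
 * the L1D (LVL L1 | HIT) and the 2nd level TLB (TLB L2 | HIT), with the
 * snoop info left at the P(SNOOP, NA) default.
 */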

static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{
	union perf_mem_data_src dse;

	dse.val = PERF_MEM_NA;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
		dse.mem_op = PERF_MEM_OP_STORE;
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
		dse.mem_op = PERF_MEM_OP_LOAD;

	/*
	 * L1 info only valid for following events:
	 *
	 * MEM_UOPS_RETIRED.STLB_MISS_STORES
	 * MEM_UOPS_RETIRED.LOCK_STORES
	 * MEM_UOPS_RETIRED.SPLIT_STORES
	 * MEM_UOPS_RETIRED.ALL_STORES
	 */
	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
		if (status & 1)
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		else
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
	}
	return dse.val;
}

static inline void pebs_set_tlb_lock(u64 *val, bool tlb, bool lock)
{
	/*
	 * TLB access
	 * 0 = did not miss 2nd level TLB
	 * 1 = missed 2nd level TLB
	 */
	if (tlb)
		*val |= P(TLB, MISS) | P(TLB, L2);
	else
		*val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);

	/* locked prefix */
	if (lock)
		*val |= P(LOCK, LOCKED);
}

/* Retrieve the latency data for e-core of ADL */
static u64 __adl_latency_data_small(struct perf_event *event, u64 status,
				    u8 dse, bool tlb, bool lock, bool blk)
{
	u64 val;

	WARN_ON_ONCE(hybrid_pmu(event->pmu)->cpu_type == hybrid_big);

	dse &= PERF_PEBS_DATA_SOURCE_MASK;
	val = hybrid_var(event->pmu, pebs_data_source)[dse];

	pebs_set_tlb_lock(&val, tlb, lock);

	if (blk)
		val |= P(BLK, DATA);
	else
		val |= P(BLK, NA);

	return val;
}

u64 adl_latency_data_small(struct perf_event *event, u64 status)
{
	union intel_x86_pebs_dse dse;

	dse.val = status;

	return __adl_latency_data_small(event, status, dse.ld_dse,
					dse.ld_locked, dse.ld_stlb_miss,
					dse.ld_data_blk);
}

/* Retrieve the latency data for e-core of MTL */
u64 mtl_latency_data_small(struct perf_event *event, u64 status)
{
	union intel_x86_pebs_dse dse;

	dse.val = status;

	return __adl_latency_data_small(event, status, dse.mtl_dse,
					dse.mtl_stlb_miss, dse.mtl_locked,
					dse.mtl_fwd_blk);
}
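/*
 * Note the differing argument order in the two wrappers above: the ADL and
 * MTL e-cores encode the DSE status bits differently (compare the ld_* and
 * mtl_* bitfields in union intel_x86_pebs_dse), so each wrapper maps its
 * own fields onto the common dse/tlb/lock/blk parameters.
 */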

static u64 load_latency_data(struct perf_event *event, u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val;

	dse.val = status;

	/*
	 * use the mapping table for bits 0-3
	 */
	val = hybrid_var(event->pmu, pebs_data_source)[dse.ld_dse];

	/*
	 * Nehalem models do not support TLB, Lock infos
	 */
	if (x86_pmu.pebs_no_tlb) {
		val |= P(TLB, NA) | P(LOCK, NA);
		return val;
	}

	pebs_set_tlb_lock(&val, dse.ld_stlb_miss, dse.ld_locked);

	/*
	 * Ice Lake and earlier models do not support block infos.
	 */
	if (!x86_pmu.pebs_block) {
		val |= P(BLK, NA);
		return val;
	}
	/*
	 * bit 6: load was blocked since its data could not be forwarded
	 *        from a preceding store
	 */
	if (dse.ld_data_blk)
		val |= P(BLK, DATA);

	/*
	 * bit 7: load was blocked due to potential address conflict with
	 *        a preceding store
	 */
	if (dse.ld_addr_blk)
		val |= P(BLK, ADDR);

	if (!dse.ld_data_blk && !dse.ld_addr_blk)
		val |= P(BLK, NA);

	return val;
}
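/*
 * Example: status 0x34 decodes as ld_dse = 4 (table entry 0x04, L3 hit)
 * with ld_stlb_miss and ld_locked set, so on a part with PEBS block info
 * the result is the 0x04 entry plus TLB MISS, LOCK LOCKED and BLK NA.
 */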

static u64 store_latency_data(struct perf_event *event, u64 status)
{
	union intel_x86_pebs_dse dse;
	union perf_mem_data_src src;
	u64 val;

	dse.val = status;

	/*
	 * use the mapping table for bits 0-3
	 */
	val = hybrid_var(event->pmu, pebs_data_source)[dse.st_lat_dse];

	pebs_set_tlb_lock(&val, dse.st_lat_stlb_miss, dse.st_lat_locked);

	val |= P(BLK, NA);

	/*
	 * the pebs_data_source table is only for loads
	 * so override the mem_op to say STORE instead
	 */
	src.val = val;
	src.mem_op = P(OP, STORE);

	return src.val;
}

struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

/*
 * Same as pebs_record_nhm, with two additional fields.
 */
struct pebs_record_hsw {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
};

union hsw_tsx_tuning {
	struct {
		u32 cycles_last_block     : 32,
		    hle_abort             : 1,
		    rtm_abort             : 1,
		    instruction_abort     : 1,
		    non_instruction_abort : 1,
		    retry                 : 1,
		    data_conflict         : 1,
		    capacity_writes       : 1,
		    capacity_reads        : 1;
	};
	u64 value;
};

#define PEBS_HSW_TSX_FLAGS	0xff00000000ULL
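/*
 * 0xff00000000 selects bits 32-39 of tsx_tuning, i.e. the eight single-bit
 * abort flags that follow the 32-bit cycles_last_block field in
 * union hsw_tsx_tuning above.
 */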

/* Same as HSW, plus TSC */

struct pebs_record_skl {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
	u64 tsc;
};

void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static DEFINE_PER_CPU(void *, insn_buffer);

static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long)cea;
	phys_addr_t pa;
	size_t msz = 0;

	pa = virt_to_phys(addr);

	preempt_disable();
	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, pa, prot);

	/*
	 * This is a cross-CPU update of the cpu_entry_area; we must shoot
	 * down all TLB entries for it.
	 */
	flush_tlb_kernel_range(start, start + size);
	preempt_enable();
}

static void ds_clear_cea(void *cea, size_t size)
{
	unsigned long start = (unsigned long)cea;
	size_t msz = 0;

	preempt_disable();
	for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);

	flush_tlb_kernel_range(start, start + size);
	preempt_enable();
}

static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
{
	unsigned int order = get_order(size);
	int node = cpu_to_node(cpu);
	struct page *page;

	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
	return page ? page_address(page) : NULL;
}

static void dsfree_pages(const void *buffer, size_t size)
{
	if (buffer)
		free_pages((unsigned long)buffer, get_order(size));
}

static int alloc_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	size_t bsiz = x86_pmu.pebs_buffer_size;
	int max, node = cpu_to_node(cpu);
	void *buffer, *insn_buff, *cea;

	if (!x86_pmu.pebs)
		return 0;

	buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
	if (unlikely(!buffer))
		return -ENOMEM;

	/*
	 * HSW+ already provides us the eventing ip; no need to allocate this
	 * buffer then.
	 */
	if (x86_pmu.intel_cap.pebs_format < 2) {
		insn_buff = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
		if (!insn_buff) {
			dsfree_pages(buffer, bsiz);
			return -ENOMEM;
		}
		per_cpu(insn_buffer, cpu) = insn_buff;
	}
	hwev->ds_pebs_vaddr = buffer;
	/* Update the cpu entry area mapping */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds->pebs_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
	ds->pebs_index = ds->pebs_buffer_base;
	max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
	ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
	return 0;
}

static void release_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	void *cea;

	if (!x86_pmu.pebs)
		return;

	kfree(per_cpu(insn_buffer, cpu));
	per_cpu(insn_buffer, cpu) = NULL;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
	dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
	hwev->ds_pebs_vaddr = NULL;
}

static int alloc_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	void *buffer, *cea;
	int max;

	if (!x86_pmu.bts)
		return 0;

	buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
	if (unlikely(!buffer)) {
		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
		return -ENOMEM;
	}
	hwev->ds_bts_vaddr = buffer;
	/* Update the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds->bts_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
	ds->bts_index = ds->bts_buffer_base;
	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
				   max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
				      (max / 16) * BTS_RECORD_SIZE;
	return 0;
}
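/*
 * The interrupt threshold above sits one sixteenth of the buffer below its
 * end, i.e. the BTS interrupt is raised with up to 15/16 of the records
 * filled in; the size estimate in intel_pmu_drain_bts_buffer() relies on
 * this bound.
 */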

static void release_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	void *cea;

	if (!x86_pmu.bts)
		return;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds_clear_cea(cea, BTS_BUFFER_SIZE);
	dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
	hwev->ds_bts_vaddr = NULL;
}

static int alloc_ds_buffer(int cpu)
{
	struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;

	memset(ds, 0, sizeof(*ds));
	per_cpu(cpu_hw_events, cpu).ds = ds;
	return 0;
}

static void release_ds_buffer(int cpu)
{
	per_cpu(cpu_hw_events, cpu).ds = NULL;
}

void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	for_each_possible_cpu(cpu)
		release_ds_buffer(cpu);

	for_each_possible_cpu(cpu) {
		/*
		 * Again, ignore errors from offline CPUs; they will no longer
		 * observe cpu_hw_events.ds and will not program the DS_AREA
		 * when they come up.
		 */
		fini_debug_store_on_cpu(cpu);
	}

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
	}
}

void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_possible_cpu(cpu) {
			/*
			 * Ignore wrmsr_on_cpu() errors for offline CPUs; they
			 * will get this call through intel_pmu_cpu_starting().
			 */
			init_debug_store_on_cpu(cpu);
		}
	}
}

/*
 * BTS
 */

struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);

void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	if (config & ARCH_PERFMON_EVENTSEL_INT)
		debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *base, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	unsigned long skip = 0;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top  = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= base)
		return 0;

	memset(&regs, 0, sizeof(regs));

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * BTS leaks kernel addresses in branches across the cpl boundary,
	 * such as traps or system calls, so unless the user is asking for
	 * kernel tracing (and right now it's not possible), we'd need to
	 * filter them out. But first we need to count how many of those we
	 * have in the current batch. This is an extra O(n) pass; however,
	 * it's much faster than the other one, especially considering that
	 * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see
	 * alloc_bts_buffer()).
	 */
	for (at = base; at < top; at++) {
		/*
		 * Note that right now *this* BTS code only works if
		 * attr::exclude_kernel is set, but let's keep this extra
		 * check here in case that changes.
		 */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			skip++;
	}

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	rcu_read_lock();
	perf_prepare_sample(&data, event, &regs);
	perf_prepare_header(&header, &data, event, &regs);

	if (perf_output_begin(&handle, &data, event,
			      header.size * (top - base - skip)))
		goto unlock;

	for (at = base; at < top; at++) {
		/* Filter out any records that contain kernel addresses. */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			continue;

		data.ip   = at->from;
		data.addr = at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
unlock:
	rcu_read_unlock();
	return 1;
}

static inline void intel_pmu_drain_pebs_buffer(void)
{
	struct perf_sample_data data;

	x86_pmu.drain_pebs(NULL, &data);
}

/*
 * PEBS
 */
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	EVENT_CONSTRAINT_END
};
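/*
 * Decoding the 0x108000c0-style encodings used for cycles:p here and in the
 * tables below: event select 0xc0 (INST_RETIRED.ANY_P) in bits 0-7, the
 * invert bit (bit 23, 0x800000) and cmask = 16 in bits 24-31 (0x10000000),
 * matching the "inv=1, cmask=16" annotations.
 */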
8667010d129SBorislav Petkov
8677010d129SBorislav Petkov struct event_constraint intel_atom_pebs_event_constraints[] = {
8687010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
8697010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
8707010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
8717010d129SBorislav Petkov /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
87223e3983aSStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
8737010d129SBorislav Petkov /* Allow all events as PEBS with no flags */
8747010d129SBorislav Petkov INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
8757010d129SBorislav Petkov EVENT_CONSTRAINT_END
8767010d129SBorislav Petkov };
8777010d129SBorislav Petkov
8787010d129SBorislav Petkov struct event_constraint intel_slm_pebs_event_constraints[] = {
8797010d129SBorislav Petkov /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
88023e3983aSStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
8817010d129SBorislav Petkov /* Allow all events as PEBS with no flags */
8827010d129SBorislav Petkov INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
8837010d129SBorislav Petkov EVENT_CONSTRAINT_END
8847010d129SBorislav Petkov };
8857010d129SBorislav Petkov
8868b92c3a7SKan Liang struct event_constraint intel_glm_pebs_event_constraints[] = {
8878b92c3a7SKan Liang /* Allow all events as PEBS with no flags */
8888b92c3a7SKan Liang INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
8898b92c3a7SKan Liang EVENT_CONSTRAINT_END
8908b92c3a7SKan Liang };
8918b92c3a7SKan Liang
892f83d2f91SKan Liang struct event_constraint intel_grt_pebs_event_constraints[] = {
893f83d2f91SKan Liang /* Allow all events as PEBS with no flags */
894cde643ffSKan Liang INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0x3),
89539a41278SKan Liang INTEL_HYBRID_LAT_CONSTRAINT(0x6d0, 0xf),
896f83d2f91SKan Liang EVENT_CONSTRAINT_END
897f83d2f91SKan Liang };
898f83d2f91SKan Liang
8997010d129SBorislav Petkov struct event_constraint intel_nehalem_pebs_event_constraints[] = {
9007010d129SBorislav Petkov INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
9017010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
9027010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
9037010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */
9047010d129SBorislav Petkov INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
9057010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
9067010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
9077010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
9087010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
9097010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
9107010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
9117010d129SBorislav Petkov /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
91223e3983aSStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
9137010d129SBorislav Petkov EVENT_CONSTRAINT_END
9147010d129SBorislav Petkov };
9157010d129SBorislav Petkov
9167010d129SBorislav Petkov struct event_constraint intel_westmere_pebs_event_constraints[] = {
9177010d129SBorislav Petkov INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
9187010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
9197010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
9207010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */
9217010d129SBorislav Petkov INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
9227010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
9237010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
9247010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
9257010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
9267010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
9277010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
9287010d129SBorislav Petkov /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
92923e3983aSStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
9307010d129SBorislav Petkov EVENT_CONSTRAINT_END
9317010d129SBorislav Petkov };
9327010d129SBorislav Petkov
9337010d129SBorislav Petkov struct event_constraint intel_snb_pebs_event_constraints[] = {
9347010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
9357010d129SBorislav Petkov INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
9367010d129SBorislav Petkov INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
9377010d129SBorislav Petkov /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
93823e3983aSStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
9397010d129SBorislav Petkov INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
9407010d129SBorislav Petkov INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
9417010d129SBorislav Petkov INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
9427010d129SBorislav Petkov INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
9437010d129SBorislav Petkov /* Allow all events as PEBS with no flags */
9447010d129SBorislav Petkov INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
9457010d129SBorislav Petkov EVENT_CONSTRAINT_END
9467010d129SBorislav Petkov };
9477010d129SBorislav Petkov
9487010d129SBorislav Petkov struct event_constraint intel_ivb_pebs_event_constraints[] = {
9497010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
9507010d129SBorislav Petkov INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
9517010d129SBorislav Petkov INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
9527010d129SBorislav Petkov /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
95323e3983aSStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
9547010d129SBorislav Petkov /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
95523e3983aSStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
9567010d129SBorislav Petkov INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
9577010d129SBorislav Petkov INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
9587010d129SBorislav Petkov INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
9597010d129SBorislav Petkov INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
9607010d129SBorislav Petkov /* Allow all events as PEBS with no flags */
9617010d129SBorislav Petkov INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
9627010d129SBorislav Petkov EVENT_CONSTRAINT_END
9637010d129SBorislav Petkov };
9647010d129SBorislav Petkov
9657010d129SBorislav Petkov struct event_constraint intel_hsw_pebs_event_constraints[] = {
9667010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
9677010d129SBorislav Petkov INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
9687010d129SBorislav Petkov /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
96923e3983aSStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
9707010d129SBorislav Petkov /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
97123e3983aSStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
9727010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
9737010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
9747010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
9757010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
9767010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
9777010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
9787010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
9797010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
9807010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
9817010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
9827010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
9837010d129SBorislav Petkov /* Allow all events as PEBS with no flags */
9847010d129SBorislav Petkov INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
9857010d129SBorislav Petkov EVENT_CONSTRAINT_END
9867010d129SBorislav Petkov };
9877010d129SBorislav Petkov
988b3e62463SStephane Eranian struct event_constraint intel_bdw_pebs_event_constraints[] = {
989b3e62463SStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
990b3e62463SStephane Eranian INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
991b3e62463SStephane Eranian /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
99223e3983aSStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
993b3e62463SStephane Eranian /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
99423e3983aSStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
995b3e62463SStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
996b3e62463SStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
997b3e62463SStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
998b3e62463SStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
999b3e62463SStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
1000b3e62463SStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
1001b3e62463SStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
1002b3e62463SStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
1003b3e62463SStephane Eranian INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
1004b3e62463SStephane Eranian INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
1005b3e62463SStephane Eranian INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
1006b3e62463SStephane Eranian /* Allow all events as PEBS with no flags */
1007b3e62463SStephane Eranian INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
1008b3e62463SStephane Eranian EVENT_CONSTRAINT_END
1009b3e62463SStephane Eranian };
1010b3e62463SStephane Eranian
1011b3e62463SStephane Eranian
10127010d129SBorislav Petkov struct event_constraint intel_skl_pebs_event_constraints[] = {
10137010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
10147010d129SBorislav Petkov /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
101523e3983aSStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
10167010d129SBorislav Petkov /* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
101723e3983aSStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
10187010d129SBorislav Petkov INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */
10197010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
10207010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
10217010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
10227010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */
10237010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
10247010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
10257010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
10267010d129SBorislav Petkov INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
10277010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
10287010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
10297010d129SBorislav Petkov INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_L3_MISS_RETIRED.* */
10307010d129SBorislav Petkov /* Allow all events as PEBS with no flags */
10317010d129SBorislav Petkov INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
10327010d129SBorislav Petkov EVENT_CONSTRAINT_END
10337010d129SBorislav Petkov };
10347010d129SBorislav Petkov
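/*
 * The wide counter masks below index the extended counter bitmap:
 * bit 32 (0x100000000ULL) is fixed counter 0 (INTEL_PMC_IDX_FIXED)
 * and bit 35 (0x800000000ULL) is the fixed SLOTS counter.
 */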
103560176089SKan Liang struct event_constraint intel_icl_pebs_event_constraints[] = {
10362de71ee1SStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x100000000ULL), /* old INST_RETIRED.PREC_DIST */
10372de71ee1SStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x0100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
10383d0c3953SKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */
103960176089SKan Liang
104060176089SKan Liang INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
1041acc5568bSKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
1042acc5568bSKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
1043acc5568bSKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
1044acc5568bSKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
1045acc5568bSKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
1046acc5568bSKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
1047acc5568bSKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
104860176089SKan Liang
104960176089SKan Liang INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */
105060176089SKan Liang
105160176089SKan Liang INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
105260176089SKan Liang
105360176089SKan Liang /*
105460176089SKan Liang * Everything else is handled by PMU_FL_PEBS_ALL, because we
105560176089SKan Liang * need the full constraints from the main table.
105660176089SKan Liang */
105760176089SKan Liang
105860176089SKan Liang EVENT_CONSTRAINT_END
105960176089SKan Liang };
106060176089SKan Liang
106161b985e3SKan Liang struct event_constraint intel_spr_pebs_event_constraints[] = {
10622de71ee1SStephane Eranian INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
106361b985e3SKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */
106461b985e3SKan Liang
106561b985e3SKan Liang INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xfe), /* INST_RETIRED.* */
106661b985e3SKan Liang INTEL_PLD_CONSTRAINT(0x1cd, 0xfe), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
106761b985e3SKan Liang INTEL_PSD_CONSTRAINT(0x2cd, 0x1), /* MEM_TRANS_RETIRED.STORE_SAMPLE */
10680916886bSKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
10690916886bSKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
10700916886bSKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
10710916886bSKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
10720916886bSKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
10730916886bSKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
10740916886bSKan Liang INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
107561b985e3SKan Liang
107661b985e3SKan Liang INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf),
107761b985e3SKan Liang
107861b985e3SKan Liang INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),
107961b985e3SKan Liang
108061b985e3SKan Liang /*
108161b985e3SKan Liang * Everything else is handled by PMU_FL_PEBS_ALL, because we
108261b985e3SKan Liang * need the full constraints from the main table.
108361b985e3SKan Liang */
108461b985e3SKan Liang
108561b985e3SKan Liang EVENT_CONSTRAINT_END
108661b985e3SKan Liang };
108761b985e3SKan Liang
10887010d129SBorislav Petkov struct event_constraint *intel_pebs_constraints(struct perf_event *event)
10897010d129SBorislav Petkov {
109024ee38ffSKan Liang struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints);
10917010d129SBorislav Petkov struct event_constraint *c;
10927010d129SBorislav Petkov
10937010d129SBorislav Petkov if (!event->attr.precise_ip)
10947010d129SBorislav Petkov return NULL;
10957010d129SBorislav Petkov
109624ee38ffSKan Liang if (pebs_constraints) {
109724ee38ffSKan Liang for_each_event_constraint(c, pebs_constraints) {
109863b79f6eSPeter Zijlstra if (constraint_match(c, event->hw.config)) {
10997010d129SBorislav Petkov event->hw.flags |= c->flags;
11007010d129SBorislav Petkov return c;
11017010d129SBorislav Petkov }
11027010d129SBorislav Petkov }
11037010d129SBorislav Petkov }
11047010d129SBorislav Petkov
110531962340SKan Liang /*
110631962340SKan Liang * Extended PEBS support:
110731962340SKan Liang * return NULL so the core code searches the normal constraint tables.
110831962340SKan Liang */
110931962340SKan Liang if (x86_pmu.flags & PMU_FL_PEBS_ALL)
111031962340SKan Liang return NULL;
111131962340SKan Liang
11127010d129SBorislav Petkov return &emptyconstraint;
11137010d129SBorislav Petkov }
11147010d129SBorislav Petkov
111509e61b4fSPeter Zijlstra /*
111609e61b4fSPeter Zijlstra * We need the sched_task callback even for per-cpu events when we use
111709e61b4fSPeter Zijlstra * the large interrupt threshold, such that we can provide PID and TID
111809e61b4fSPeter Zijlstra * to PEBS samples.
111909e61b4fSPeter Zijlstra */
112009e61b4fSPeter Zijlstra static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
11217010d129SBorislav Petkov {
112242880f72SAlexander Shishkin if (cpuc->n_pebs == cpuc->n_pebs_via_pt)
112342880f72SAlexander Shishkin return false;
112442880f72SAlexander Shishkin
112509e61b4fSPeter Zijlstra return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
112609e61b4fSPeter Zijlstra }
112709e61b4fSPeter Zijlstra
1128bd275681SPeter Zijlstra void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
1129df6c3db8SJiri Olsa {
1130df6c3db8SJiri Olsa struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1131df6c3db8SJiri Olsa
1132df6c3db8SJiri Olsa if (!sched_in && pebs_needs_sched_cb(cpuc))
1133df6c3db8SJiri Olsa intel_pmu_drain_pebs_buffer();
1134df6c3db8SJiri Olsa }
1135df6c3db8SJiri Olsa
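/*
 * Threshold placement, sketched:
 *
 *   single PEBS: threshold = pebs_buffer_base + 1 record, so a PMI
 *                fires after every record;
 *   large PEBS:  threshold = pebs_absolute_maximum - reserved records,
 *                keeping one spare slot per potentially armed counter
 *                for records that land after the threshold is crossed.
 */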
113609e61b4fSPeter Zijlstra static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
113709e61b4fSPeter Zijlstra {
113809e61b4fSPeter Zijlstra struct debug_store *ds = cpuc->ds;
1139d4b294bfSKan Liang int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
1140d4b294bfSKan Liang int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
114109e61b4fSPeter Zijlstra u64 threshold;
1142ec71a398SKan Liang int reserved;
1143ec71a398SKan Liang
114442880f72SAlexander Shishkin if (cpuc->n_pebs_via_pt)
114542880f72SAlexander Shishkin return;
114642880f72SAlexander Shishkin
1147ec71a398SKan Liang if (x86_pmu.flags & PMU_FL_PEBS_ALL)
1148d4b294bfSKan Liang reserved = max_pebs_events + num_counters_fixed;
1149ec71a398SKan Liang else
1150d4b294bfSKan Liang reserved = max_pebs_events;
115109e61b4fSPeter Zijlstra
115209e61b4fSPeter Zijlstra if (cpuc->n_pebs == cpuc->n_large_pebs) {
115309e61b4fSPeter Zijlstra threshold = ds->pebs_absolute_maximum -
1154c22497f5SKan Liang reserved * cpuc->pebs_record_size;
115509e61b4fSPeter Zijlstra } else {
1156c22497f5SKan Liang threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
115709e61b4fSPeter Zijlstra }
115809e61b4fSPeter Zijlstra
115909e61b4fSPeter Zijlstra ds->pebs_interrupt_threshold = threshold;
116009e61b4fSPeter Zijlstra }
116109e61b4fSPeter Zijlstra
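/*
 * The adaptive record grows front to back: the mandatory pebs_basic
 * group followed by each optional group enabled in pebs_data_cfg.
 * E.g. (illustrative) MEMINFO + GP gives a record of
 * sizeof(pebs_basic) + sizeof(pebs_meminfo) + sizeof(pebs_gprs).
 */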
1162c22497f5SKan Liang static void adaptive_pebs_record_size_update(void)
116309e61b4fSPeter Zijlstra {
1164c22497f5SKan Liang struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1165c22497f5SKan Liang u64 pebs_data_cfg = cpuc->pebs_data_cfg;
1166c22497f5SKan Liang int sz = sizeof(struct pebs_basic);
1167c22497f5SKan Liang
1168c22497f5SKan Liang if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
1169c22497f5SKan Liang sz += sizeof(struct pebs_meminfo);
1170c22497f5SKan Liang if (pebs_data_cfg & PEBS_DATACFG_GP)
1171c22497f5SKan Liang sz += sizeof(struct pebs_gprs);
1172c22497f5SKan Liang if (pebs_data_cfg & PEBS_DATACFG_XMMS)
1173c22497f5SKan Liang sz += sizeof(struct pebs_xmm);
1174c22497f5SKan Liang if (pebs_data_cfg & PEBS_DATACFG_LBRS)
11755624986dSKan Liang sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry);
1176c22497f5SKan Liang
1177c22497f5SKan Liang cpuc->pebs_record_size = sz;
1178c22497f5SKan Liang }
1179c22497f5SKan Liang
1180c22497f5SKan Liang #define PERF_PEBS_MEMINFO_TYPE (PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC | \
11812a6c6b7dSKan Liang PERF_SAMPLE_PHYS_ADDR | \
11822a6c6b7dSKan Liang PERF_SAMPLE_WEIGHT_TYPE | \
118376a5433fSKan Liang PERF_SAMPLE_TRANSACTION | \
118476a5433fSKan Liang PERF_SAMPLE_DATA_PAGE_SIZE)
1185c22497f5SKan Liang
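/*
 * Example (illustrative): sampling with PERF_SAMPLE_DATA_SRC or
 * PERF_SAMPLE_ADDR selects PEBS_DATACFG_MEMINFO below, and asking
 * for interrupt regs, precise_ip < 2, or an RTM abort weight pulls
 * in PEBS_DATACFG_GP on top of the basic group.
 */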
1186c22497f5SKan Liang static u64 pebs_update_adaptive_cfg(struct perf_event *event)
1187c22497f5SKan Liang {
1188c22497f5SKan Liang struct perf_event_attr *attr = &event->attr;
1189c22497f5SKan Liang u64 sample_type = attr->sample_type;
1190c22497f5SKan Liang u64 pebs_data_cfg = 0;
1191c22497f5SKan Liang bool gprs, tsx_weight;
1192c22497f5SKan Liang
1193c22497f5SKan Liang if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) &&
1194c22497f5SKan Liang attr->precise_ip > 1)
1195c22497f5SKan Liang return pebs_data_cfg;
1196c22497f5SKan Liang
1197c22497f5SKan Liang if (sample_type & PERF_PEBS_MEMINFO_TYPE)
1198c22497f5SKan Liang pebs_data_cfg |= PEBS_DATACFG_MEMINFO;
1199c22497f5SKan Liang
1200c22497f5SKan Liang /*
1201c22497f5SKan Liang * We need GPRs when:
1202c22497f5SKan Liang * + the user requested them,
1203c22497f5SKan Liang * + precise_ip < 2, to recover the non-event IP,
1204c22497f5SKan Liang * + an RTM TSX weight is requested, to read the abort code from AX.
1205c22497f5SKan Liang */
1206c22497f5SKan Liang gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
1207c22497f5SKan Liang (attr->sample_regs_intr & PEBS_GP_REGS);
1208c22497f5SKan Liang
12092a6c6b7dSKan Liang tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
1210c22497f5SKan Liang ((attr->config & INTEL_ARCH_EVENT_MASK) ==
1211c22497f5SKan Liang x86_pmu.rtm_abort_event);
1212c22497f5SKan Liang
1213c22497f5SKan Liang if (gprs || (attr->precise_ip < 2) || tsx_weight)
1214c22497f5SKan Liang pebs_data_cfg |= PEBS_DATACFG_GP;
1215c22497f5SKan Liang
1216c22497f5SKan Liang if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
1217dce86ac7SKan Liang (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK))
1218c22497f5SKan Liang pebs_data_cfg |= PEBS_DATACFG_XMMS;
1219c22497f5SKan Liang
1220c22497f5SKan Liang if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
1221c22497f5SKan Liang /*
1222c22497f5SKan Liang * For now, always log all LBRs. This could be made
1223c22497f5SKan Liang * configurable later.
1224c22497f5SKan Liang */
1225c22497f5SKan Liang pebs_data_cfg |= PEBS_DATACFG_LBRS |
1226c22497f5SKan Liang ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT);
1227c22497f5SKan Liang }
1228c22497f5SKan Liang
1229c22497f5SKan Liang return pebs_data_cfg;
1230c22497f5SKan Liang }
1231c22497f5SKan Liang
1232c22497f5SKan Liang static void
1233c22497f5SKan Liang pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
1234c22497f5SKan Liang struct perf_event *event, bool add)
1235c22497f5SKan Liang {
1236bd275681SPeter Zijlstra struct pmu *pmu = event->pmu;
1237b752ea0cSKan Liang
1238b6a32f02SJiri Olsa /*
123992f32f10SKan Liang * Make sure we get updated with the first PEBS event.
124092f32f10SKan Liang * During removal, ->pebs_data_cfg is still valid for
124192f32f10SKan Liang * the last PEBS event. Don't clear it.
1242b6a32f02SJiri Olsa */
124392f32f10SKan Liang if ((cpuc->n_pebs == 1) && add)
1244b752ea0cSKan Liang cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;
1245b6a32f02SJiri Olsa
124609e61b4fSPeter Zijlstra if (needed_cb != pebs_needs_sched_cb(cpuc)) {
124709e61b4fSPeter Zijlstra if (!needed_cb)
124809e61b4fSPeter Zijlstra perf_sched_cb_inc(pmu);
124909e61b4fSPeter Zijlstra else
125009e61b4fSPeter Zijlstra perf_sched_cb_dec(pmu);
125109e61b4fSPeter Zijlstra
1252b752ea0cSKan Liang cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW;
125309e61b4fSPeter Zijlstra }
1254b6a32f02SJiri Olsa
1255c22497f5SKan Liang /*
1256c22497f5SKan Liang * The PEBS record doesn't shrink on pmu::del(). Doing so would require
1257c22497f5SKan Liang * iterating all remaining PEBS events to reconstruct the config.
1258c22497f5SKan Liang */
1259c22497f5SKan Liang if (x86_pmu.intel_cap.pebs_baseline && add) {
1260c22497f5SKan Liang u64 pebs_data_cfg;
1261c22497f5SKan Liang
1262c22497f5SKan Liang pebs_data_cfg = pebs_update_adaptive_cfg(event);
1263b752ea0cSKan Liang /*
1264b752ea0cSKan Liang * Be sure to update the thresholds when we change the record.
1265b752ea0cSKan Liang */
1266b752ea0cSKan Liang if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
1267b752ea0cSKan Liang cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW;
1268c22497f5SKan Liang }
1269c22497f5SKan Liang }
1270c22497f5SKan Liang
127168f7082fSPeter Zijlstra void intel_pmu_pebs_add(struct perf_event *event)
127209e61b4fSPeter Zijlstra {
127309e61b4fSPeter Zijlstra struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
127409e61b4fSPeter Zijlstra struct hw_perf_event *hwc = &event->hw;
127509e61b4fSPeter Zijlstra bool needed_cb = pebs_needs_sched_cb(cpuc);
127609e61b4fSPeter Zijlstra
127709e61b4fSPeter Zijlstra cpuc->n_pebs++;
1278174afc3eSKan Liang if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
127909e61b4fSPeter Zijlstra cpuc->n_large_pebs++;
128042880f72SAlexander Shishkin if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
128142880f72SAlexander Shishkin cpuc->n_pebs_via_pt++;
128209e61b4fSPeter Zijlstra
1283c22497f5SKan Liang pebs_update_state(needed_cb, cpuc, event, true);
12847010d129SBorislav Petkov }
12857010d129SBorislav Petkov
128642880f72SAlexander Shishkin static void intel_pmu_pebs_via_pt_disable(struct perf_event *event)
128742880f72SAlexander Shishkin {
128842880f72SAlexander Shishkin struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
128942880f72SAlexander Shishkin
129042880f72SAlexander Shishkin if (!is_pebs_pt(event))
129142880f72SAlexander Shishkin return;
129242880f72SAlexander Shishkin
129342880f72SAlexander Shishkin if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK))
129442880f72SAlexander Shishkin cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK;
129542880f72SAlexander Shishkin }
129642880f72SAlexander Shishkin
129742880f72SAlexander Shishkin static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
129842880f72SAlexander Shishkin {
129942880f72SAlexander Shishkin struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
130042880f72SAlexander Shishkin struct hw_perf_event *hwc = &event->hw;
130142880f72SAlexander Shishkin struct debug_store *ds = cpuc->ds;
13024c58d922SLike Xu u64 value = ds->pebs_event_reset[hwc->idx];
13034c58d922SLike Xu u32 base = MSR_RELOAD_PMC0;
13044c58d922SLike Xu unsigned int idx = hwc->idx;
130542880f72SAlexander Shishkin
130642880f72SAlexander Shishkin if (!is_pebs_pt(event))
130742880f72SAlexander Shishkin return;
130842880f72SAlexander Shishkin
130942880f72SAlexander Shishkin if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
131042880f72SAlexander Shishkin cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD;
131142880f72SAlexander Shishkin
131242880f72SAlexander Shishkin cpuc->pebs_enabled |= PEBS_OUTPUT_PT;
131342880f72SAlexander Shishkin
13144c58d922SLike Xu if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
13154c58d922SLike Xu base = MSR_RELOAD_FIXED_CTR0;
13164c58d922SLike Xu idx = hwc->idx - INTEL_PMC_IDX_FIXED;
13172145e77fSKan Liang if (x86_pmu.intel_cap.pebs_format < 5)
13182145e77fSKan Liang value = ds->pebs_event_reset[MAX_PEBS_EVENTS_FMT4 + idx];
13192145e77fSKan Liang else
13204c58d922SLike Xu value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx];
13214c58d922SLike Xu }
13224c58d922SLike Xu wrmsrl(base + idx, value);
132342880f72SAlexander Shishkin }
132442880f72SAlexander Shishkin
1325b752ea0cSKan Liang static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
1326b752ea0cSKan Liang {
1327b752ea0cSKan Liang if (cpuc->n_pebs == cpuc->n_large_pebs &&
1328b752ea0cSKan Liang cpuc->n_pebs != cpuc->n_pebs_via_pt)
1329b752ea0cSKan Liang intel_pmu_drain_pebs_buffer();
1330b752ea0cSKan Liang }
1331b752ea0cSKan Liang
13327010d129SBorislav Petkov void intel_pmu_pebs_enable(struct perf_event *event)
13337010d129SBorislav Petkov {
13347010d129SBorislav Petkov struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1335b752ea0cSKan Liang u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW;
13367010d129SBorislav Petkov struct hw_perf_event *hwc = &event->hw;
13377010d129SBorislav Petkov struct debug_store *ds = cpuc->ds;
13384c58d922SLike Xu unsigned int idx = hwc->idx;
133909e61b4fSPeter Zijlstra
13407010d129SBorislav Petkov hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
13417010d129SBorislav Petkov
13427010d129SBorislav Petkov cpuc->pebs_enabled |= 1ULL << hwc->idx;
13437010d129SBorislav Petkov
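/*
 * MSR_IA32_PEBS_ENABLE layout (sketch, pre-v5 PMUs): bit i arms PEBS
 * on general purpose counter i, bit 32+i arms the load latency
 * assist for counter i, and bit 63 arms the precise store assist.
 */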
134460176089SKan Liang if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
13457010d129SBorislav Petkov cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
13467010d129SBorislav Petkov else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
13477010d129SBorislav Petkov cpuc->pebs_enabled |= 1ULL << 63;
13487010d129SBorislav Petkov
1349c22497f5SKan Liang if (x86_pmu.intel_cap.pebs_baseline) {
1350c22497f5SKan Liang hwc->config |= ICL_EVENTSEL_ADAPTIVE;
1351b752ea0cSKan Liang if (pebs_data_cfg != cpuc->active_pebs_data_cfg) {
1352b752ea0cSKan Liang /*
1353b752ea0cSKan Liang * drain_pebs() assumes uniform record size;
1354b752ea0cSKan Liang * hence we need to drain when changing said
1355b752ea0cSKan Liang * size.
1356b752ea0cSKan Liang */
1357b752ea0cSKan Liang intel_pmu_drain_large_pebs(cpuc);
1358b752ea0cSKan Liang adaptive_pebs_record_size_update();
1359b752ea0cSKan Liang wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
1360b752ea0cSKan Liang cpuc->active_pebs_data_cfg = pebs_data_cfg;
1361c22497f5SKan Liang }
1362c22497f5SKan Liang }
1363b752ea0cSKan Liang if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) {
1364b752ea0cSKan Liang cpuc->pebs_data_cfg = pebs_data_cfg;
1365b752ea0cSKan Liang pebs_update_threshold(cpuc);
1366b752ea0cSKan Liang }
1367c22497f5SKan Liang
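/*
 * Fixed counters keep their reset values after the GP slots in
 * ds->pebs_event_reset: fixed counter n uses slot
 * MAX_PEBS_EVENTS_FMT4 + n on pebs_format < 5 and
 * MAX_PEBS_EVENTS + n otherwise.
 */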
13682145e77fSKan Liang if (idx >= INTEL_PMC_IDX_FIXED) {
13692145e77fSKan Liang if (x86_pmu.intel_cap.pebs_format < 5)
13702145e77fSKan Liang idx = MAX_PEBS_EVENTS_FMT4 + (idx - INTEL_PMC_IDX_FIXED);
13712145e77fSKan Liang else
13724c58d922SLike Xu idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
13732145e77fSKan Liang }
13744c58d922SLike Xu
13757010d129SBorislav Petkov /*
137609e61b4fSPeter Zijlstra * Use auto-reload if possible to save an MSR write in the PMI.
137709e61b4fSPeter Zijlstra * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD can change the period.
13787010d129SBorislav Petkov */
13797010d129SBorislav Petkov if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
1380ec71a398SKan Liang ds->pebs_event_reset[idx] =
13817010d129SBorislav Petkov (u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
1382dc853e26SJiri Olsa } else {
13834c58d922SLike Xu ds->pebs_event_reset[idx] = 0;
13847010d129SBorislav Petkov }
138542880f72SAlexander Shishkin
138642880f72SAlexander Shishkin intel_pmu_pebs_via_pt_enable(event);
138709e61b4fSPeter Zijlstra }
13887010d129SBorislav Petkov
138968f7082fSPeter Zijlstra void intel_pmu_pebs_del(struct perf_event *event)
139009e61b4fSPeter Zijlstra {
139109e61b4fSPeter Zijlstra struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
139209e61b4fSPeter Zijlstra struct hw_perf_event *hwc = &event->hw;
139309e61b4fSPeter Zijlstra bool needed_cb = pebs_needs_sched_cb(cpuc);
139409e61b4fSPeter Zijlstra
139509e61b4fSPeter Zijlstra cpuc->n_pebs--;
1396174afc3eSKan Liang if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
139709e61b4fSPeter Zijlstra cpuc->n_large_pebs--;
139842880f72SAlexander Shishkin if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
139942880f72SAlexander Shishkin cpuc->n_pebs_via_pt--;
140009e61b4fSPeter Zijlstra
1401c22497f5SKan Liang pebs_update_state(needed_cb, cpuc, event, false);
14027010d129SBorislav Petkov }
14037010d129SBorislav Petkov
14047010d129SBorislav Petkov void intel_pmu_pebs_disable(struct perf_event *event)
14057010d129SBorislav Petkov {
14067010d129SBorislav Petkov struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
14077010d129SBorislav Petkov struct hw_perf_event *hwc = &event->hw;
14087010d129SBorislav Petkov
1409b752ea0cSKan Liang intel_pmu_drain_large_pebs(cpuc);
14107010d129SBorislav Petkov
14117010d129SBorislav Petkov cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
14127010d129SBorislav Petkov
141360176089SKan Liang if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
141460176089SKan Liang (x86_pmu.version < 5))
14157010d129SBorislav Petkov cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
14167010d129SBorislav Petkov else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
14177010d129SBorislav Petkov cpuc->pebs_enabled &= ~(1ULL << 63);
14187010d129SBorislav Petkov
141942880f72SAlexander Shishkin intel_pmu_pebs_via_pt_disable(event);
142042880f72SAlexander Shishkin
14217010d129SBorislav Petkov if (cpuc->enabled)
14227010d129SBorislav Petkov wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
14237010d129SBorislav Petkov
14247010d129SBorislav Petkov hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
14257010d129SBorislav Petkov }
14267010d129SBorislav Petkov
14277010d129SBorislav Petkov void intel_pmu_pebs_enable_all(void)
14287010d129SBorislav Petkov {
14297010d129SBorislav Petkov struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
14307010d129SBorislav Petkov
14317010d129SBorislav Petkov if (cpuc->pebs_enabled)
14327010d129SBorislav Petkov wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
14337010d129SBorislav Petkov }
14347010d129SBorislav Petkov
14357010d129SBorislav Petkov void intel_pmu_pebs_disable_all(void)
14367010d129SBorislav Petkov {
14377010d129SBorislav Petkov struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
14387010d129SBorislav Petkov
14397010d129SBorislav Petkov if (cpuc->pebs_enabled)
1440c22ac2a3SSong Liu __intel_pmu_pebs_disable_all();
14417010d129SBorislav Petkov }
14427010d129SBorislav Petkov
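/*
 * Fixup sketch: with a trap-like PEBS assist the reported IP points
 * one instruction past the sampled one. LBR entry 0 holds the last
 * branch (from -> to), so 'to' begins the basic block containing the
 * sample; decode forward from 'to' and the instruction decoded just
 * before reaching regs->ip is the real sample point.
 */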
14437010d129SBorislav Petkov static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
14447010d129SBorislav Petkov {
14457010d129SBorislav Petkov struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
14467010d129SBorislav Petkov unsigned long from = cpuc->lbr_entries[0].from;
14477010d129SBorislav Petkov unsigned long old_to, to = cpuc->lbr_entries[0].to;
14487010d129SBorislav Petkov unsigned long ip = regs->ip;
14497010d129SBorislav Petkov int is_64bit = 0;
14507010d129SBorislav Petkov void *kaddr;
14517010d129SBorislav Petkov int size;
14527010d129SBorislav Petkov
14537010d129SBorislav Petkov /*
14547010d129SBorislav Petkov * We don't need to fix up if the PEBS assist is fault-like
14557010d129SBorislav Petkov */
14567010d129SBorislav Petkov if (!x86_pmu.intel_cap.pebs_trap)
14577010d129SBorislav Petkov return 1;
14587010d129SBorislav Petkov
14597010d129SBorislav Petkov /*
14607010d129SBorislav Petkov * No LBR entry, no basic block, no rewinding
14617010d129SBorislav Petkov */
14627010d129SBorislav Petkov if (!cpuc->lbr_stack.nr || !from || !to)
14637010d129SBorislav Petkov return 0;
14647010d129SBorislav Petkov
14657010d129SBorislav Petkov /*
14667010d129SBorislav Petkov * Basic blocks should never cross user/kernel boundaries
14677010d129SBorislav Petkov */
14687010d129SBorislav Petkov if (kernel_ip(ip) != kernel_ip(to))
14697010d129SBorislav Petkov return 0;
14707010d129SBorislav Petkov
14717010d129SBorislav Petkov /*
14727010d129SBorislav Petkov * unsigned math, either ip is before the start (impossible) or
14737010d129SBorislav Petkov * the basic block is larger than 1 page (sanity)
14747010d129SBorislav Petkov */
14757010d129SBorislav Petkov if ((ip - to) > PEBS_FIXUP_SIZE)
14767010d129SBorislav Petkov return 0;
14777010d129SBorislav Petkov
14787010d129SBorislav Petkov /*
14797010d129SBorislav Petkov * We sampled a branch insn, rewind using the LBR stack
14807010d129SBorislav Petkov */
14817010d129SBorislav Petkov if (ip == to) {
14827010d129SBorislav Petkov set_linear_ip(regs, from);
14837010d129SBorislav Petkov return 1;
14847010d129SBorislav Petkov }
14857010d129SBorislav Petkov
14867010d129SBorislav Petkov size = ip - to;
14877010d129SBorislav Petkov if (!kernel_ip(ip)) {
14887010d129SBorislav Petkov int bytes;
14897010d129SBorislav Petkov u8 *buf = this_cpu_read(insn_buffer);
14907010d129SBorislav Petkov
14917010d129SBorislav Petkov /* 'size' must fit our buffer, see above */
14927010d129SBorislav Petkov bytes = copy_from_user_nmi(buf, (void __user *)to, size);
14937010d129SBorislav Petkov if (bytes != 0)
14947010d129SBorislav Petkov return 0;
14957010d129SBorislav Petkov
14967010d129SBorislav Petkov kaddr = buf;
14977010d129SBorislav Petkov } else {
14987010d129SBorislav Petkov kaddr = (void *)to;
14997010d129SBorislav Petkov }
15007010d129SBorislav Petkov
15017010d129SBorislav Petkov do {
15027010d129SBorislav Petkov struct insn insn;
15037010d129SBorislav Petkov
15047010d129SBorislav Petkov old_to = to;
15057010d129SBorislav Petkov
15067010d129SBorislav Petkov #ifdef CONFIG_X86_64
1507375d4bfdSGabriel Krisman Bertazi is_64bit = kernel_ip(to) || any_64bit_mode(regs);
15087010d129SBorislav Petkov #endif
15097010d129SBorislav Petkov insn_init(&insn, kaddr, size, is_64bit);
15102ff49881SBorislav Petkov
15117010d129SBorislav Petkov /*
15122ff49881SBorislav Petkov * Make sure there was not a problem decoding the instruction.
15132ff49881SBorislav Petkov * This is doubly important because we have an infinite loop if
15142ff49881SBorislav Petkov * insn.length=0.
15157010d129SBorislav Petkov */
15162ff49881SBorislav Petkov if (insn_get_length(&insn))
15177010d129SBorislav Petkov break;
15187010d129SBorislav Petkov
15197010d129SBorislav Petkov to += insn.length;
15207010d129SBorislav Petkov kaddr += insn.length;
15217010d129SBorislav Petkov size -= insn.length;
15227010d129SBorislav Petkov } while (to < ip);
15237010d129SBorislav Petkov
15247010d129SBorislav Petkov if (to == ip) {
15257010d129SBorislav Petkov set_linear_ip(regs, old_to);
15267010d129SBorislav Petkov return 1;
15277010d129SBorislav Petkov }
15287010d129SBorislav Petkov
15297010d129SBorislav Petkov /*
15307010d129SBorislav Petkov * Even though we decoded the basic block, the instruction stream
15317010d129SBorislav Petkov * never matched the given IP; either the TO or the IP got corrupted.
15327010d129SBorislav Petkov */
15337010d129SBorislav Petkov return 0;
15347010d129SBorislav Petkov }
15357010d129SBorislav Petkov
153648f38aa4SAndi Kleen static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
15377010d129SBorislav Petkov {
153848f38aa4SAndi Kleen if (tsx_tuning) {
153948f38aa4SAndi Kleen union hsw_tsx_tuning tsx = { .value = tsx_tuning };
15407010d129SBorislav Petkov return tsx.cycles_last_block;
15417010d129SBorislav Petkov }
15427010d129SBorislav Petkov return 0;
15437010d129SBorislav Petkov }
15447010d129SBorislav Petkov
154548f38aa4SAndi Kleen static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
15467010d129SBorislav Petkov {
154748f38aa4SAndi Kleen u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
15487010d129SBorislav Petkov
15497010d129SBorislav Petkov /* For RTM XABORTs also log the abort code from AX */
155048f38aa4SAndi Kleen if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
155148f38aa4SAndi Kleen txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
15527010d129SBorislav Petkov return txn;
15537010d129SBorislav Petkov }
15547010d129SBorislav Petkov
1555c22497f5SKan Liang static inline u64 get_pebs_status(void *n)
1556c22497f5SKan Liang {
1557c22497f5SKan Liang if (x86_pmu.intel_cap.pebs_format < 4)
1558c22497f5SKan Liang return ((struct pebs_record_nhm *)n)->status;
1559c22497f5SKan Liang return ((struct pebs_basic *)n)->applicable_counters;
1560c22497f5SKan Liang }
1561c22497f5SKan Liang
156248f38aa4SAndi Kleen #define PERF_X86_EVENT_PEBS_HSW_PREC \
156348f38aa4SAndi Kleen (PERF_X86_EVENT_PEBS_ST_HSW | \
156448f38aa4SAndi Kleen PERF_X86_EVENT_PEBS_LD_HSW | \
156548f38aa4SAndi Kleen PERF_X86_EVENT_PEBS_NA_HSW)
156648f38aa4SAndi Kleen
156748f38aa4SAndi Kleen static u64 get_data_src(struct perf_event *event, u64 aux)
156848f38aa4SAndi Kleen {
156948f38aa4SAndi Kleen u64 val = PERF_MEM_NA;
157048f38aa4SAndi Kleen int fl = event->hw.flags;
157148f38aa4SAndi Kleen bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
157248f38aa4SAndi Kleen
157348f38aa4SAndi Kleen if (fl & PERF_X86_EVENT_PEBS_LDLAT)
1574ccf170e9SKan Liang val = load_latency_data(event, aux);
157561b985e3SKan Liang else if (fl & PERF_X86_EVENT_PEBS_STLAT)
1576ccf170e9SKan Liang val = store_latency_data(event, aux);
157739a41278SKan Liang else if (fl & PERF_X86_EVENT_PEBS_LAT_HYBRID)
157839a41278SKan Liang val = x86_pmu.pebs_latency_data(event, aux);
157948f38aa4SAndi Kleen else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
158048f38aa4SAndi Kleen val = precise_datala_hsw(event, aux);
158148f38aa4SAndi Kleen else if (fst)
158248f38aa4SAndi Kleen val = precise_store_data(aux);
158348f38aa4SAndi Kleen return val;
158448f38aa4SAndi Kleen }
158548f38aa4SAndi Kleen
158689e97eb8SKan Liang static void setup_pebs_time(struct perf_event *event,
158789e97eb8SKan Liang struct perf_sample_data *data,
158889e97eb8SKan Liang u64 tsc)
158989e97eb8SKan Liang {
159089e97eb8SKan Liang /* Converting to a user-defined clock is not supported yet. */
159189e97eb8SKan Liang if (event->attr.use_clockid != 0)
159289e97eb8SKan Liang return;
159389e97eb8SKan Liang
159489e97eb8SKan Liang /*
159589e97eb8SKan Liang * Doesn't support the conversion when the TSC is unstable.
159689e97eb8SKan Liang * The TSC unstable case is a corner case and very unlikely to
159789e97eb8SKan Liang * happen. If it happens, the TSC in a PEBS record will be
159889e97eb8SKan Liang * dropped and fall back to perf_event_clock().
159989e97eb8SKan Liang */
160089e97eb8SKan Liang if (!using_native_sched_clock() || !sched_clock_stable())
160189e97eb8SKan Liang return;
160289e97eb8SKan Liang
160389e97eb8SKan Liang data->time = native_sched_clock_from_tsc(tsc) + __sched_clock_offset;
160489e97eb8SKan Liang data->sample_flags |= PERF_SAMPLE_TIME;
160589e97eb8SKan Liang }
160689e97eb8SKan Liang
160776a5433fSKan Liang #define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \
160876a5433fSKan Liang PERF_SAMPLE_PHYS_ADDR | \
160976a5433fSKan Liang PERF_SAMPLE_DATA_PAGE_SIZE)
161076a5433fSKan Liang
1611c22497f5SKan Liang static void setup_pebs_fixed_sample_data(struct perf_event *event,
16127010d129SBorislav Petkov struct pt_regs *iregs, void *__pebs,
16137010d129SBorislav Petkov struct perf_sample_data *data,
16147010d129SBorislav Petkov struct pt_regs *regs)
16157010d129SBorislav Petkov {
16167010d129SBorislav Petkov /*
16177010d129SBorislav Petkov * We cast to the biggest pebs_record but are careful not to
16187010d129SBorislav Petkov * unconditionally access the 'extra' entries.
16197010d129SBorislav Petkov */
16207010d129SBorislav Petkov struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
16217010d129SBorislav Petkov struct pebs_record_skl *pebs = __pebs;
16227010d129SBorislav Petkov u64 sample_type;
162348f38aa4SAndi Kleen int fll;
16247010d129SBorislav Petkov
16257010d129SBorislav Petkov if (pebs == NULL)
16267010d129SBorislav Petkov return;
16277010d129SBorislav Petkov
16287010d129SBorislav Petkov sample_type = event->attr.sample_type;
162948f38aa4SAndi Kleen fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
16307010d129SBorislav Petkov
16317010d129SBorislav Petkov perf_sample_data_init(data, 0, event->hw.last_period);
16327010d129SBorislav Petkov
16337010d129SBorislav Petkov data->period = event->hw.last_period;
16347010d129SBorislav Petkov
16357010d129SBorislav Petkov /*
16367010d129SBorislav Petkov * Use latency for weight (only avail with PEBS-LL)
16377010d129SBorislav Petkov */
16382abe681dSKan Liang if (fll && (sample_type & PERF_SAMPLE_WEIGHT_TYPE)) {
16392a6c6b7dSKan Liang data->weight.full = pebs->lat;
16402abe681dSKan Liang data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
16412abe681dSKan Liang }
16427010d129SBorislav Petkov
16437010d129SBorislav Petkov /*
16447010d129SBorislav Petkov * data.data_src encodes the data source
16457010d129SBorislav Petkov */
1646e16fd7f2SKan Liang if (sample_type & PERF_SAMPLE_DATA_SRC) {
164748f38aa4SAndi Kleen data->data_src.val = get_data_src(event, pebs->dse);
1648e16fd7f2SKan Liang data->sample_flags |= PERF_SAMPLE_DATA_SRC;
1649e16fd7f2SKan Liang }
16507010d129SBorislav Petkov
16517010d129SBorislav Petkov /*
16526cbc304fSPeter Zijlstra * We must however always use iregs for the unwinder to stay sane; the
16536cbc304fSPeter Zijlstra * record BP,SP,IP can point into thin air when the record is from a
1654a97673a1SIngo Molnar * previous PMI context or an (I)RET happened between the record and
16556cbc304fSPeter Zijlstra * PMI.
16566cbc304fSPeter Zijlstra */
165731046500SNamhyung Kim if (sample_type & PERF_SAMPLE_CALLCHAIN)
165831046500SNamhyung Kim perf_sample_save_callchain(data, event, iregs);
16596cbc304fSPeter Zijlstra
16606cbc304fSPeter Zijlstra /*
1661b8000586SPeter Zijlstra * We use the interrupt regs as a base because the PEBS record does not
1662b8000586SPeter Zijlstra * contain a full regs set, specifically it seems to lack segment
1663b8000586SPeter Zijlstra * descriptors, which get used by things like user_mode().
16647010d129SBorislav Petkov *
1665b8000586SPeter Zijlstra * In the simple case fix up only the IP for PERF_SAMPLE_IP.
16667010d129SBorislav Petkov */
16677010d129SBorislav Petkov *regs = *iregs;
1668d1e7e602SStephane Eranian
1669d1e7e602SStephane Eranian /*
1670d1e7e602SStephane Eranian * Initialize regs->flags from PEBS,
1671d1e7e602SStephane Eranian * Clear exact bit (which uses x86 EFLAGS Reserved bit 3),
1672d1e7e602SStephane Eranian * i.e., do not rely on it being zero:
1673d1e7e602SStephane Eranian */
1674d1e7e602SStephane Eranian regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT;
16757010d129SBorislav Petkov
16767010d129SBorislav Petkov if (sample_type & PERF_SAMPLE_REGS_INTR) {
16777010d129SBorislav Petkov regs->ax = pebs->ax;
16787010d129SBorislav Petkov regs->bx = pebs->bx;
16797010d129SBorislav Petkov regs->cx = pebs->cx;
16807010d129SBorislav Petkov regs->dx = pebs->dx;
16817010d129SBorislav Petkov regs->si = pebs->si;
16827010d129SBorislav Petkov regs->di = pebs->di;
1683b8000586SPeter Zijlstra
16847010d129SBorislav Petkov regs->bp = pebs->bp;
16857010d129SBorislav Petkov regs->sp = pebs->sp;
16867010d129SBorislav Petkov
16877010d129SBorislav Petkov #ifndef CONFIG_X86_32
16887010d129SBorislav Petkov regs->r8 = pebs->r8;
16897010d129SBorislav Petkov regs->r9 = pebs->r9;
16907010d129SBorislav Petkov regs->r10 = pebs->r10;
16917010d129SBorislav Petkov regs->r11 = pebs->r11;
16927010d129SBorislav Petkov regs->r12 = pebs->r12;
16937010d129SBorislav Petkov regs->r13 = pebs->r13;
16947010d129SBorislav Petkov regs->r14 = pebs->r14;
16957010d129SBorislav Petkov regs->r15 = pebs->r15;
16967010d129SBorislav Petkov #endif
16977010d129SBorislav Petkov }
16987010d129SBorislav Petkov
169971eb9ee9SStephane Eranian if (event->attr.precise_ip > 1) {
1700d1e7e602SStephane Eranian /*
1701d1e7e602SStephane Eranian * Haswell and later processors have an 'eventing IP'
1702d1e7e602SStephane Eranian * (real IP) which fixes the off-by-1 skid in hardware.
1703d1e7e602SStephane Eranian * Use it when precise_ip >= 2 :
1704d1e7e602SStephane Eranian */
170571eb9ee9SStephane Eranian if (x86_pmu.intel_cap.pebs_format >= 2) {
170671eb9ee9SStephane Eranian set_linear_ip(regs, pebs->real_ip);
17077010d129SBorislav Petkov regs->flags |= PERF_EFLAGS_EXACT;
170871eb9ee9SStephane Eranian } else {
1709d1e7e602SStephane Eranian /* Otherwise, use PEBS off-by-1 IP: */
171071eb9ee9SStephane Eranian set_linear_ip(regs, pebs->ip);
171171eb9ee9SStephane Eranian
1712d1e7e602SStephane Eranian /*
1713d1e7e602SStephane Eranian * With precise_ip >= 2, try to fix up the off-by-1 IP
1714d1e7e602SStephane Eranian * using the LBR. If successful, the fixup function
1715d1e7e602SStephane Eranian * corrects regs->ip and calls set_linear_ip() on regs:
1716d1e7e602SStephane Eranian */
171771eb9ee9SStephane Eranian if (intel_pmu_pebs_fixup_ip(regs))
17187010d129SBorislav Petkov regs->flags |= PERF_EFLAGS_EXACT;
171971eb9ee9SStephane Eranian }
1720d1e7e602SStephane Eranian } else {
1721d1e7e602SStephane Eranian /*
1722d1e7e602SStephane Eranian * When precise_ip == 1, return the PEBS off-by-1 IP,
1723d1e7e602SStephane Eranian * no fixup attempted:
1724d1e7e602SStephane Eranian */
172571eb9ee9SStephane Eranian set_linear_ip(regs, pebs->ip);
1726d1e7e602SStephane Eranian }
172771eb9ee9SStephane Eranian
17287010d129SBorislav Petkov
172976a5433fSKan Liang if ((sample_type & PERF_SAMPLE_ADDR_TYPE) &&
17307b084630SNamhyung Kim x86_pmu.intel_cap.pebs_format >= 1) {
17317010d129SBorislav Petkov data->addr = pebs->dla;
17327b084630SNamhyung Kim data->sample_flags |= PERF_SAMPLE_ADDR;
17337b084630SNamhyung Kim }
17347010d129SBorislav Petkov
17357010d129SBorislav Petkov if (x86_pmu.intel_cap.pebs_format >= 2) {
17367010d129SBorislav Petkov /* Only set the TSX weight when there is no memory weight. */
17372abe681dSKan Liang if ((sample_type & PERF_SAMPLE_WEIGHT_TYPE) && !fll) {
17382a6c6b7dSKan Liang data->weight.full = intel_get_tsx_weight(pebs->tsx_tuning);
17392abe681dSKan Liang data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
17402abe681dSKan Liang }
1741ee9db0e1SKan Liang if (sample_type & PERF_SAMPLE_TRANSACTION) {
174248f38aa4SAndi Kleen data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
174348f38aa4SAndi Kleen pebs->ax);
1744ee9db0e1SKan Liang data->sample_flags |= PERF_SAMPLE_TRANSACTION;
1745ee9db0e1SKan Liang }
17467010d129SBorislav Petkov }
17477010d129SBorislav Petkov
17487010d129SBorislav Petkov /*
17497010d129SBorislav Petkov * v3 supplies an accurate time stamp, so we use that
17507010d129SBorislav Petkov * for the sample time.
17517010d129SBorislav Petkov *
17527010d129SBorislav Petkov * We can only do this for the default trace clock.
17537010d129SBorislav Petkov */
175489e97eb8SKan Liang if (x86_pmu.intel_cap.pebs_format >= 3)
175589e97eb8SKan Liang setup_pebs_time(event, data, pebs->tsc);
17567010d129SBorislav Petkov
1757eb55b455SNamhyung Kim if (has_branch_stack(event))
1758eb55b455SNamhyung Kim perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
17597010d129SBorislav Petkov }
17607010d129SBorislav Petkov
1761c22497f5SKan Liang static void adaptive_pebs_save_regs(struct pt_regs *regs,
1762c22497f5SKan Liang struct pebs_gprs *gprs)
1763c22497f5SKan Liang {
1764c22497f5SKan Liang regs->ax = gprs->ax;
1765c22497f5SKan Liang regs->bx = gprs->bx;
1766c22497f5SKan Liang regs->cx = gprs->cx;
1767c22497f5SKan Liang regs->dx = gprs->dx;
1768c22497f5SKan Liang regs->si = gprs->si;
1769c22497f5SKan Liang regs->di = gprs->di;
1770c22497f5SKan Liang regs->bp = gprs->bp;
1771c22497f5SKan Liang regs->sp = gprs->sp;
1772c22497f5SKan Liang #ifndef CONFIG_X86_32
1773c22497f5SKan Liang regs->r8 = gprs->r8;
1774c22497f5SKan Liang regs->r9 = gprs->r9;
1775c22497f5SKan Liang regs->r10 = gprs->r10;
1776c22497f5SKan Liang regs->r11 = gprs->r11;
1777c22497f5SKan Liang regs->r12 = gprs->r12;
1778c22497f5SKan Liang regs->r13 = gprs->r13;
1779c22497f5SKan Liang regs->r14 = gprs->r14;
1780c22497f5SKan Liang regs->r15 = gprs->r15;
1781c22497f5SKan Liang #endif
1782c22497f5SKan Liang }
1783c22497f5SKan Liang
178461b985e3SKan Liang #define PEBS_LATENCY_MASK 0xffff
178561b985e3SKan Liang #define PEBS_CACHE_LATENCY_OFFSET 32
1786c87a3109SKan Liang #define PEBS_RETIRE_LATENCY_OFFSET 32
178761b985e3SKan Liang
1788c22497f5SKan Liang /*
1789c22497f5SKan Liang * With adaptive PEBS the layout depends on what fields are configured.
1790c22497f5SKan Liang */
1791c22497f5SKan Liang
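/*
 * Parse order matches the hardware layout (sketch, all groups on):
 *
 *   pebs_basic | pebs_meminfo | pebs_gprs | pebs_xmm | lbr_entry[n]
 *
 * next_record advances past each group that format_size flags as
 * present; the WARN_ONCE at the end cross-checks the walk against
 * the record size in format_size[63:48].
 */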
1792c22497f5SKan Liang static void setup_pebs_adaptive_sample_data(struct perf_event *event,
1793c22497f5SKan Liang struct pt_regs *iregs, void *__pebs,
1794c22497f5SKan Liang struct perf_sample_data *data,
1795c22497f5SKan Liang struct pt_regs *regs)
1796c22497f5SKan Liang {
1797c22497f5SKan Liang struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1798c22497f5SKan Liang struct pebs_basic *basic = __pebs;
1799c22497f5SKan Liang void *next_record = basic + 1;
1800c22497f5SKan Liang u64 sample_type;
1801c22497f5SKan Liang u64 format_size;
1802c22497f5SKan Liang struct pebs_meminfo *meminfo = NULL;
1803c22497f5SKan Liang struct pebs_gprs *gprs = NULL;
1804c22497f5SKan Liang struct x86_perf_regs *perf_regs;
1805c22497f5SKan Liang
1806c22497f5SKan Liang if (basic == NULL)
1807c22497f5SKan Liang return;
1808c22497f5SKan Liang
1809c22497f5SKan Liang perf_regs = container_of(regs, struct x86_perf_regs, regs);
1810c22497f5SKan Liang perf_regs->xmm_regs = NULL;
1811c22497f5SKan Liang
1812c22497f5SKan Liang sample_type = event->attr.sample_type;
1813c22497f5SKan Liang format_size = basic->format_size;
1814c22497f5SKan Liang perf_sample_data_init(data, 0, event->hw.last_period);
1815c22497f5SKan Liang data->period = event->hw.last_period;
1816c22497f5SKan Liang
181789e97eb8SKan Liang setup_pebs_time(event, data, basic->tsc);
1818c22497f5SKan Liang
1819c22497f5SKan Liang /*
1820c22497f5SKan Liang * We must however always use iregs for the unwinder to stay sane; the
1821c22497f5SKan Liang * record BP,SP,IP can point into thin air when the record is from a
1822c22497f5SKan Liang * previous PMI context or an (I)RET happened between the record and
1823c22497f5SKan Liang * PMI.
1824c22497f5SKan Liang */
182531046500SNamhyung Kim if (sample_type & PERF_SAMPLE_CALLCHAIN)
182631046500SNamhyung Kim perf_sample_save_callchain(data, event, iregs);
1827c22497f5SKan Liang
1828c22497f5SKan Liang *regs = *iregs;
1829c22497f5SKan Liang /* The ip in basic is EventingIP */
1830c22497f5SKan Liang set_linear_ip(regs, basic->ip);
1831c22497f5SKan Liang regs->flags = PERF_EFLAGS_EXACT;
1832c22497f5SKan Liang
1833*344bb09fSKan Liang if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
1834*344bb09fSKan Liang if (x86_pmu.flags & PMU_FL_RETIRE_LATENCY)
1835c87a3109SKan Liang data->weight.var3_w = format_size >> PEBS_RETIRE_LATENCY_OFFSET & PEBS_LATENCY_MASK;
1836*344bb09fSKan Liang else
1837*344bb09fSKan Liang data->weight.var3_w = 0;
1838*344bb09fSKan Liang }
1839c87a3109SKan Liang
1840c22497f5SKan Liang /*
1841c22497f5SKan Liang * The MEMINFO record sits in front of the GP record,
1842c22497f5SKan Liang * but PERF_SAMPLE_TRANSACTION needs gprs->ax.
1843c22497f5SKan Liang * Save the pointer here and process it later.
1844c22497f5SKan Liang */
1845c22497f5SKan Liang if (format_size & PEBS_DATACFG_MEMINFO) {
1846c22497f5SKan Liang meminfo = next_record;
1847c22497f5SKan Liang next_record = meminfo + 1;
1848c22497f5SKan Liang }
1849c22497f5SKan Liang
1850c22497f5SKan Liang if (format_size & PEBS_DATACFG_GP) {
1851c22497f5SKan Liang gprs = next_record;
1852c22497f5SKan Liang next_record = gprs + 1;
1853c22497f5SKan Liang
1854c22497f5SKan Liang if (event->attr.precise_ip < 2) {
1855c22497f5SKan Liang set_linear_ip(regs, gprs->ip);
1856c22497f5SKan Liang regs->flags &= ~PERF_EFLAGS_EXACT;
1857c22497f5SKan Liang }
1858c22497f5SKan Liang
1859c22497f5SKan Liang if (sample_type & PERF_SAMPLE_REGS_INTR)
1860c22497f5SKan Liang adaptive_pebs_save_regs(regs, gprs);
1861c22497f5SKan Liang }
1862c22497f5SKan Liang
1863c22497f5SKan Liang if (format_size & PEBS_DATACFG_MEMINFO) {
186461b985e3SKan Liang if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
186561b985e3SKan Liang u64 weight = meminfo->latency;
186661b985e3SKan Liang
186761b985e3SKan Liang if (x86_pmu.flags & PMU_FL_INSTR_LATENCY) {
186861b985e3SKan Liang data->weight.var2_w = weight & PEBS_LATENCY_MASK;
186961b985e3SKan Liang weight >>= PEBS_CACHE_LATENCY_OFFSET;
187061b985e3SKan Liang }
187161b985e3SKan Liang
187261b985e3SKan Liang /*
187361b985e3SKan Liang * Although meminfo::latency is defined as a u64,
187461b985e3SKan Liang * only the lower 32 bits contain valid data
187561b985e3SKan Liang * in practice on Ice Lake and earlier platforms.
187661b985e3SKan Liang */
187761b985e3SKan Liang if (sample_type & PERF_SAMPLE_WEIGHT) {
187861b985e3SKan Liang data->weight.full = weight ?:
1879c22497f5SKan Liang intel_get_tsx_weight(meminfo->tsx_tuning);
188061b985e3SKan Liang } else {
188161b985e3SKan Liang data->weight.var1_dw = (u32)(weight & PEBS_LATENCY_MASK) ?:
188261b985e3SKan Liang intel_get_tsx_weight(meminfo->tsx_tuning);
188361b985e3SKan Liang }
18842abe681dSKan Liang data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
188561b985e3SKan Liang }
1886c22497f5SKan Liang
1887e16fd7f2SKan Liang if (sample_type & PERF_SAMPLE_DATA_SRC) {
1888c22497f5SKan Liang data->data_src.val = get_data_src(event, meminfo->aux);
1889e16fd7f2SKan Liang data->sample_flags |= PERF_SAMPLE_DATA_SRC;
1890e16fd7f2SKan Liang }
1891c22497f5SKan Liang
18927b084630SNamhyung Kim if (sample_type & PERF_SAMPLE_ADDR_TYPE) {
1893c22497f5SKan Liang data->addr = meminfo->address;
18947b084630SNamhyung Kim data->sample_flags |= PERF_SAMPLE_ADDR;
18957b084630SNamhyung Kim }
1896c22497f5SKan Liang
1897ee9db0e1SKan Liang if (sample_type & PERF_SAMPLE_TRANSACTION) {
1898c22497f5SKan Liang data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning,
1899c22497f5SKan Liang gprs ? gprs->ax : 0);
1900ee9db0e1SKan Liang data->sample_flags |= PERF_SAMPLE_TRANSACTION;
1901ee9db0e1SKan Liang }
1902c22497f5SKan Liang }
1903c22497f5SKan Liang
1904c22497f5SKan Liang if (format_size & PEBS_DATACFG_XMMS) {
1905c22497f5SKan Liang struct pebs_xmm *xmm = next_record;
1906c22497f5SKan Liang
1907c22497f5SKan Liang next_record = xmm + 1;
1908c22497f5SKan Liang perf_regs->xmm_regs = xmm->xmm;
1909c22497f5SKan Liang }
1910c22497f5SKan Liang
1911c22497f5SKan Liang if (format_size & PEBS_DATACFG_LBRS) {
19125624986dSKan Liang struct lbr_entry *lbr = next_record;
1913c22497f5SKan Liang int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
1914c22497f5SKan Liang & 0xff) + 1;
19155624986dSKan Liang next_record = next_record + num_lbr * sizeof(struct lbr_entry);
1916c22497f5SKan Liang
1917c22497f5SKan Liang if (has_branch_stack(event)) {
1918c22497f5SKan Liang intel_pmu_store_pebs_lbrs(lbr);
1919eb55b455SNamhyung Kim perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
1920c22497f5SKan Liang }
1921c22497f5SKan Liang }
1922c22497f5SKan Liang
1923c22497f5SKan Liang WARN_ONCE(next_record != __pebs + (format_size >> 48),
1924c22497f5SKan Liang "PEBS record size %llu, expected %llu, config %llx\n",
1925c22497f5SKan Liang format_size >> 48,
1926c22497f5SKan Liang (u64)(next_record - __pebs),
1927c22497f5SKan Liang basic->format_size);
1928c22497f5SKan Liang }
1929c22497f5SKan Liang
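/*
 * Note: before PEBS v3 the status word of a record may carry bits for
 * counters that did not actually produce it, so a raw test_bit() match
 * is only trusted after masking the status against cpuc->pebs_enabled
 * and PEBS_COUNTER_MASK below.
 */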
19307010d129SBorislav Petkov static inline void *
19317010d129SBorislav Petkov get_next_pebs_record_by_bit(void *base, void *top, int bit)
19327010d129SBorislav Petkov {
19337010d129SBorislav Petkov struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
19347010d129SBorislav Petkov void *at;
19357010d129SBorislav Petkov u64 pebs_status;
19367010d129SBorislav Petkov
19377010d129SBorislav Petkov /*
19387010d129SBorislav Petkov * fmt0 does not have a status bitfield (does not use
19397010d129SBorislav Petkov * pebs_record_nhm format)
19407010d129SBorislav Petkov */
19417010d129SBorislav Petkov if (x86_pmu.intel_cap.pebs_format < 1)
19427010d129SBorislav Petkov return base;
19437010d129SBorislav Petkov
19447010d129SBorislav Petkov if (base == NULL)
19457010d129SBorislav Petkov return NULL;
19467010d129SBorislav Petkov
1947c22497f5SKan Liang for (at = base; at < top; at += cpuc->pebs_record_size) {
1948c22497f5SKan Liang unsigned long status = get_pebs_status(at);
19497010d129SBorislav Petkov
1950c22497f5SKan Liang if (test_bit(bit, (unsigned long *)&status)) {
19517010d129SBorislav Petkov /* PEBS v3 has accurate status bits */
19527010d129SBorislav Petkov if (x86_pmu.intel_cap.pebs_format >= 3)
19537010d129SBorislav Petkov return at;
19547010d129SBorislav Petkov
1955c22497f5SKan Liang if (status == (1 << bit))
19567010d129SBorislav Petkov return at;
19577010d129SBorislav Petkov
19587010d129SBorislav Petkov /* clear non-PEBS bit and re-check */
1959c22497f5SKan Liang pebs_status = status & cpuc->pebs_enabled;
1960fd583ad1SKan Liang pebs_status &= PEBS_COUNTER_MASK;
19617010d129SBorislav Petkov if (pebs_status == (1 << bit))
19627010d129SBorislav Petkov return at;
19637010d129SBorislav Petkov }
19647010d129SBorislav Petkov }
19657010d129SBorislav Petkov return NULL;
19667010d129SBorislav Petkov }
19677010d129SBorislav Petkov
19685bee2cc6SKan Liang void intel_pmu_auto_reload_read(struct perf_event *event)
19695bee2cc6SKan Liang {
19705bee2cc6SKan Liang WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
19715bee2cc6SKan Liang
19725bee2cc6SKan Liang perf_pmu_disable(event->pmu);
19735bee2cc6SKan Liang intel_pmu_drain_pebs_buffer();
19745bee2cc6SKan Liang perf_pmu_enable(event->pmu);
19755bee2cc6SKan Liang }
19765bee2cc6SKan Liang
1977d31fc13fSKan Liang /*
1978d31fc13fSKan Liang * Special variant of intel_pmu_save_and_restart() for auto-reload.
1979d31fc13fSKan Liang */
1980d31fc13fSKan Liang static int
1981d31fc13fSKan Liang intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
1982d31fc13fSKan Liang {
1983d31fc13fSKan Liang struct hw_perf_event *hwc = &event->hw;
1984d31fc13fSKan Liang int shift = 64 - x86_pmu.cntval_bits;
1985d31fc13fSKan Liang u64 period = hwc->sample_period;
1986d31fc13fSKan Liang u64 prev_raw_count, new_raw_count;
1987d31fc13fSKan Liang s64 new, old;
1988d31fc13fSKan Liang
1989d31fc13fSKan Liang WARN_ON(!period);
1990d31fc13fSKan Liang
1991d31fc13fSKan Liang /*
1992d31fc13fSKan Liang * drain_pebs() only happens when the PMU is disabled.
1993d31fc13fSKan Liang */
1994d31fc13fSKan Liang WARN_ON(this_cpu_read(cpu_hw_events.enabled));
1995d31fc13fSKan Liang
1996d31fc13fSKan Liang prev_raw_count = local64_read(&hwc->prev_count);
1997d31fc13fSKan Liang rdpmcl(hwc->event_base_rdpmc, new_raw_count);
1998d31fc13fSKan Liang local64_set(&hwc->prev_count, new_raw_count);
1999d31fc13fSKan Liang
2000d31fc13fSKan Liang /*
2001d31fc13fSKan Liang * Since the counter increments a negative counter value and
2002d31fc13fSKan Liang * overflows on the sign switch, giving the interval:
2003d31fc13fSKan Liang *
2004d31fc13fSKan Liang * [-period, 0]
2005d31fc13fSKan Liang *
2006d9f6e12fSIngo Molnar * the difference between two consecutive reads is:
2007d31fc13fSKan Liang *
2008d31fc13fSKan Liang * A) value2 - value1;
2009d31fc13fSKan Liang * when no overflows have happened in between,
2010d31fc13fSKan Liang *
2011d31fc13fSKan Liang * B) (0 - value1) + (value2 - (-period));
2012d31fc13fSKan Liang * when one overflow happened in between,
2013d31fc13fSKan Liang *
2014d31fc13fSKan Liang * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
2015d31fc13fSKan Liang * when @n overflows happened in between.
2016d31fc13fSKan Liang *
2017d31fc13fSKan Liang * Here A) is the obvious difference, B) is the extension to the
2018d31fc13fSKan Liang * discrete interval, where the first term is to the top of the
2019d31fc13fSKan Liang * interval and the second term is from the bottom of the next
2020d31fc13fSKan Liang * interval and C) the extension to multiple intervals, where the
2021d31fc13fSKan Liang * middle term is the whole intervals covered.
2022d31fc13fSKan Liang *
2023d31fc13fSKan Liang * An equivalent of C, by reduction, is:
2024d31fc13fSKan Liang *
2025d31fc13fSKan Liang * value2 - value1 + n * period
2026d31fc13fSKan Liang */
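/*
 * Worked example (illustrative numbers): period = 100,
 * old = -30, new = -90, with count = 2 overflows in between:
 * (-90) - (-30) + 2 * 100 = 140 events since the last read.
 */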
2027d31fc13fSKan Liang new = ((s64)(new_raw_count << shift) >> shift);
2028d31fc13fSKan Liang old = ((s64)(prev_raw_count << shift) >> shift);
2029d31fc13fSKan Liang local64_add(new - old + count * period, &event->count);
2030d31fc13fSKan Liang
2031f861854eSKan Liang local64_set(&hwc->period_left, -new);
2032f861854eSKan Liang
2033d31fc13fSKan Liang perf_event_update_userpage(event);
2034d31fc13fSKan Liang
2035d31fc13fSKan Liang return 0;
2036d31fc13fSKan Liang }
2037d31fc13fSKan Liang
20389dfa9a5cSPeter Zijlstra static __always_inline void
20399dfa9a5cSPeter Zijlstra __intel_pmu_pebs_event(struct perf_event *event,
20407010d129SBorislav Petkov struct pt_regs *iregs,
20419dfa9a5cSPeter Zijlstra struct perf_sample_data *data,
20427010d129SBorislav Petkov void *base, void *top,
2043c22497f5SKan Liang int bit, int count,
2044c22497f5SKan Liang void (*setup_sample)(struct perf_event *,
2045c22497f5SKan Liang struct pt_regs *,
2046c22497f5SKan Liang void *,
2047c22497f5SKan Liang struct perf_sample_data *,
2048c22497f5SKan Liang struct pt_regs *))
20497010d129SBorislav Petkov {
2050c22497f5SKan Liang struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2051d31fc13fSKan Liang struct hw_perf_event *hwc = &event->hw;
2052c22497f5SKan Liang struct x86_perf_regs perf_regs;
2053c22497f5SKan Liang struct pt_regs *regs = &perf_regs.regs;
20547010d129SBorislav Petkov void *at = get_next_pebs_record_by_bit(base, top, bit);
2055e506d1daSPeter Zijlstra static struct pt_regs dummy_iregs;
20567010d129SBorislav Petkov
2057d31fc13fSKan Liang if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
2058d31fc13fSKan Liang /*
2059d31fc13fSKan Liang * For now, auto-reload is only enabled in fixed period mode.
2060d31fc13fSKan Liang * The reload value is always hwc->sample_period.
2061d31fc13fSKan Liang * This may need to change if auto-reload is ever enabled
2062d31fc13fSKan Liang * in freq mode.
2063d31fc13fSKan Liang */
2064d31fc13fSKan Liang intel_pmu_save_and_restart_reload(event, count);
2065d31fc13fSKan Liang } else if (!intel_pmu_save_and_restart(event))
20667010d129SBorislav Petkov return;
20677010d129SBorislav Petkov
206835d1ce6bSKan Liang if (!iregs)
206935d1ce6bSKan Liang iregs = &dummy_iregs;
207035d1ce6bSKan Liang
20717010d129SBorislav Petkov while (count > 1) {
20729dfa9a5cSPeter Zijlstra setup_sample(event, iregs, at, data, regs);
20739dfa9a5cSPeter Zijlstra perf_event_output(event, data, regs);
2074c22497f5SKan Liang at += cpuc->pebs_record_size;
20757010d129SBorislav Petkov at = get_next_pebs_record_by_bit(at, top, bit);
20767010d129SBorislav Petkov count--;
20777010d129SBorislav Petkov }
20787010d129SBorislav Petkov
20799dfa9a5cSPeter Zijlstra setup_sample(event, iregs, at, data, regs);
208035d1ce6bSKan Liang if (iregs == &dummy_iregs) {
208135d1ce6bSKan Liang /*
208235d1ce6bSKan Liang * The PEBS records may be drained in the non-overflow context,
208335d1ce6bSKan Liang * e.g., large PEBS + context switch. Perf should treat the
208435d1ce6bSKan Liang * last record the same as other PEBS records, and must not
208535d1ce6bSKan Liang * invoke the generic overflow handler.
208635d1ce6bSKan Liang */
20879dfa9a5cSPeter Zijlstra perf_event_output(event, data, regs);
208835d1ce6bSKan Liang } else {
20897010d129SBorislav Petkov /*
20907010d129SBorislav Petkov * All but the last records are processed.
20917010d129SBorislav Petkov * The last one is left to be able to call the overflow handler.
20927010d129SBorislav Petkov */
20939dfa9a5cSPeter Zijlstra if (perf_event_overflow(event, data, regs))
20947010d129SBorislav Petkov x86_pmu_stop(event, 0);
20957010d129SBorislav Petkov }
20967010d129SBorislav Petkov }
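/*
 * A minimal model of the loop above (illustrative only; all helper
 * names are hypothetical): every record but the last is emitted
 * directly, and the last one is routed through the overflow path so
 * that throttling and period management run exactly once per drain.
 *
 *	while (count > 1) {
 *		setup_and_emit(at);		// perf_event_output()
 *		at = next_record(at, top);	// get_next_pebs_record_by_bit()
 *		count--;
 *	}
 *	if (handle_last(at))			// perf_event_overflow()
 *		stop_counter();			// x86_pmu_stop()
 */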
20977010d129SBorislav Petkov
20989dfa9a5cSPeter Zijlstra static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
20997010d129SBorislav Petkov {
21007010d129SBorislav Petkov struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
21017010d129SBorislav Petkov struct debug_store *ds = cpuc->ds;
21027010d129SBorislav Petkov struct perf_event *event = cpuc->events[0]; /* PMC0 only */
21037010d129SBorislav Petkov struct pebs_record_core *at, *top;
21047010d129SBorislav Petkov int n;
21057010d129SBorislav Petkov
21067010d129SBorislav Petkov if (!x86_pmu.pebs_active)
21077010d129SBorislav Petkov return;
21087010d129SBorislav Petkov
21097010d129SBorislav Petkov at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
21107010d129SBorislav Petkov top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
21117010d129SBorislav Petkov
21127010d129SBorislav Petkov /*
21137010d129SBorislav Petkov * Whatever else happens, drain the thing
21147010d129SBorislav Petkov */
21157010d129SBorislav Petkov ds->pebs_index = ds->pebs_buffer_base;
21167010d129SBorislav Petkov
21177010d129SBorislav Petkov if (!test_bit(0, cpuc->active_mask))
21187010d129SBorislav Petkov return;
21197010d129SBorislav Petkov
21207010d129SBorislav Petkov WARN_ON_ONCE(!event);
21217010d129SBorislav Petkov
21227010d129SBorislav Petkov if (!event->attr.precise_ip)
21237010d129SBorislav Petkov return;
21247010d129SBorislav Petkov
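	/*
	 * Pointer difference between two struct pebs_record_core *
	 * is already a record count, not a byte count.
	 */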
21257010d129SBorislav Petkov n = top - at;
2126d31fc13fSKan Liang if (n <= 0) {
2127d31fc13fSKan Liang if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2128d31fc13fSKan Liang intel_pmu_save_and_restart_reload(event, 0);
21297010d129SBorislav Petkov return;
2130d31fc13fSKan Liang }
21317010d129SBorislav Petkov
21329dfa9a5cSPeter Zijlstra __intel_pmu_pebs_event(event, iregs, data, at, top, 0, n,
2133c22497f5SKan Liang setup_pebs_fixed_sample_data);
21347010d129SBorislav Petkov }
21357010d129SBorislav Petkov
2136477f00f9SKan Liang static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
2137477f00f9SKan Liang {
2138477f00f9SKan Liang struct perf_event *event;
2139477f00f9SKan Liang int bit;
2140477f00f9SKan Liang
2141477f00f9SKan Liang /*
2142477f00f9SKan Liang * drain_pebs() could be called twice in a short period,
2143477f00f9SKan Liang * e.g. for an auto-reload event in pmu::read(), with no
2144477f00f9SKan Liang * overflows having happened in between. In that case
2145477f00f9SKan Liang * intel_pmu_save_and_restart_reload() must still be called
2146477f00f9SKan Liang * to update event->count.
2147477f00f9SKan Liang */
2148477f00f9SKan Liang for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
2149477f00f9SKan Liang event = cpuc->events[bit];
2150477f00f9SKan Liang if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2151477f00f9SKan Liang intel_pmu_save_and_restart_reload(event, 0);
2152477f00f9SKan Liang }
2153477f00f9SKan Liang }
2154477f00f9SKan Liang
21559dfa9a5cSPeter Zijlstra static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
21567010d129SBorislav Petkov {
21577010d129SBorislav Petkov struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
21587010d129SBorislav Petkov struct debug_store *ds = cpuc->ds;
21597010d129SBorislav Petkov struct perf_event *event;
21607010d129SBorislav Petkov void *base, *at, *top;
2161ec71a398SKan Liang short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
2162ec71a398SKan Liang short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
2163ec71a398SKan Liang int bit, i, size;
2164ec71a398SKan Liang u64 mask;
21657010d129SBorislav Petkov
21667010d129SBorislav Petkov if (!x86_pmu.pebs_active)
21677010d129SBorislav Petkov return;
21687010d129SBorislav Petkov
21697010d129SBorislav Petkov base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
21707010d129SBorislav Petkov top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
21717010d129SBorislav Petkov
21727010d129SBorislav Petkov ds->pebs_index = ds->pebs_buffer_base;
21737010d129SBorislav Petkov
2174ec71a398SKan Liang mask = (1ULL << x86_pmu.max_pebs_events) - 1;
2175ec71a398SKan Liang size = x86_pmu.max_pebs_events;
2176ec71a398SKan Liang if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
2177ec71a398SKan Liang mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
2178ec71a398SKan Liang size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
2179ec71a398SKan Liang }
2180ec71a398SKan Liang
2181d31fc13fSKan Liang if (unlikely(base >= top)) {
2182477f00f9SKan Liang intel_pmu_pebs_event_update_no_drain(cpuc, size);
21837010d129SBorislav Petkov return;
2184d31fc13fSKan Liang }
21857010d129SBorislav Petkov
21867010d129SBorislav Petkov for (at = base; at < top; at += x86_pmu.pebs_record_size) {
21877010d129SBorislav Petkov struct pebs_record_nhm *p = at;
21887010d129SBorislav Petkov u64 pebs_status;
21897010d129SBorislav Petkov
21908ef9b845SPeter Zijlstra pebs_status = p->status & cpuc->pebs_enabled;
2191ec71a398SKan Liang pebs_status &= mask;
21928ef9b845SPeter Zijlstra
21938ef9b845SPeter Zijlstra /* PEBS v3 has more accurate status bits */
21947010d129SBorislav Petkov if (x86_pmu.intel_cap.pebs_format >= 3) {
2195c22497f5SKan Liang for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
21967010d129SBorislav Petkov counts[bit]++;
21977010d129SBorislav Petkov
21987010d129SBorislav Petkov continue;
21997010d129SBorislav Petkov }
22007010d129SBorislav Petkov
22017010d129SBorislav Petkov /*
22027010d129SBorislav Petkov * On some CPUs the PEBS status can be zero when PEBS is
22037010d129SBorislav Petkov * racing with clearing of GLOBAL_STATUS.
22047010d129SBorislav Petkov *
22057010d129SBorislav Petkov * Normally we would drop that record, but in the
22067010d129SBorislav Petkov * case when there is only a single active PEBS event
22077010d129SBorislav Petkov * we can assume it's for that event.
22087010d129SBorislav Petkov */
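		/*
		 * x & (x - 1) clears the lowest set bit, so the check
		 * below is true iff exactly one counter is enabled in
		 * cpuc->pebs_enabled.
		 */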
22097010d129SBorislav Petkov if (!pebs_status && cpuc->pebs_enabled &&
22107010d129SBorislav Petkov !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
2211d88d05a9SKan Liang pebs_status = p->status = cpuc->pebs_enabled;
22127010d129SBorislav Petkov
22137010d129SBorislav Petkov bit = find_first_bit((unsigned long *)&pebs_status,
22147010d129SBorislav Petkov x86_pmu.max_pebs_events);
22157010d129SBorislav Petkov if (bit >= x86_pmu.max_pebs_events)
22167010d129SBorislav Petkov continue;
22177010d129SBorislav Petkov
22187010d129SBorislav Petkov /*
22197010d129SBorislav Petkov * The PEBS hardware does not deal well with the situation
22207010d129SBorislav Petkov * when events occur close to each other and multiple bits
22217010d129SBorislav Petkov * are set. But this should happen rarely.
22227010d129SBorislav Petkov *
22237010d129SBorislav Petkov * If these events include one PEBS and multiple non-PEBS
22247010d129SBorislav Petkov * events, it doesn't impact the PEBS record. The record will
22257010d129SBorislav Petkov * be handled normally. (slow path)
22267010d129SBorislav Petkov *
22277010d129SBorislav Petkov * If these events include two or more PEBS events, the
22287010d129SBorislav Petkov * records for the events can be collapsed into a single
22297010d129SBorislav Petkov * one, and it's not possible to reconstruct all events
22307010d129SBorislav Petkov * that caused the PEBS record. This is called a collision.
22317010d129SBorislav Petkov * If a collision happened, the record will be dropped.
22327010d129SBorislav Petkov */
2233fc17db8aSStephane Eranian if (pebs_status != (1ULL << bit)) {
2234c22497f5SKan Liang for_each_set_bit(i, (unsigned long *)&pebs_status, size)
22357010d129SBorislav Petkov error[i]++;
22367010d129SBorislav Petkov continue;
22377010d129SBorislav Petkov }
22387010d129SBorislav Petkov
22397010d129SBorislav Petkov counts[bit]++;
22407010d129SBorislav Petkov }
22417010d129SBorislav Petkov
2242c22497f5SKan Liang for_each_set_bit(bit, (unsigned long *)&mask, size) {
22437010d129SBorislav Petkov if ((counts[bit] == 0) && (error[bit] == 0))
22447010d129SBorislav Petkov continue;
22457010d129SBorislav Petkov
22467010d129SBorislav Petkov event = cpuc->events[bit];
22478ef9b845SPeter Zijlstra if (WARN_ON_ONCE(!event))
22488ef9b845SPeter Zijlstra continue;
22498ef9b845SPeter Zijlstra
22508ef9b845SPeter Zijlstra if (WARN_ON_ONCE(!event->attr.precise_ip))
22518ef9b845SPeter Zijlstra continue;
22527010d129SBorislav Petkov
22537010d129SBorislav Petkov /* log the number of dropped samples */
2254475113d9SJiri Olsa if (error[bit]) {
22557010d129SBorislav Petkov perf_log_lost_samples(event, error[bit]);
22567010d129SBorislav Petkov
22575debf021SNamhyung Kim if (iregs && perf_event_account_interrupt(event))
2258475113d9SJiri Olsa x86_pmu_stop(event, 0);
2259475113d9SJiri Olsa }
2260475113d9SJiri Olsa
22617010d129SBorislav Petkov if (counts[bit]) {
22629dfa9a5cSPeter Zijlstra __intel_pmu_pebs_event(event, iregs, data, base,
2263c22497f5SKan Liang top, bit, counts[bit],
2264c22497f5SKan Liang setup_pebs_fixed_sample_data);
22657010d129SBorislav Petkov }
22667010d129SBorislav Petkov }
22677010d129SBorislav Petkov }
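/*
 * Worked example (illustrative): if a record reports p->status = 0x6
 * while counters 1 and 2 are both PEBS-enabled, find_first_bit() picks
 * bit 1, but pebs_status (0x6) != (1ULL << 1), so the record is counted
 * as an error for both counters and dropped rather than misattributed.
 */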
22687010d129SBorislav Petkov
22699dfa9a5cSPeter Zijlstra static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
2270c22497f5SKan Liang {
2271c22497f5SKan Liang short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
2272c22497f5SKan Liang struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2273d4b294bfSKan Liang int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
2274d4b294bfSKan Liang int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
2275c22497f5SKan Liang struct debug_store *ds = cpuc->ds;
2276c22497f5SKan Liang struct perf_event *event;
2277c22497f5SKan Liang void *base, *at, *top;
2278c22497f5SKan Liang int bit, size;
2279c22497f5SKan Liang u64 mask;
2280c22497f5SKan Liang
2281c22497f5SKan Liang if (!x86_pmu.pebs_active)
2282c22497f5SKan Liang return;
2283c22497f5SKan Liang
2284c22497f5SKan Liang base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
2285c22497f5SKan Liang top = (struct pebs_basic *)(unsigned long)ds->pebs_index;
2286c22497f5SKan Liang
2287c22497f5SKan Liang ds->pebs_index = ds->pebs_buffer_base;
2288c22497f5SKan Liang
2289d4b294bfSKan Liang mask = ((1ULL << max_pebs_events) - 1) |
2290d4b294bfSKan Liang (((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
2291d4b294bfSKan Liang size = INTEL_PMC_IDX_FIXED + num_counters_fixed;
2292c22497f5SKan Liang
2293c22497f5SKan Liang if (unlikely(base >= top)) {
2294c22497f5SKan Liang intel_pmu_pebs_event_update_no_drain(cpuc, size);
2295c22497f5SKan Liang return;
2296c22497f5SKan Liang }
2297c22497f5SKan Liang
2298c22497f5SKan Liang for (at = base; at < top; at += cpuc->pebs_record_size) {
2299c22497f5SKan Liang u64 pebs_status;
2300c22497f5SKan Liang
2301c22497f5SKan Liang pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
2302c22497f5SKan Liang pebs_status &= mask;
2303c22497f5SKan Liang
2304c22497f5SKan Liang for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
2305c22497f5SKan Liang counts[bit]++;
2306c22497f5SKan Liang }
2307c22497f5SKan Liang
2308c22497f5SKan Liang for_each_set_bit(bit, (unsigned long *)&mask, size) {
2309c22497f5SKan Liang if (counts[bit] == 0)
2310c22497f5SKan Liang continue;
2311c22497f5SKan Liang
2312c22497f5SKan Liang event = cpuc->events[bit];
2313c22497f5SKan Liang if (WARN_ON_ONCE(!event))
2314c22497f5SKan Liang continue;
2315c22497f5SKan Liang
2316c22497f5SKan Liang if (WARN_ON_ONCE(!event->attr.precise_ip))
2317c22497f5SKan Liang continue;
2318c22497f5SKan Liang
23199dfa9a5cSPeter Zijlstra __intel_pmu_pebs_event(event, iregs, data, base,
2320c22497f5SKan Liang top, bit, counts[bit],
2321c22497f5SKan Liang setup_pebs_adaptive_sample_data);
2322c22497f5SKan Liang }
2323c22497f5SKan Liang }
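/*
 * Worked example (illustrative): with max_pebs_events == 8,
 * num_counters_fixed == 4 and INTEL_PMC_IDX_FIXED == 32:
 *
 *	mask = ((1ULL << 8) - 1) | (((1ULL << 4) - 1) << 32);
 *	     // == 0x0000000f000000ffULL
 *	size = 32 + 4;	// scan status bits 0-7 and 32-35
 */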
2324c22497f5SKan Liang
23257010d129SBorislav Petkov /*
23267010d129SBorislav Petkov * BTS, PEBS probe and setup
23277010d129SBorislav Petkov */
23287010d129SBorislav Petkov
23297010d129SBorislav Petkov void __init intel_ds_init(void)
23307010d129SBorislav Petkov {
23317010d129SBorislav Petkov /*
23327010d129SBorislav Petkov * No support for 32-bit formats
23337010d129SBorislav Petkov */
23347010d129SBorislav Petkov if (!boot_cpu_has(X86_FEATURE_DTES64))
23357010d129SBorislav Petkov return;
23367010d129SBorislav Petkov
23377010d129SBorislav Petkov x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
23387010d129SBorislav Petkov x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
2339e72daf3fSJiri Olsa x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
2340cd6b984fSKan Liang if (x86_pmu.version <= 4)
23419b545c04SAndi Kleen x86_pmu.pebs_no_isolation = 1;
2342cd6b984fSKan Liang
23437010d129SBorislav Petkov if (x86_pmu.pebs) {
23447010d129SBorislav Petkov char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
2345c22497f5SKan Liang char *pebs_qual = "";
23467010d129SBorislav Petkov int format = x86_pmu.intel_cap.pebs_format;
23477010d129SBorislav Petkov
2348c22497f5SKan Liang if (format < 4)
2349c22497f5SKan Liang x86_pmu.intel_cap.pebs_baseline = 0;
2350c22497f5SKan Liang
23517010d129SBorislav Petkov switch (format) {
23527010d129SBorislav Petkov case 0:
23537010d129SBorislav Petkov pr_cont("PEBS fmt0%c, ", pebs_type);
23547010d129SBorislav Petkov x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
2355e72daf3fSJiri Olsa /*
2356e72daf3fSJiri Olsa * Using >PAGE_SIZE buffers makes the WRMSR to
2357e72daf3fSJiri Olsa * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
2358e72daf3fSJiri Olsa * mysteriously hang on Core2.
2359e72daf3fSJiri Olsa *
2360e72daf3fSJiri Olsa * As a workaround, we don't do this.
2361e72daf3fSJiri Olsa */
2362e72daf3fSJiri Olsa x86_pmu.pebs_buffer_size = PAGE_SIZE;
23637010d129SBorislav Petkov x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
23647010d129SBorislav Petkov break;
23657010d129SBorislav Petkov
23667010d129SBorislav Petkov case 1:
23677010d129SBorislav Petkov pr_cont("PEBS fmt1%c, ", pebs_type);
23687010d129SBorislav Petkov x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
23697010d129SBorislav Petkov x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
23707010d129SBorislav Petkov break;
23717010d129SBorislav Petkov
23727010d129SBorislav Petkov case 2:
23737010d129SBorislav Petkov pr_cont("PEBS fmt2%c, ", pebs_type);
23747010d129SBorislav Petkov x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
23757010d129SBorislav Petkov x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
23767010d129SBorislav Petkov break;
23777010d129SBorislav Petkov
23787010d129SBorislav Petkov case 3:
23797010d129SBorislav Petkov pr_cont("PEBS fmt3%c, ", pebs_type);
23807010d129SBorislav Petkov x86_pmu.pebs_record_size =
23817010d129SBorislav Petkov sizeof(struct pebs_record_skl);
23827010d129SBorislav Petkov x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
2383174afc3eSKan Liang x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
23847010d129SBorislav Petkov break;
23857010d129SBorislav Petkov
23862145e77fSKan Liang case 5:
238713738a36SLike Xu x86_pmu.pebs_ept = 1;
238813738a36SLike Xu fallthrough;
238913738a36SLike Xu case 4:
2390c22497f5SKan Liang x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
2391c22497f5SKan Liang x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
2392c22497f5SKan Liang if (x86_pmu.intel_cap.pebs_baseline) {
2393c22497f5SKan Liang x86_pmu.large_pebs_flags |=
2394c22497f5SKan Liang PERF_SAMPLE_BRANCH_STACK |
2395c22497f5SKan Liang PERF_SAMPLE_TIME;
2396c22497f5SKan Liang x86_pmu.flags |= PMU_FL_PEBS_ALL;
23977d359886SPeter Zijlstra x86_pmu.pebs_capable = ~0ULL;
2398c22497f5SKan Liang pebs_qual = "-baseline";
239961e76d53SKan Liang x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
2400c22497f5SKan Liang } else {
2401c22497f5SKan Liang /* Only basic record supported */
2402c22497f5SKan Liang x86_pmu.large_pebs_flags &=
2403c22497f5SKan Liang ~(PERF_SAMPLE_ADDR |
2404c22497f5SKan Liang PERF_SAMPLE_TIME |
2405c22497f5SKan Liang PERF_SAMPLE_DATA_SRC |
2406c22497f5SKan Liang PERF_SAMPLE_TRANSACTION |
2407c22497f5SKan Liang PERF_SAMPLE_REGS_USER |
2408c22497f5SKan Liang PERF_SAMPLE_REGS_INTR);
2409c22497f5SKan Liang }
2410c22497f5SKan Liang pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
241142880f72SAlexander Shishkin
2412d0946a88SKan Liang if (!is_hybrid() && x86_pmu.intel_cap.pebs_output_pt_available) {
241342880f72SAlexander Shishkin pr_cont("PEBS-via-PT, ");
241461e76d53SKan Liang x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
241542880f72SAlexander Shishkin }
241642880f72SAlexander Shishkin
2417c22497f5SKan Liang break;
2418c22497f5SKan Liang
24197010d129SBorislav Petkov default:
24207010d129SBorislav Petkov pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
24217010d129SBorislav Petkov x86_pmu.pebs = 0;
24227010d129SBorislav Petkov }
24237010d129SBorislav Petkov }
24247010d129SBorislav Petkov }
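/*
 * Summary of the mapping established above (informational):
 *
 *	fmt 0: pebs_record_core, intel_pmu_drain_pebs_core, PAGE_SIZE buffer
 *	fmt 1: pebs_record_nhm,  intel_pmu_drain_pebs_nhm
 *	fmt 2: pebs_record_hsw,  intel_pmu_drain_pebs_nhm
 *	fmt 3: pebs_record_skl,  intel_pmu_drain_pebs_nhm, +PERF_SAMPLE_TIME
 *	fmt 4: pebs_basic,       intel_pmu_drain_pebs_icl, adaptive if baseline
 *	fmt 5: as fmt 4, plus x86_pmu.pebs_ept
 */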
24257010d129SBorislav Petkov
24267010d129SBorislav Petkov void perf_restore_debug_store(void)
24277010d129SBorislav Petkov {
24287010d129SBorislav Petkov struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
24297010d129SBorislav Petkov
24307010d129SBorislav Petkov if (!x86_pmu.bts && !x86_pmu.pebs)
24317010d129SBorislav Petkov return;
24327010d129SBorislav Petkov
24337010d129SBorislav Petkov wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
24347010d129SBorislav Petkov }