xref: /openbmc/linux/arch/x86/events/intel/ds.c (revision d699090510c3223641a23834b4710e2d4309a6ad)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/bitops.h>
3 #include <linux/types.h>
4 #include <linux/slab.h>
5 #include <linux/sched/clock.h>
6 
7 #include <asm/cpu_entry_area.h>
8 #include <asm/perf_event.h>
9 #include <asm/tlbflush.h>
10 #include <asm/insn.h>
11 #include <asm/io.h>
12 #include <asm/timer.h>
13 
14 #include "../perf_event.h"
15 
16 /* Waste a full page so it can be mapped into the cpu_entry_area */
17 DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
18 
19 /* The size of a BTS record in bytes: */
20 #define BTS_RECORD_SIZE		24
21 
22 #define PEBS_FIXUP_SIZE		PAGE_SIZE
23 
24 /*
25  * pebs_record_32 for p4 and core not supported
26 
27 struct pebs_record_32 {
28 	u32 flags, ip;
29 	u32 ax, bc, cx, dx;
30 	u32 si, di, bp, sp;
31 };
32 
33  */
34 
35 union intel_x86_pebs_dse {
36 	u64 val;
37 	struct {
38 		unsigned int ld_dse:4;
39 		unsigned int ld_stlb_miss:1;
40 		unsigned int ld_locked:1;
41 		unsigned int ld_data_blk:1;
42 		unsigned int ld_addr_blk:1;
43 		unsigned int ld_reserved:24;
44 	};
45 	struct {
46 		unsigned int st_l1d_hit:1;
47 		unsigned int st_reserved1:3;
48 		unsigned int st_stlb_miss:1;
49 		unsigned int st_locked:1;
50 		unsigned int st_reserved2:26;
51 	};
52 	struct {
53 		unsigned int st_lat_dse:4;
54 		unsigned int st_lat_stlb_miss:1;
55 		unsigned int st_lat_locked:1;
56 		unsigned int ld_reserved3:26;
57 	};
58 	struct {
59 		unsigned int mtl_dse:5;
60 		unsigned int mtl_locked:1;
61 		unsigned int mtl_stlb_miss:1;
62 		unsigned int mtl_fwd_blk:1;
63 		unsigned int ld_reserved4:24;
64 	};
65 };
66 
67 
68 /*
69  * Map PEBS Load Latency Data Source encodings to generic
70  * memory data source information
71  */
72 #define P(a, b) PERF_MEM_S(a, b)
73 #define OP_LH (P(OP, LOAD) | P(LVL, HIT))
74 #define LEVEL(x) P(LVLNUM, x)
75 #define REM P(REMOTE, REMOTE)
76 #define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
77 
78 /* Version for Sandy Bridge and later */
79 static u64 pebs_data_source[] = {
80 	P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
81 	OP_LH | P(LVL, L1)  | LEVEL(L1) | P(SNOOP, NONE),  /* 0x01: L1 local */
82 	OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
83 	OP_LH | P(LVL, L2)  | LEVEL(L2) | P(SNOOP, NONE),  /* 0x03: L2 hit */
84 	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, NONE),  /* 0x04: L3 hit */
85 	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, MISS),  /* 0x05: L3 hit, snoop miss */
86 	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, HIT),   /* 0x06: L3 hit, snoop hit */
87 	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, HITM),  /* 0x07: L3 hit, snoop hitm */
88 	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x08: L3 miss snoop hit */
89 	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/
90 	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, HIT),       /* 0x0a: L3 miss, shared */
91 	OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x0b: L3 miss, shared */
92 	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | SNOOP_NONE_MISS,     /* 0x0c: L3 miss, excl */
93 	OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */
94 	OP_LH | P(LVL, IO)  | LEVEL(NA) | P(SNOOP, NONE), /* 0x0e: I/O */
95 	OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0f: uncached */
96 };
97 
98 /* Patch up minor differences in the bits */
intel_pmu_pebs_data_source_nhm(void)99 void __init intel_pmu_pebs_data_source_nhm(void)
100 {
101 	pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
102 	pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
103 	pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
104 }
105 
__intel_pmu_pebs_data_source_skl(bool pmem,u64 * data_source)106 static void __init __intel_pmu_pebs_data_source_skl(bool pmem, u64 *data_source)
107 {
108 	u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);
109 
110 	data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
111 	data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
112 	data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
113 	data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
114 	data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
115 }
116 
intel_pmu_pebs_data_source_skl(bool pmem)117 void __init intel_pmu_pebs_data_source_skl(bool pmem)
118 {
119 	__intel_pmu_pebs_data_source_skl(pmem, pebs_data_source);
120 }
121 
__intel_pmu_pebs_data_source_grt(u64 * data_source)122 static void __init __intel_pmu_pebs_data_source_grt(u64 *data_source)
123 {
124 	data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
125 	data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
126 	data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD);
127 }
128 
intel_pmu_pebs_data_source_grt(void)129 void __init intel_pmu_pebs_data_source_grt(void)
130 {
131 	__intel_pmu_pebs_data_source_grt(pebs_data_source);
132 }
133 
intel_pmu_pebs_data_source_adl(void)134 void __init intel_pmu_pebs_data_source_adl(void)
135 {
136 	u64 *data_source;
137 
138 	data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source;
139 	memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
140 	__intel_pmu_pebs_data_source_skl(false, data_source);
141 
142 	data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;
143 	memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
144 	__intel_pmu_pebs_data_source_grt(data_source);
145 }
146 
__intel_pmu_pebs_data_source_cmt(u64 * data_source)147 static void __init __intel_pmu_pebs_data_source_cmt(u64 *data_source)
148 {
149 	data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD);
150 	data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
151 	data_source[0x0a] = OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, NONE);
152 	data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
153 	data_source[0x0c] = OP_LH | LEVEL(RAM) | REM | P(SNOOPX, FWD);
154 	data_source[0x0d] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, HITM);
155 }
156 
intel_pmu_pebs_data_source_mtl(void)157 void __init intel_pmu_pebs_data_source_mtl(void)
158 {
159 	u64 *data_source;
160 
161 	data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source;
162 	memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
163 	__intel_pmu_pebs_data_source_skl(false, data_source);
164 
165 	data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;
166 	memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
167 	__intel_pmu_pebs_data_source_cmt(data_source);
168 }
169 
intel_pmu_pebs_data_source_cmt(void)170 void __init intel_pmu_pebs_data_source_cmt(void)
171 {
172 	__intel_pmu_pebs_data_source_cmt(pebs_data_source);
173 }
174 
precise_store_data(u64 status)175 static u64 precise_store_data(u64 status)
176 {
177 	union intel_x86_pebs_dse dse;
178 	u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);
179 
180 	dse.val = status;
181 
182 	/*
183 	 * bit 4: TLB access
184 	 * 1 = stored missed 2nd level TLB
185 	 *
186 	 * so it either hit the walker or the OS
187 	 * otherwise hit 2nd level TLB
188 	 */
189 	if (dse.st_stlb_miss)
190 		val |= P(TLB, MISS);
191 	else
192 		val |= P(TLB, HIT);
193 
194 	/*
195 	 * bit 0: hit L1 data cache
196 	 * if not set, then all we know is that
197 	 * it missed L1D
198 	 */
199 	if (dse.st_l1d_hit)
200 		val |= P(LVL, HIT);
201 	else
202 		val |= P(LVL, MISS);
203 
204 	/*
205 	 * bit 5: Locked prefix
206 	 */
207 	if (dse.st_locked)
208 		val |= P(LOCK, LOCKED);
209 
210 	return val;
211 }
212 
precise_datala_hsw(struct perf_event * event,u64 status)213 static u64 precise_datala_hsw(struct perf_event *event, u64 status)
214 {
215 	union perf_mem_data_src dse;
216 
217 	dse.val = PERF_MEM_NA;
218 
219 	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
220 		dse.mem_op = PERF_MEM_OP_STORE;
221 	else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
222 		dse.mem_op = PERF_MEM_OP_LOAD;
223 
224 	/*
225 	 * L1 info only valid for following events:
226 	 *
227 	 * MEM_UOPS_RETIRED.STLB_MISS_STORES
228 	 * MEM_UOPS_RETIRED.LOCK_STORES
229 	 * MEM_UOPS_RETIRED.SPLIT_STORES
230 	 * MEM_UOPS_RETIRED.ALL_STORES
231 	 */
232 	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
233 		if (status & 1)
234 			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
235 		else
236 			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
237 	}
238 	return dse.val;
239 }
240 
pebs_set_tlb_lock(u64 * val,bool tlb,bool lock)241 static inline void pebs_set_tlb_lock(u64 *val, bool tlb, bool lock)
242 {
243 	/*
244 	 * TLB access
245 	 * 0 = did not miss 2nd level TLB
246 	 * 1 = missed 2nd level TLB
247 	 */
248 	if (tlb)
249 		*val |= P(TLB, MISS) | P(TLB, L2);
250 	else
251 		*val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);
252 
253 	/* locked prefix */
254 	if (lock)
255 		*val |= P(LOCK, LOCKED);
256 }
257 
258 /* Retrieve the latency data for e-core of ADL */
__adl_latency_data_small(struct perf_event * event,u64 status,u8 dse,bool tlb,bool lock,bool blk)259 static u64 __adl_latency_data_small(struct perf_event *event, u64 status,
260 				     u8 dse, bool tlb, bool lock, bool blk)
261 {
262 	u64 val;
263 
264 	WARN_ON_ONCE(hybrid_pmu(event->pmu)->cpu_type == hybrid_big);
265 
266 	dse &= PERF_PEBS_DATA_SOURCE_MASK;
267 	val = hybrid_var(event->pmu, pebs_data_source)[dse];
268 
269 	pebs_set_tlb_lock(&val, tlb, lock);
270 
271 	if (blk)
272 		val |= P(BLK, DATA);
273 	else
274 		val |= P(BLK, NA);
275 
276 	return val;
277 }
278 
adl_latency_data_small(struct perf_event * event,u64 status)279 u64 adl_latency_data_small(struct perf_event *event, u64 status)
280 {
281 	union intel_x86_pebs_dse dse;
282 
283 	dse.val = status;
284 
285 	return __adl_latency_data_small(event, status, dse.ld_dse,
286 					dse.ld_locked, dse.ld_stlb_miss,
287 					dse.ld_data_blk);
288 }
289 
290 /* Retrieve the latency data for e-core of MTL */
mtl_latency_data_small(struct perf_event * event,u64 status)291 u64 mtl_latency_data_small(struct perf_event *event, u64 status)
292 {
293 	union intel_x86_pebs_dse dse;
294 
295 	dse.val = status;
296 
297 	return __adl_latency_data_small(event, status, dse.mtl_dse,
298 					dse.mtl_stlb_miss, dse.mtl_locked,
299 					dse.mtl_fwd_blk);
300 }
301 
load_latency_data(struct perf_event * event,u64 status)302 static u64 load_latency_data(struct perf_event *event, u64 status)
303 {
304 	union intel_x86_pebs_dse dse;
305 	u64 val;
306 
307 	dse.val = status;
308 
309 	/*
310 	 * use the mapping table for bit 0-3
311 	 */
312 	val = hybrid_var(event->pmu, pebs_data_source)[dse.ld_dse];
313 
314 	/*
315 	 * Nehalem models do not support TLB, Lock infos
316 	 */
317 	if (x86_pmu.pebs_no_tlb) {
318 		val |= P(TLB, NA) | P(LOCK, NA);
319 		return val;
320 	}
321 
322 	pebs_set_tlb_lock(&val, dse.ld_stlb_miss, dse.ld_locked);
323 
324 	/*
325 	 * Ice Lake and earlier models do not support block infos.
326 	 */
327 	if (!x86_pmu.pebs_block) {
328 		val |= P(BLK, NA);
329 		return val;
330 	}
331 	/*
332 	 * bit 6: load was blocked since its data could not be forwarded
333 	 *        from a preceding store
334 	 */
335 	if (dse.ld_data_blk)
336 		val |= P(BLK, DATA);
337 
338 	/*
339 	 * bit 7: load was blocked due to potential address conflict with
340 	 *        a preceding store
341 	 */
342 	if (dse.ld_addr_blk)
343 		val |= P(BLK, ADDR);
344 
345 	if (!dse.ld_data_blk && !dse.ld_addr_blk)
346 		val |= P(BLK, NA);
347 
348 	return val;
349 }
350 
store_latency_data(struct perf_event * event,u64 status)351 static u64 store_latency_data(struct perf_event *event, u64 status)
352 {
353 	union intel_x86_pebs_dse dse;
354 	union perf_mem_data_src src;
355 	u64 val;
356 
357 	dse.val = status;
358 
359 	/*
360 	 * use the mapping table for bit 0-3
361 	 */
362 	val = hybrid_var(event->pmu, pebs_data_source)[dse.st_lat_dse];
363 
364 	pebs_set_tlb_lock(&val, dse.st_lat_stlb_miss, dse.st_lat_locked);
365 
366 	val |= P(BLK, NA);
367 
368 	/*
369 	 * the pebs_data_source table is only for loads
370 	 * so override the mem_op to say STORE instead
371 	 */
372 	src.val = val;
373 	src.mem_op = P(OP,STORE);
374 
375 	return src.val;
376 }
377 
378 struct pebs_record_core {
379 	u64 flags, ip;
380 	u64 ax, bx, cx, dx;
381 	u64 si, di, bp, sp;
382 	u64 r8,  r9,  r10, r11;
383 	u64 r12, r13, r14, r15;
384 };
385 
386 struct pebs_record_nhm {
387 	u64 flags, ip;
388 	u64 ax, bx, cx, dx;
389 	u64 si, di, bp, sp;
390 	u64 r8,  r9,  r10, r11;
391 	u64 r12, r13, r14, r15;
392 	u64 status, dla, dse, lat;
393 };
394 
395 /*
396  * Same as pebs_record_nhm, with two additional fields.
397  */
398 struct pebs_record_hsw {
399 	u64 flags, ip;
400 	u64 ax, bx, cx, dx;
401 	u64 si, di, bp, sp;
402 	u64 r8,  r9,  r10, r11;
403 	u64 r12, r13, r14, r15;
404 	u64 status, dla, dse, lat;
405 	u64 real_ip, tsx_tuning;
406 };
407 
408 union hsw_tsx_tuning {
409 	struct {
410 		u32 cycles_last_block     : 32,
411 		    hle_abort		  : 1,
412 		    rtm_abort		  : 1,
413 		    instruction_abort     : 1,
414 		    non_instruction_abort : 1,
415 		    retry		  : 1,
416 		    data_conflict	  : 1,
417 		    capacity_writes	  : 1,
418 		    capacity_reads	  : 1;
419 	};
420 	u64	    value;
421 };
422 
423 #define PEBS_HSW_TSX_FLAGS	0xff00000000ULL
424 
425 /* Same as HSW, plus TSC */
426 
427 struct pebs_record_skl {
428 	u64 flags, ip;
429 	u64 ax, bx, cx, dx;
430 	u64 si, di, bp, sp;
431 	u64 r8,  r9,  r10, r11;
432 	u64 r12, r13, r14, r15;
433 	u64 status, dla, dse, lat;
434 	u64 real_ip, tsx_tuning;
435 	u64 tsc;
436 };
437 
init_debug_store_on_cpu(int cpu)438 void init_debug_store_on_cpu(int cpu)
439 {
440 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
441 
442 	if (!ds)
443 		return;
444 
445 	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
446 		     (u32)((u64)(unsigned long)ds),
447 		     (u32)((u64)(unsigned long)ds >> 32));
448 }
449 
fini_debug_store_on_cpu(int cpu)450 void fini_debug_store_on_cpu(int cpu)
451 {
452 	if (!per_cpu(cpu_hw_events, cpu).ds)
453 		return;
454 
455 	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
456 }
457 
458 static DEFINE_PER_CPU(void *, insn_buffer);
459 
ds_update_cea(void * cea,void * addr,size_t size,pgprot_t prot)460 static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
461 {
462 	unsigned long start = (unsigned long)cea;
463 	phys_addr_t pa;
464 	size_t msz = 0;
465 
466 	pa = virt_to_phys(addr);
467 
468 	preempt_disable();
469 	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
470 		cea_set_pte(cea, pa, prot);
471 
472 	/*
473 	 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
474 	 * all TLB entries for it.
475 	 */
476 	flush_tlb_kernel_range(start, start + size);
477 	preempt_enable();
478 }
479 
ds_clear_cea(void * cea,size_t size)480 static void ds_clear_cea(void *cea, size_t size)
481 {
482 	unsigned long start = (unsigned long)cea;
483 	size_t msz = 0;
484 
485 	preempt_disable();
486 	for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
487 		cea_set_pte(cea, 0, PAGE_NONE);
488 
489 	flush_tlb_kernel_range(start, start + size);
490 	preempt_enable();
491 }
492 
dsalloc_pages(size_t size,gfp_t flags,int cpu)493 static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
494 {
495 	unsigned int order = get_order(size);
496 	int node = cpu_to_node(cpu);
497 	struct page *page;
498 
499 	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
500 	return page ? page_address(page) : NULL;
501 }
502 
dsfree_pages(const void * buffer,size_t size)503 static void dsfree_pages(const void *buffer, size_t size)
504 {
505 	if (buffer)
506 		free_pages((unsigned long)buffer, get_order(size));
507 }
508 
alloc_pebs_buffer(int cpu)509 static int alloc_pebs_buffer(int cpu)
510 {
511 	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
512 	struct debug_store *ds = hwev->ds;
513 	size_t bsiz = x86_pmu.pebs_buffer_size;
514 	int max, node = cpu_to_node(cpu);
515 	void *buffer, *insn_buff, *cea;
516 
517 	if (!x86_pmu.pebs)
518 		return 0;
519 
520 	buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
521 	if (unlikely(!buffer))
522 		return -ENOMEM;
523 
524 	/*
525 	 * HSW+ already provides us the eventing ip; no need to allocate this
526 	 * buffer then.
527 	 */
528 	if (x86_pmu.intel_cap.pebs_format < 2) {
529 		insn_buff = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
530 		if (!insn_buff) {
531 			dsfree_pages(buffer, bsiz);
532 			return -ENOMEM;
533 		}
534 		per_cpu(insn_buffer, cpu) = insn_buff;
535 	}
536 	hwev->ds_pebs_vaddr = buffer;
537 	/* Update the cpu entry area mapping */
538 	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
539 	ds->pebs_buffer_base = (unsigned long) cea;
540 	ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
541 	ds->pebs_index = ds->pebs_buffer_base;
542 	max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
543 	ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
544 	return 0;
545 }
546 
release_pebs_buffer(int cpu)547 static void release_pebs_buffer(int cpu)
548 {
549 	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
550 	void *cea;
551 
552 	if (!x86_pmu.pebs)
553 		return;
554 
555 	kfree(per_cpu(insn_buffer, cpu));
556 	per_cpu(insn_buffer, cpu) = NULL;
557 
558 	/* Clear the fixmap */
559 	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
560 	ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
561 	dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
562 	hwev->ds_pebs_vaddr = NULL;
563 }
564 
alloc_bts_buffer(int cpu)565 static int alloc_bts_buffer(int cpu)
566 {
567 	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
568 	struct debug_store *ds = hwev->ds;
569 	void *buffer, *cea;
570 	int max;
571 
572 	if (!x86_pmu.bts)
573 		return 0;
574 
575 	buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
576 	if (unlikely(!buffer)) {
577 		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
578 		return -ENOMEM;
579 	}
580 	hwev->ds_bts_vaddr = buffer;
581 	/* Update the fixmap */
582 	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
583 	ds->bts_buffer_base = (unsigned long) cea;
584 	ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
585 	ds->bts_index = ds->bts_buffer_base;
586 	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
587 	ds->bts_absolute_maximum = ds->bts_buffer_base +
588 					max * BTS_RECORD_SIZE;
589 	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
590 					(max / 16) * BTS_RECORD_SIZE;
591 	return 0;
592 }
593 
release_bts_buffer(int cpu)594 static void release_bts_buffer(int cpu)
595 {
596 	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
597 	void *cea;
598 
599 	if (!x86_pmu.bts)
600 		return;
601 
602 	/* Clear the fixmap */
603 	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
604 	ds_clear_cea(cea, BTS_BUFFER_SIZE);
605 	dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
606 	hwev->ds_bts_vaddr = NULL;
607 }
608 
alloc_ds_buffer(int cpu)609 static int alloc_ds_buffer(int cpu)
610 {
611 	struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;
612 
613 	memset(ds, 0, sizeof(*ds));
614 	per_cpu(cpu_hw_events, cpu).ds = ds;
615 	return 0;
616 }
617 
release_ds_buffer(int cpu)618 static void release_ds_buffer(int cpu)
619 {
620 	per_cpu(cpu_hw_events, cpu).ds = NULL;
621 }
622 
release_ds_buffers(void)623 void release_ds_buffers(void)
624 {
625 	int cpu;
626 
627 	if (!x86_pmu.bts && !x86_pmu.pebs)
628 		return;
629 
630 	for_each_possible_cpu(cpu)
631 		release_ds_buffer(cpu);
632 
633 	for_each_possible_cpu(cpu) {
634 		/*
635 		 * Again, ignore errors from offline CPUs, they will no longer
636 		 * observe cpu_hw_events.ds and not program the DS_AREA when
637 		 * they come up.
638 		 */
639 		fini_debug_store_on_cpu(cpu);
640 	}
641 
642 	for_each_possible_cpu(cpu) {
643 		release_pebs_buffer(cpu);
644 		release_bts_buffer(cpu);
645 	}
646 }
647 
reserve_ds_buffers(void)648 void reserve_ds_buffers(void)
649 {
650 	int bts_err = 0, pebs_err = 0;
651 	int cpu;
652 
653 	x86_pmu.bts_active = 0;
654 	x86_pmu.pebs_active = 0;
655 
656 	if (!x86_pmu.bts && !x86_pmu.pebs)
657 		return;
658 
659 	if (!x86_pmu.bts)
660 		bts_err = 1;
661 
662 	if (!x86_pmu.pebs)
663 		pebs_err = 1;
664 
665 	for_each_possible_cpu(cpu) {
666 		if (alloc_ds_buffer(cpu)) {
667 			bts_err = 1;
668 			pebs_err = 1;
669 		}
670 
671 		if (!bts_err && alloc_bts_buffer(cpu))
672 			bts_err = 1;
673 
674 		if (!pebs_err && alloc_pebs_buffer(cpu))
675 			pebs_err = 1;
676 
677 		if (bts_err && pebs_err)
678 			break;
679 	}
680 
681 	if (bts_err) {
682 		for_each_possible_cpu(cpu)
683 			release_bts_buffer(cpu);
684 	}
685 
686 	if (pebs_err) {
687 		for_each_possible_cpu(cpu)
688 			release_pebs_buffer(cpu);
689 	}
690 
691 	if (bts_err && pebs_err) {
692 		for_each_possible_cpu(cpu)
693 			release_ds_buffer(cpu);
694 	} else {
695 		if (x86_pmu.bts && !bts_err)
696 			x86_pmu.bts_active = 1;
697 
698 		if (x86_pmu.pebs && !pebs_err)
699 			x86_pmu.pebs_active = 1;
700 
701 		for_each_possible_cpu(cpu) {
702 			/*
703 			 * Ignores wrmsr_on_cpu() errors for offline CPUs they
704 			 * will get this call through intel_pmu_cpu_starting().
705 			 */
706 			init_debug_store_on_cpu(cpu);
707 		}
708 	}
709 }
710 
711 /*
712  * BTS
713  */
714 
715 struct event_constraint bts_constraint =
716 	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);
717 
intel_pmu_enable_bts(u64 config)718 void intel_pmu_enable_bts(u64 config)
719 {
720 	unsigned long debugctlmsr;
721 
722 	debugctlmsr = get_debugctlmsr();
723 
724 	debugctlmsr |= DEBUGCTLMSR_TR;
725 	debugctlmsr |= DEBUGCTLMSR_BTS;
726 	if (config & ARCH_PERFMON_EVENTSEL_INT)
727 		debugctlmsr |= DEBUGCTLMSR_BTINT;
728 
729 	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
730 		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;
731 
732 	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
733 		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;
734 
735 	update_debugctlmsr(debugctlmsr);
736 }
737 
intel_pmu_disable_bts(void)738 void intel_pmu_disable_bts(void)
739 {
740 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
741 	unsigned long debugctlmsr;
742 
743 	if (!cpuc->ds)
744 		return;
745 
746 	debugctlmsr = get_debugctlmsr();
747 
748 	debugctlmsr &=
749 		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
750 		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);
751 
752 	update_debugctlmsr(debugctlmsr);
753 }
754 
intel_pmu_drain_bts_buffer(void)755 int intel_pmu_drain_bts_buffer(void)
756 {
757 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
758 	struct debug_store *ds = cpuc->ds;
759 	struct bts_record {
760 		u64	from;
761 		u64	to;
762 		u64	flags;
763 	};
764 	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
765 	struct bts_record *at, *base, *top;
766 	struct perf_output_handle handle;
767 	struct perf_event_header header;
768 	struct perf_sample_data data;
769 	unsigned long skip = 0;
770 	struct pt_regs regs;
771 
772 	if (!event)
773 		return 0;
774 
775 	if (!x86_pmu.bts_active)
776 		return 0;
777 
778 	base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
779 	top  = (struct bts_record *)(unsigned long)ds->bts_index;
780 
781 	if (top <= base)
782 		return 0;
783 
784 	memset(&regs, 0, sizeof(regs));
785 
786 	ds->bts_index = ds->bts_buffer_base;
787 
788 	perf_sample_data_init(&data, 0, event->hw.last_period);
789 
790 	/*
791 	 * BTS leaks kernel addresses in branches across the cpl boundary,
792 	 * such as traps or system calls, so unless the user is asking for
793 	 * kernel tracing (and right now it's not possible), we'd need to
794 	 * filter them out. But first we need to count how many of those we
795 	 * have in the current batch. This is an extra O(n) pass, however,
796 	 * it's much faster than the other one especially considering that
797 	 * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see the
798 	 * alloc_bts_buffer()).
799 	 */
800 	for (at = base; at < top; at++) {
801 		/*
802 		 * Note that right now *this* BTS code only works if
803 		 * attr::exclude_kernel is set, but let's keep this extra
804 		 * check here in case that changes.
805 		 */
806 		if (event->attr.exclude_kernel &&
807 		    (kernel_ip(at->from) || kernel_ip(at->to)))
808 			skip++;
809 	}
810 
811 	/*
812 	 * Prepare a generic sample, i.e. fill in the invariant fields.
813 	 * We will overwrite the from and to address before we output
814 	 * the sample.
815 	 */
816 	rcu_read_lock();
817 	perf_prepare_sample(&data, event, &regs);
818 	perf_prepare_header(&header, &data, event, &regs);
819 
820 	if (perf_output_begin(&handle, &data, event,
821 			      header.size * (top - base - skip)))
822 		goto unlock;
823 
824 	for (at = base; at < top; at++) {
825 		/* Filter out any records that contain kernel addresses. */
826 		if (event->attr.exclude_kernel &&
827 		    (kernel_ip(at->from) || kernel_ip(at->to)))
828 			continue;
829 
830 		data.ip		= at->from;
831 		data.addr	= at->to;
832 
833 		perf_output_sample(&handle, &header, &data, event);
834 	}
835 
836 	perf_output_end(&handle);
837 
838 	/* There's new data available. */
839 	event->hw.interrupts++;
840 	event->pending_kill = POLL_IN;
841 unlock:
842 	rcu_read_unlock();
843 	return 1;
844 }
845 
intel_pmu_drain_pebs_buffer(void)846 void intel_pmu_drain_pebs_buffer(void)
847 {
848 	struct perf_sample_data data;
849 
850 	static_call(x86_pmu_drain_pebs)(NULL, &data);
851 }
852 
853 /*
854  * PEBS
855  */
856 struct event_constraint intel_core2_pebs_event_constraints[] = {
857 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
858 	INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
859 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
860 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
861 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
862 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
863 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
864 	EVENT_CONSTRAINT_END
865 };
866 
867 struct event_constraint intel_atom_pebs_event_constraints[] = {
868 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
869 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
870 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
871 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
872 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
873 	/* Allow all events as PEBS with no flags */
874 	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
875 	EVENT_CONSTRAINT_END
876 };
877 
878 struct event_constraint intel_slm_pebs_event_constraints[] = {
879 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
880 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
881 	/* Allow all events as PEBS with no flags */
882 	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
883 	EVENT_CONSTRAINT_END
884 };
885 
886 struct event_constraint intel_glm_pebs_event_constraints[] = {
887 	/* Allow all events as PEBS with no flags */
888 	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
889 	EVENT_CONSTRAINT_END
890 };
891 
892 struct event_constraint intel_grt_pebs_event_constraints[] = {
893 	/* Allow all events as PEBS with no flags */
894 	INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0x3),
895 	INTEL_HYBRID_LAT_CONSTRAINT(0x6d0, 0xf),
896 	EVENT_CONSTRAINT_END
897 };
898 
899 struct event_constraint intel_nehalem_pebs_event_constraints[] = {
900 	INTEL_PLD_CONSTRAINT(0x100b, 0xf),      /* MEM_INST_RETIRED.* */
901 	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
902 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
903 	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
904 	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
905 	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
906 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
907 	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
908 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
909 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
910 	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
911 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
912 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
913 	EVENT_CONSTRAINT_END
914 };
915 
916 struct event_constraint intel_westmere_pebs_event_constraints[] = {
917 	INTEL_PLD_CONSTRAINT(0x100b, 0xf),      /* MEM_INST_RETIRED.* */
918 	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
919 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
920 	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
921 	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
922 	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
923 	INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
924 	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
925 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
926 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
927 	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
928 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
929 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
930 	EVENT_CONSTRAINT_END
931 };
932 
933 struct event_constraint intel_snb_pebs_event_constraints[] = {
934 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
935 	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
936 	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
937 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
938 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
939         INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
940         INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
941         INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
942         INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
943 	/* Allow all events as PEBS with no flags */
944 	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
945 	EVENT_CONSTRAINT_END
946 };
947 
948 struct event_constraint intel_ivb_pebs_event_constraints[] = {
949         INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
950         INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
951 	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
952 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
953 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
954 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
955 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
956 	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
957 	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
958 	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
959 	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
960 	/* Allow all events as PEBS with no flags */
961 	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
962         EVENT_CONSTRAINT_END
963 };
964 
965 struct event_constraint intel_hsw_pebs_event_constraints[] = {
966 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
967 	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),    /* MEM_TRANS_RETIRED.* */
968 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
969 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
970 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
971 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
972 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
973 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
974 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
975 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
976 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
977 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
978 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
979 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
980 	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
981 	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
982 	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
983 	/* Allow all events as PEBS with no flags */
984 	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
985 	EVENT_CONSTRAINT_END
986 };
987 
988 struct event_constraint intel_bdw_pebs_event_constraints[] = {
989 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
990 	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),    /* MEM_TRANS_RETIRED.* */
991 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
992 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
993 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
994 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
995 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
996 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
997 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
998 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
999 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
1000 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
1001 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
1002 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
1003 	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
1004 	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
1005 	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
1006 	/* Allow all events as PEBS with no flags */
1007 	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
1008 	EVENT_CONSTRAINT_END
1009 };
1010 
1011 
1012 struct event_constraint intel_skl_pebs_event_constraints[] = {
1013 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */
1014 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
1015 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
1016 	/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
1017 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
1018 	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),		      /* MEM_TRANS_RETIRED.* */
1019 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
1020 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
1021 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
1022 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */
1023 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
1024 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
1025 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
1026 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
1027 	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_RETIRED.* */
1028 	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_L3_HIT_RETIRED.* */
1029 	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_L3_MISS_RETIRED.* */
1030 	/* Allow all events as PEBS with no flags */
1031 	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
1032 	EVENT_CONSTRAINT_END
1033 };
1034 
1035 struct event_constraint intel_icl_pebs_event_constraints[] = {
1036 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x100000000ULL),	/* old INST_RETIRED.PREC_DIST */
1037 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0100, 0x100000000ULL),	/* INST_RETIRED.PREC_DIST */
1038 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),	/* SLOTS */
1039 
1040 	INTEL_PLD_CONSTRAINT(0x1cd, 0xff),			/* MEM_TRANS_RETIRED.LOAD_LATENCY */
1041 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf),	/* MEM_INST_RETIRED.STLB_MISS_LOADS */
1042 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf),	/* MEM_INST_RETIRED.STLB_MISS_STORES */
1043 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf),	/* MEM_INST_RETIRED.LOCK_LOADS */
1044 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf),	/* MEM_INST_RETIRED.SPLIT_LOADS */
1045 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf),	/* MEM_INST_RETIRED.SPLIT_STORES */
1046 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf),	/* MEM_INST_RETIRED.ALL_LOADS */
1047 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf),	/* MEM_INST_RETIRED.ALL_STORES */
1048 
1049 	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */
1050 
1051 	INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),		/* MEM_INST_RETIRED.* */
1052 
1053 	/*
1054 	 * Everything else is handled by PMU_FL_PEBS_ALL, because we
1055 	 * need the full constraints from the main table.
1056 	 */
1057 
1058 	EVENT_CONSTRAINT_END
1059 };
1060 
1061 struct event_constraint intel_spr_pebs_event_constraints[] = {
1062 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL),	/* INST_RETIRED.PREC_DIST */
1063 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),
1064 
1065 	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xfe),
1066 	INTEL_PLD_CONSTRAINT(0x1cd, 0xfe),
1067 	INTEL_PSD_CONSTRAINT(0x2cd, 0x1),
1068 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf),	/* MEM_INST_RETIRED.STLB_MISS_LOADS */
1069 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf),	/* MEM_INST_RETIRED.STLB_MISS_STORES */
1070 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf),	/* MEM_INST_RETIRED.LOCK_LOADS */
1071 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf),	/* MEM_INST_RETIRED.SPLIT_LOADS */
1072 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf),	/* MEM_INST_RETIRED.SPLIT_STORES */
1073 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf),	/* MEM_INST_RETIRED.ALL_LOADS */
1074 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf),	/* MEM_INST_RETIRED.ALL_STORES */
1075 
1076 	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf),
1077 
1078 	INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),
1079 
1080 	/*
1081 	 * Everything else is handled by PMU_FL_PEBS_ALL, because we
1082 	 * need the full constraints from the main table.
1083 	 */
1084 
1085 	EVENT_CONSTRAINT_END
1086 };
1087 
intel_pebs_constraints(struct perf_event * event)1088 struct event_constraint *intel_pebs_constraints(struct perf_event *event)
1089 {
1090 	struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints);
1091 	struct event_constraint *c;
1092 
1093 	if (!event->attr.precise_ip)
1094 		return NULL;
1095 
1096 	if (pebs_constraints) {
1097 		for_each_event_constraint(c, pebs_constraints) {
1098 			if (constraint_match(c, event->hw.config)) {
1099 				event->hw.flags |= c->flags;
1100 				return c;
1101 			}
1102 		}
1103 	}
1104 
1105 	/*
1106 	 * Extended PEBS support
1107 	 * Makes the PEBS code search the normal constraints.
1108 	 */
1109 	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
1110 		return NULL;
1111 
1112 	return &emptyconstraint;
1113 }
1114 
1115 /*
1116  * We need the sched_task callback even for per-cpu events when we use
1117  * the large interrupt threshold, such that we can provide PID and TID
1118  * to PEBS samples.
1119  */
pebs_needs_sched_cb(struct cpu_hw_events * cpuc)1120 static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
1121 {
1122 	if (cpuc->n_pebs == cpuc->n_pebs_via_pt)
1123 		return false;
1124 
1125 	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
1126 }
1127 
intel_pmu_pebs_sched_task(struct perf_event_pmu_context * pmu_ctx,bool sched_in)1128 void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
1129 {
1130 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1131 
1132 	if (!sched_in && pebs_needs_sched_cb(cpuc))
1133 		intel_pmu_drain_pebs_buffer();
1134 }
1135 
pebs_update_threshold(struct cpu_hw_events * cpuc)1136 static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
1137 {
1138 	struct debug_store *ds = cpuc->ds;
1139 	int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
1140 	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
1141 	u64 threshold;
1142 	int reserved;
1143 
1144 	if (cpuc->n_pebs_via_pt)
1145 		return;
1146 
1147 	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
1148 		reserved = max_pebs_events + num_counters_fixed;
1149 	else
1150 		reserved = max_pebs_events;
1151 
1152 	if (cpuc->n_pebs == cpuc->n_large_pebs) {
1153 		threshold = ds->pebs_absolute_maximum -
1154 			reserved * cpuc->pebs_record_size;
1155 	} else {
1156 		threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
1157 	}
1158 
1159 	ds->pebs_interrupt_threshold = threshold;
1160 }
1161 
adaptive_pebs_record_size_update(void)1162 static void adaptive_pebs_record_size_update(void)
1163 {
1164 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1165 	u64 pebs_data_cfg = cpuc->pebs_data_cfg;
1166 	int sz = sizeof(struct pebs_basic);
1167 
1168 	if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
1169 		sz += sizeof(struct pebs_meminfo);
1170 	if (pebs_data_cfg & PEBS_DATACFG_GP)
1171 		sz += sizeof(struct pebs_gprs);
1172 	if (pebs_data_cfg & PEBS_DATACFG_XMMS)
1173 		sz += sizeof(struct pebs_xmm);
1174 	if (pebs_data_cfg & PEBS_DATACFG_LBRS)
1175 		sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry);
1176 
1177 	cpuc->pebs_record_size = sz;
1178 }
1179 
1180 #define PERF_PEBS_MEMINFO_TYPE	(PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC |   \
1181 				PERF_SAMPLE_PHYS_ADDR |			     \
1182 				PERF_SAMPLE_WEIGHT_TYPE |		     \
1183 				PERF_SAMPLE_TRANSACTION |		     \
1184 				PERF_SAMPLE_DATA_PAGE_SIZE)
1185 
pebs_update_adaptive_cfg(struct perf_event * event)1186 static u64 pebs_update_adaptive_cfg(struct perf_event *event)
1187 {
1188 	struct perf_event_attr *attr = &event->attr;
1189 	u64 sample_type = attr->sample_type;
1190 	u64 pebs_data_cfg = 0;
1191 	bool gprs, tsx_weight;
1192 
1193 	if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) &&
1194 	    attr->precise_ip > 1)
1195 		return pebs_data_cfg;
1196 
1197 	if (sample_type & PERF_PEBS_MEMINFO_TYPE)
1198 		pebs_data_cfg |= PEBS_DATACFG_MEMINFO;
1199 
1200 	/*
1201 	 * We need GPRs when:
1202 	 * + user requested them
1203 	 * + precise_ip < 2 for the non event IP
1204 	 * + For RTM TSX weight we need GPRs for the abort code.
1205 	 */
1206 	gprs = ((sample_type & PERF_SAMPLE_REGS_INTR) &&
1207 		(attr->sample_regs_intr & PEBS_GP_REGS)) ||
1208 	       ((sample_type & PERF_SAMPLE_REGS_USER) &&
1209 		(attr->sample_regs_user & PEBS_GP_REGS));
1210 
1211 	tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
1212 		     ((attr->config & INTEL_ARCH_EVENT_MASK) ==
1213 		      x86_pmu.rtm_abort_event);
1214 
1215 	if (gprs || (attr->precise_ip < 2) || tsx_weight)
1216 		pebs_data_cfg |= PEBS_DATACFG_GP;
1217 
1218 	if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
1219 	    (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK))
1220 		pebs_data_cfg |= PEBS_DATACFG_XMMS;
1221 
1222 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
1223 		/*
1224 		 * For now always log all LBRs. Could configure this
1225 		 * later.
1226 		 */
1227 		pebs_data_cfg |= PEBS_DATACFG_LBRS |
1228 			((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT);
1229 	}
1230 
1231 	return pebs_data_cfg;
1232 }
1233 
1234 static void
pebs_update_state(bool needed_cb,struct cpu_hw_events * cpuc,struct perf_event * event,bool add)1235 pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
1236 		  struct perf_event *event, bool add)
1237 {
1238 	struct pmu *pmu = event->pmu;
1239 
1240 	/*
1241 	 * Make sure we get updated with the first PEBS event.
1242 	 * During removal, ->pebs_data_cfg is still valid for
1243 	 * the last PEBS event. Don't clear it.
1244 	 */
1245 	if ((cpuc->n_pebs == 1) && add)
1246 		cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;
1247 
1248 	if (needed_cb != pebs_needs_sched_cb(cpuc)) {
1249 		if (!needed_cb)
1250 			perf_sched_cb_inc(pmu);
1251 		else
1252 			perf_sched_cb_dec(pmu);
1253 
1254 		cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW;
1255 	}
1256 
1257 	/*
1258 	 * The PEBS record doesn't shrink on pmu::del(). Doing so would require
1259 	 * iterating all remaining PEBS events to reconstruct the config.
1260 	 */
1261 	if (x86_pmu.intel_cap.pebs_baseline && add) {
1262 		u64 pebs_data_cfg;
1263 
1264 		pebs_data_cfg = pebs_update_adaptive_cfg(event);
1265 		/*
1266 		 * Be sure to update the thresholds when we change the record.
1267 		 */
1268 		if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
1269 			cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW;
1270 	}
1271 }
1272 
intel_pmu_pebs_add(struct perf_event * event)1273 void intel_pmu_pebs_add(struct perf_event *event)
1274 {
1275 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1276 	struct hw_perf_event *hwc = &event->hw;
1277 	bool needed_cb = pebs_needs_sched_cb(cpuc);
1278 
1279 	cpuc->n_pebs++;
1280 	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
1281 		cpuc->n_large_pebs++;
1282 	if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
1283 		cpuc->n_pebs_via_pt++;
1284 
1285 	pebs_update_state(needed_cb, cpuc, event, true);
1286 }
1287 
intel_pmu_pebs_via_pt_disable(struct perf_event * event)1288 static void intel_pmu_pebs_via_pt_disable(struct perf_event *event)
1289 {
1290 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1291 
1292 	if (!is_pebs_pt(event))
1293 		return;
1294 
1295 	if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK))
1296 		cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK;
1297 }
1298 
intel_pmu_pebs_via_pt_enable(struct perf_event * event)1299 static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
1300 {
1301 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1302 	struct hw_perf_event *hwc = &event->hw;
1303 	struct debug_store *ds = cpuc->ds;
1304 	u64 value = ds->pebs_event_reset[hwc->idx];
1305 	u32 base = MSR_RELOAD_PMC0;
1306 	unsigned int idx = hwc->idx;
1307 
1308 	if (!is_pebs_pt(event))
1309 		return;
1310 
1311 	if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
1312 		cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD;
1313 
1314 	cpuc->pebs_enabled |= PEBS_OUTPUT_PT;
1315 
1316 	if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
1317 		base = MSR_RELOAD_FIXED_CTR0;
1318 		idx = hwc->idx - INTEL_PMC_IDX_FIXED;
1319 		if (x86_pmu.intel_cap.pebs_format < 5)
1320 			value = ds->pebs_event_reset[MAX_PEBS_EVENTS_FMT4 + idx];
1321 		else
1322 			value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx];
1323 	}
1324 	wrmsrl(base + idx, value);
1325 }
1326 
intel_pmu_drain_large_pebs(struct cpu_hw_events * cpuc)1327 static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
1328 {
1329 	if (cpuc->n_pebs == cpuc->n_large_pebs &&
1330 	    cpuc->n_pebs != cpuc->n_pebs_via_pt)
1331 		intel_pmu_drain_pebs_buffer();
1332 }
1333 
intel_pmu_pebs_enable(struct perf_event * event)1334 void intel_pmu_pebs_enable(struct perf_event *event)
1335 {
1336 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1337 	u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW;
1338 	struct hw_perf_event *hwc = &event->hw;
1339 	struct debug_store *ds = cpuc->ds;
1340 	unsigned int idx = hwc->idx;
1341 
1342 	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
1343 
1344 	cpuc->pebs_enabled |= 1ULL << hwc->idx;
1345 
1346 	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
1347 		cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
1348 	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
1349 		cpuc->pebs_enabled |= 1ULL << 63;
1350 
1351 	if (x86_pmu.intel_cap.pebs_baseline) {
1352 		hwc->config |= ICL_EVENTSEL_ADAPTIVE;
1353 		if (pebs_data_cfg != cpuc->active_pebs_data_cfg) {
1354 			/*
1355 			 * drain_pebs() assumes uniform record size;
1356 			 * hence we need to drain when changing said
1357 			 * size.
1358 			 */
1359 			intel_pmu_drain_pebs_buffer();
1360 			adaptive_pebs_record_size_update();
1361 			wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
1362 			cpuc->active_pebs_data_cfg = pebs_data_cfg;
1363 		}
1364 	}
1365 	if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) {
1366 		cpuc->pebs_data_cfg = pebs_data_cfg;
1367 		pebs_update_threshold(cpuc);
1368 	}
1369 
1370 	if (idx >= INTEL_PMC_IDX_FIXED) {
1371 		if (x86_pmu.intel_cap.pebs_format < 5)
1372 			idx = MAX_PEBS_EVENTS_FMT4 + (idx - INTEL_PMC_IDX_FIXED);
1373 		else
1374 			idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
1375 	}
1376 
1377 	/*
1378 	 * Use auto-reload if possible to save a MSR write in the PMI.
1379 	 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
1380 	 */
1381 	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
1382 		ds->pebs_event_reset[idx] =
1383 			(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
1384 	} else {
1385 		ds->pebs_event_reset[idx] = 0;
1386 	}
1387 
1388 	intel_pmu_pebs_via_pt_enable(event);
1389 }
1390 
intel_pmu_pebs_del(struct perf_event * event)1391 void intel_pmu_pebs_del(struct perf_event *event)
1392 {
1393 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1394 	struct hw_perf_event *hwc = &event->hw;
1395 	bool needed_cb = pebs_needs_sched_cb(cpuc);
1396 
1397 	cpuc->n_pebs--;
1398 	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
1399 		cpuc->n_large_pebs--;
1400 	if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
1401 		cpuc->n_pebs_via_pt--;
1402 
1403 	pebs_update_state(needed_cb, cpuc, event, false);
1404 }
1405 
intel_pmu_pebs_disable(struct perf_event * event)1406 void intel_pmu_pebs_disable(struct perf_event *event)
1407 {
1408 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1409 	struct hw_perf_event *hwc = &event->hw;
1410 
1411 	intel_pmu_drain_large_pebs(cpuc);
1412 
1413 	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
1414 
1415 	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
1416 	    (x86_pmu.version < 5))
1417 		cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
1418 	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
1419 		cpuc->pebs_enabled &= ~(1ULL << 63);
1420 
1421 	intel_pmu_pebs_via_pt_disable(event);
1422 
1423 	if (cpuc->enabled)
1424 		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
1425 
1426 	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
1427 }
1428 
intel_pmu_pebs_enable_all(void)1429 void intel_pmu_pebs_enable_all(void)
1430 {
1431 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1432 
1433 	if (cpuc->pebs_enabled)
1434 		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
1435 }
1436 
intel_pmu_pebs_disable_all(void)1437 void intel_pmu_pebs_disable_all(void)
1438 {
1439 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1440 
1441 	if (cpuc->pebs_enabled)
1442 		__intel_pmu_pebs_disable_all();
1443 }
1444 
intel_pmu_pebs_fixup_ip(struct pt_regs * regs)1445 static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
1446 {
1447 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1448 	unsigned long from = cpuc->lbr_entries[0].from;
1449 	unsigned long old_to, to = cpuc->lbr_entries[0].to;
1450 	unsigned long ip = regs->ip;
1451 	int is_64bit = 0;
1452 	void *kaddr;
1453 	int size;
1454 
1455 	/*
1456 	 * We don't need to fixup if the PEBS assist is fault like
1457 	 */
1458 	if (!x86_pmu.intel_cap.pebs_trap)
1459 		return 1;
1460 
1461 	/*
1462 	 * No LBR entry, no basic block, no rewinding
1463 	 */
1464 	if (!cpuc->lbr_stack.nr || !from || !to)
1465 		return 0;
1466 
1467 	/*
1468 	 * Basic blocks should never cross user/kernel boundaries
1469 	 */
1470 	if (kernel_ip(ip) != kernel_ip(to))
1471 		return 0;
1472 
1473 	/*
1474 	 * unsigned math, either ip is before the start (impossible) or
1475 	 * the basic block is larger than 1 page (sanity)
1476 	 */
1477 	if ((ip - to) > PEBS_FIXUP_SIZE)
1478 		return 0;
1479 
1480 	/*
1481 	 * We sampled a branch insn, rewind using the LBR stack
1482 	 */
1483 	if (ip == to) {
1484 		set_linear_ip(regs, from);
1485 		return 1;
1486 	}
1487 
1488 	size = ip - to;
1489 	if (!kernel_ip(ip)) {
1490 		int bytes;
1491 		u8 *buf = this_cpu_read(insn_buffer);
1492 
1493 		/* 'size' must fit our buffer, see above */
1494 		bytes = copy_from_user_nmi(buf, (void __user *)to, size);
1495 		if (bytes != 0)
1496 			return 0;
1497 
1498 		kaddr = buf;
1499 	} else {
1500 		kaddr = (void *)to;
1501 	}
1502 
1503 	do {
1504 		struct insn insn;
1505 
1506 		old_to = to;
1507 
1508 #ifdef CONFIG_X86_64
1509 		is_64bit = kernel_ip(to) || any_64bit_mode(regs);
1510 #endif
1511 		insn_init(&insn, kaddr, size, is_64bit);
1512 
1513 		/*
1514 		 * Make sure there was not a problem decoding the instruction.
1515 		 * This is doubly important because we have an infinite loop if
1516 		 * insn.length=0.
1517 		 */
1518 		if (insn_get_length(&insn))
1519 			break;
1520 
1521 		to += insn.length;
1522 		kaddr += insn.length;
1523 		size -= insn.length;
1524 	} while (to < ip);
1525 
1526 	if (to == ip) {
1527 		set_linear_ip(regs, old_to);
1528 		return 1;
1529 	}
1530 
1531 	/*
1532 	 * Even though we decoded the basic block, the instruction stream
1533 	 * never matched the given IP, either the TO or the IP got corrupted.
1534 	 */
1535 	return 0;
1536 }
1537 
intel_get_tsx_weight(u64 tsx_tuning)1538 static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
1539 {
1540 	if (tsx_tuning) {
1541 		union hsw_tsx_tuning tsx = { .value = tsx_tuning };
1542 		return tsx.cycles_last_block;
1543 	}
1544 	return 0;
1545 }
1546 
intel_get_tsx_transaction(u64 tsx_tuning,u64 ax)1547 static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
1548 {
1549 	u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
1550 
1551 	/* For RTM XABORTs also log the abort code from AX */
1552 	if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
1553 		txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
1554 	return txn;
1555 }
1556 
get_pebs_status(void * n)1557 static inline u64 get_pebs_status(void *n)
1558 {
1559 	if (x86_pmu.intel_cap.pebs_format < 4)
1560 		return ((struct pebs_record_nhm *)n)->status;
1561 	return ((struct pebs_basic *)n)->applicable_counters;
1562 }
1563 
1564 #define PERF_X86_EVENT_PEBS_HSW_PREC \
1565 		(PERF_X86_EVENT_PEBS_ST_HSW | \
1566 		 PERF_X86_EVENT_PEBS_LD_HSW | \
1567 		 PERF_X86_EVENT_PEBS_NA_HSW)
1568 
get_data_src(struct perf_event * event,u64 aux)1569 static u64 get_data_src(struct perf_event *event, u64 aux)
1570 {
1571 	u64 val = PERF_MEM_NA;
1572 	int fl = event->hw.flags;
1573 	bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
1574 
1575 	if (fl & PERF_X86_EVENT_PEBS_LDLAT)
1576 		val = load_latency_data(event, aux);
1577 	else if (fl & PERF_X86_EVENT_PEBS_STLAT)
1578 		val = store_latency_data(event, aux);
1579 	else if (fl & PERF_X86_EVENT_PEBS_LAT_HYBRID)
1580 		val = x86_pmu.pebs_latency_data(event, aux);
1581 	else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
1582 		val = precise_datala_hsw(event, aux);
1583 	else if (fst)
1584 		val = precise_store_data(aux);
1585 	return val;
1586 }
1587 
setup_pebs_time(struct perf_event * event,struct perf_sample_data * data,u64 tsc)1588 static void setup_pebs_time(struct perf_event *event,
1589 			    struct perf_sample_data *data,
1590 			    u64 tsc)
1591 {
1592 	/* Converting to a user-defined clock is not supported yet. */
1593 	if (event->attr.use_clockid != 0)
1594 		return;
1595 
1596 	/*
1597 	 * Doesn't support the conversion when the TSC is unstable.
1598 	 * The TSC unstable case is a corner case and very unlikely to
1599 	 * happen. If it happens, the TSC in a PEBS record will be
1600 	 * dropped and fall back to perf_event_clock().
1601 	 */
1602 	if (!using_native_sched_clock() || !sched_clock_stable())
1603 		return;
1604 
1605 	data->time = native_sched_clock_from_tsc(tsc) + __sched_clock_offset;
1606 	data->sample_flags |= PERF_SAMPLE_TIME;
1607 }
1608 
1609 #define PERF_SAMPLE_ADDR_TYPE	(PERF_SAMPLE_ADDR |		\
1610 				 PERF_SAMPLE_PHYS_ADDR |	\
1611 				 PERF_SAMPLE_DATA_PAGE_SIZE)
1612 
setup_pebs_fixed_sample_data(struct perf_event * event,struct pt_regs * iregs,void * __pebs,struct perf_sample_data * data,struct pt_regs * regs)1613 static void setup_pebs_fixed_sample_data(struct perf_event *event,
1614 				   struct pt_regs *iregs, void *__pebs,
1615 				   struct perf_sample_data *data,
1616 				   struct pt_regs *regs)
1617 {
1618 	/*
1619 	 * We cast to the biggest pebs_record but are careful not to
1620 	 * unconditionally access the 'extra' entries.
1621 	 */
1622 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1623 	struct pebs_record_skl *pebs = __pebs;
1624 	u64 sample_type;
1625 	int fll;
1626 
1627 	if (pebs == NULL)
1628 		return;
1629 
1630 	sample_type = event->attr.sample_type;
1631 	fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
1632 
1633 	perf_sample_data_init(data, 0, event->hw.last_period);
1634 
1635 	data->period = event->hw.last_period;
1636 
1637 	/*
1638 	 * Use latency for weight (only avail with PEBS-LL)
1639 	 */
1640 	if (fll && (sample_type & PERF_SAMPLE_WEIGHT_TYPE)) {
1641 		data->weight.full = pebs->lat;
1642 		data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
1643 	}
1644 
1645 	/*
1646 	 * data.data_src encodes the data source
1647 	 */
1648 	if (sample_type & PERF_SAMPLE_DATA_SRC) {
1649 		data->data_src.val = get_data_src(event, pebs->dse);
1650 		data->sample_flags |= PERF_SAMPLE_DATA_SRC;
1651 	}
1652 
1653 	/*
1654 	 * We must however always use iregs for the unwinder to stay sane; the
1655 	 * record BP,SP,IP can point into thin air when the record is from a
1656 	 * previous PMI context or an (I)RET happened between the record and
1657 	 * PMI.
1658 	 */
1659 	if (sample_type & PERF_SAMPLE_CALLCHAIN)
1660 		perf_sample_save_callchain(data, event, iregs);
1661 
1662 	/*
1663 	 * We use the interrupt regs as a base because the PEBS record does not
1664 	 * contain a full regs set, specifically it seems to lack segment
1665 	 * descriptors, which get used by things like user_mode().
1666 	 *
1667 	 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
1668 	 */
1669 	*regs = *iregs;
1670 
1671 	/*
1672 	 * Initialize regs_>flags from PEBS,
1673 	 * Clear exact bit (which uses x86 EFLAGS Reserved bit 3),
1674 	 * i.e., do not rely on it being zero:
1675 	 */
1676 	regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT;
1677 
1678 	if (sample_type & PERF_SAMPLE_REGS_INTR) {
1679 		regs->ax = pebs->ax;
1680 		regs->bx = pebs->bx;
1681 		regs->cx = pebs->cx;
1682 		regs->dx = pebs->dx;
1683 		regs->si = pebs->si;
1684 		regs->di = pebs->di;
1685 
1686 		regs->bp = pebs->bp;
1687 		regs->sp = pebs->sp;
1688 
1689 #ifndef CONFIG_X86_32
1690 		regs->r8 = pebs->r8;
1691 		regs->r9 = pebs->r9;
1692 		regs->r10 = pebs->r10;
1693 		regs->r11 = pebs->r11;
1694 		regs->r12 = pebs->r12;
1695 		regs->r13 = pebs->r13;
1696 		regs->r14 = pebs->r14;
1697 		regs->r15 = pebs->r15;
1698 #endif
1699 	}
1700 
1701 	if (event->attr.precise_ip > 1) {
1702 		/*
1703 		 * Haswell and later processors have an 'eventing IP'
1704 		 * (real IP) which fixes the off-by-1 skid in hardware.
1705 		 * Use it when precise_ip >= 2 :
1706 		 */
1707 		if (x86_pmu.intel_cap.pebs_format >= 2) {
1708 			set_linear_ip(regs, pebs->real_ip);
1709 			regs->flags |= PERF_EFLAGS_EXACT;
1710 		} else {
1711 			/* Otherwise, use PEBS off-by-1 IP: */
1712 			set_linear_ip(regs, pebs->ip);
1713 
1714 			/*
1715 			 * With precise_ip >= 2, try to fix up the off-by-1 IP
1716 			 * using the LBR. If successful, the fixup function
1717 			 * corrects regs->ip and calls set_linear_ip() on regs:
1718 			 */
1719 			if (intel_pmu_pebs_fixup_ip(regs))
1720 				regs->flags |= PERF_EFLAGS_EXACT;
1721 		}
1722 	} else {
1723 		/*
1724 		 * When precise_ip == 1, return the PEBS off-by-1 IP,
1725 		 * no fixup attempted:
1726 		 */
1727 		set_linear_ip(regs, pebs->ip);
1728 	}
1729 
1730 
1731 	if ((sample_type & PERF_SAMPLE_ADDR_TYPE) &&
1732 	    x86_pmu.intel_cap.pebs_format >= 1) {
1733 		data->addr = pebs->dla;
1734 		data->sample_flags |= PERF_SAMPLE_ADDR;
1735 	}
1736 
1737 	if (x86_pmu.intel_cap.pebs_format >= 2) {
1738 		/* Only set the TSX weight when no memory weight. */
1739 		if ((sample_type & PERF_SAMPLE_WEIGHT_TYPE) && !fll) {
1740 			data->weight.full = intel_get_tsx_weight(pebs->tsx_tuning);
1741 			data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
1742 		}
1743 		if (sample_type & PERF_SAMPLE_TRANSACTION) {
1744 			data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
1745 							      pebs->ax);
1746 			data->sample_flags |= PERF_SAMPLE_TRANSACTION;
1747 		}
1748 	}
1749 
1750 	/*
1751 	 * v3 supplies an accurate time stamp, so we use that
1752 	 * for the time stamp.
1753 	 *
1754 	 * We can only do this for the default trace clock.
1755 	 */
1756 	if (x86_pmu.intel_cap.pebs_format >= 3)
1757 		setup_pebs_time(event, data, pebs->tsc);
1758 
1759 	if (has_branch_stack(event))
1760 		perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
1761 }
1762 
adaptive_pebs_save_regs(struct pt_regs * regs,struct pebs_gprs * gprs)1763 static void adaptive_pebs_save_regs(struct pt_regs *regs,
1764 				    struct pebs_gprs *gprs)
1765 {
1766 	regs->ax = gprs->ax;
1767 	regs->bx = gprs->bx;
1768 	regs->cx = gprs->cx;
1769 	regs->dx = gprs->dx;
1770 	regs->si = gprs->si;
1771 	regs->di = gprs->di;
1772 	regs->bp = gprs->bp;
1773 	regs->sp = gprs->sp;
1774 #ifndef CONFIG_X86_32
1775 	regs->r8 = gprs->r8;
1776 	regs->r9 = gprs->r9;
1777 	regs->r10 = gprs->r10;
1778 	regs->r11 = gprs->r11;
1779 	regs->r12 = gprs->r12;
1780 	regs->r13 = gprs->r13;
1781 	regs->r14 = gprs->r14;
1782 	regs->r15 = gprs->r15;
1783 #endif
1784 }
1785 
1786 #define PEBS_LATENCY_MASK			0xffff
1787 #define PEBS_CACHE_LATENCY_OFFSET		32
1788 #define PEBS_RETIRE_LATENCY_OFFSET		32
1789 
1790 /*
1791  * With adaptive PEBS the layout depends on what fields are configured.
1792  */
1793 
setup_pebs_adaptive_sample_data(struct perf_event * event,struct pt_regs * iregs,void * __pebs,struct perf_sample_data * data,struct pt_regs * regs)1794 static void setup_pebs_adaptive_sample_data(struct perf_event *event,
1795 					    struct pt_regs *iregs, void *__pebs,
1796 					    struct perf_sample_data *data,
1797 					    struct pt_regs *regs)
1798 {
1799 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1800 	struct pebs_basic *basic = __pebs;
1801 	void *next_record = basic + 1;
1802 	u64 sample_type;
1803 	u64 format_size;
1804 	struct pebs_meminfo *meminfo = NULL;
1805 	struct pebs_gprs *gprs = NULL;
1806 	struct x86_perf_regs *perf_regs;
1807 
1808 	if (basic == NULL)
1809 		return;
1810 
1811 	perf_regs = container_of(regs, struct x86_perf_regs, regs);
1812 	perf_regs->xmm_regs = NULL;
1813 
1814 	sample_type = event->attr.sample_type;
1815 	format_size = basic->format_size;
1816 	perf_sample_data_init(data, 0, event->hw.last_period);
1817 	data->period = event->hw.last_period;
1818 
1819 	setup_pebs_time(event, data, basic->tsc);
1820 
1821 	/*
1822 	 * We must however always use iregs for the unwinder to stay sane; the
1823 	 * record BP,SP,IP can point into thin air when the record is from a
1824 	 * previous PMI context or an (I)RET happened between the record and
1825 	 * PMI.
1826 	 */
1827 	if (sample_type & PERF_SAMPLE_CALLCHAIN)
1828 		perf_sample_save_callchain(data, event, iregs);
1829 
1830 	*regs = *iregs;
1831 	/* The ip in basic is EventingIP */
1832 	set_linear_ip(regs, basic->ip);
1833 	regs->flags = PERF_EFLAGS_EXACT;
1834 
1835 	if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
1836 		if (x86_pmu.flags & PMU_FL_RETIRE_LATENCY)
1837 			data->weight.var3_w = format_size >> PEBS_RETIRE_LATENCY_OFFSET & PEBS_LATENCY_MASK;
1838 		else
1839 			data->weight.var3_w = 0;
1840 	}
1841 
1842 	/*
1843 	 * The record for MEMINFO is in front of GP
1844 	 * But PERF_SAMPLE_TRANSACTION needs gprs->ax.
1845 	 * Save the pointer here but process later.
1846 	 */
1847 	if (format_size & PEBS_DATACFG_MEMINFO) {
1848 		meminfo = next_record;
1849 		next_record = meminfo + 1;
1850 	}
1851 
1852 	if (format_size & PEBS_DATACFG_GP) {
1853 		gprs = next_record;
1854 		next_record = gprs + 1;
1855 
1856 		if (event->attr.precise_ip < 2) {
1857 			set_linear_ip(regs, gprs->ip);
1858 			regs->flags &= ~PERF_EFLAGS_EXACT;
1859 		}
1860 
1861 		if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER))
1862 			adaptive_pebs_save_regs(regs, gprs);
1863 	}
1864 
1865 	if (format_size & PEBS_DATACFG_MEMINFO) {
1866 		if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
1867 			u64 weight = meminfo->latency;
1868 
1869 			if (x86_pmu.flags & PMU_FL_INSTR_LATENCY) {
1870 				data->weight.var2_w = weight & PEBS_LATENCY_MASK;
1871 				weight >>= PEBS_CACHE_LATENCY_OFFSET;
1872 			}
1873 
1874 			/*
1875 			 * Although meminfo::latency is defined as a u64,
1876 			 * only the lower 32 bits include the valid data
1877 			 * in practice on Ice Lake and earlier platforms.
1878 			 */
1879 			if (sample_type & PERF_SAMPLE_WEIGHT) {
1880 				data->weight.full = weight ?:
1881 					intel_get_tsx_weight(meminfo->tsx_tuning);
1882 			} else {
1883 				data->weight.var1_dw = (u32)(weight & PEBS_LATENCY_MASK) ?:
1884 					intel_get_tsx_weight(meminfo->tsx_tuning);
1885 			}
1886 			data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
1887 		}
1888 
1889 		if (sample_type & PERF_SAMPLE_DATA_SRC) {
1890 			data->data_src.val = get_data_src(event, meminfo->aux);
1891 			data->sample_flags |= PERF_SAMPLE_DATA_SRC;
1892 		}
1893 
1894 		if (sample_type & PERF_SAMPLE_ADDR_TYPE) {
1895 			data->addr = meminfo->address;
1896 			data->sample_flags |= PERF_SAMPLE_ADDR;
1897 		}
1898 
1899 		if (sample_type & PERF_SAMPLE_TRANSACTION) {
1900 			data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning,
1901 							  gprs ? gprs->ax : 0);
1902 			data->sample_flags |= PERF_SAMPLE_TRANSACTION;
1903 		}
1904 	}
1905 
1906 	if (format_size & PEBS_DATACFG_XMMS) {
1907 		struct pebs_xmm *xmm = next_record;
1908 
1909 		next_record = xmm + 1;
1910 		perf_regs->xmm_regs = xmm->xmm;
1911 	}
1912 
1913 	if (format_size & PEBS_DATACFG_LBRS) {
1914 		struct lbr_entry *lbr = next_record;
1915 		int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
1916 					& 0xff) + 1;
1917 		next_record = next_record + num_lbr * sizeof(struct lbr_entry);
1918 
1919 		if (has_branch_stack(event)) {
1920 			intel_pmu_store_pebs_lbrs(lbr);
1921 			perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
1922 		}
1923 	}
1924 
1925 	WARN_ONCE(next_record != __pebs + (format_size >> 48),
1926 			"PEBS record size %llu, expected %llu, config %llx\n",
1927 			format_size >> 48,
1928 			(u64)(next_record - __pebs),
1929 			basic->format_size);
1930 }
1931 
1932 static inline void *
get_next_pebs_record_by_bit(void * base,void * top,int bit)1933 get_next_pebs_record_by_bit(void *base, void *top, int bit)
1934 {
1935 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1936 	void *at;
1937 	u64 pebs_status;
1938 
1939 	/*
1940 	 * fmt0 does not have a status bitfield (does not use
1941 	 * perf_record_nhm format)
1942 	 */
1943 	if (x86_pmu.intel_cap.pebs_format < 1)
1944 		return base;
1945 
1946 	if (base == NULL)
1947 		return NULL;
1948 
1949 	for (at = base; at < top; at += cpuc->pebs_record_size) {
1950 		unsigned long status = get_pebs_status(at);
1951 
1952 		if (test_bit(bit, (unsigned long *)&status)) {
1953 			/* PEBS v3 has accurate status bits */
1954 			if (x86_pmu.intel_cap.pebs_format >= 3)
1955 				return at;
1956 
1957 			if (status == (1 << bit))
1958 				return at;
1959 
1960 			/* clear non-PEBS bit and re-check */
1961 			pebs_status = status & cpuc->pebs_enabled;
1962 			pebs_status &= PEBS_COUNTER_MASK;
1963 			if (pebs_status == (1 << bit))
1964 				return at;
1965 		}
1966 	}
1967 	return NULL;
1968 }
1969 
1970 /*
1971  * Special variant of intel_pmu_save_and_restart() for auto-reload.
1972  */
1973 static int
intel_pmu_save_and_restart_reload(struct perf_event * event,int count)1974 intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
1975 {
1976 	struct hw_perf_event *hwc = &event->hw;
1977 	int shift = 64 - x86_pmu.cntval_bits;
1978 	u64 period = hwc->sample_period;
1979 	u64 prev_raw_count, new_raw_count;
1980 	s64 new, old;
1981 
1982 	WARN_ON(!period);
1983 
1984 	/*
1985 	 * drain_pebs() only happens when the PMU is disabled.
1986 	 */
1987 	WARN_ON(this_cpu_read(cpu_hw_events.enabled));
1988 
1989 	prev_raw_count = local64_read(&hwc->prev_count);
1990 	rdpmcl(hwc->event_base_rdpmc, new_raw_count);
1991 	local64_set(&hwc->prev_count, new_raw_count);
1992 
1993 	/*
1994 	 * Since the counter increments a negative counter value and
1995 	 * overflows on the sign switch, giving the interval:
1996 	 *
1997 	 *   [-period, 0]
1998 	 *
1999 	 * the difference between two consecutive reads is:
2000 	 *
2001 	 *   A) value2 - value1;
2002 	 *      when no overflows have happened in between,
2003 	 *
2004 	 *   B) (0 - value1) + (value2 - (-period));
2005 	 *      when one overflow happened in between,
2006 	 *
2007 	 *   C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
2008 	 *      when @n overflows happened in between.
2009 	 *
2010 	 * Here A) is the obvious difference, B) is the extension to the
2011 	 * discrete interval, where the first term is to the top of the
2012 	 * interval and the second term is from the bottom of the next
2013 	 * interval and C) the extension to multiple intervals, where the
2014 	 * middle term is the whole intervals covered.
2015 	 *
2016 	 * An equivalent of C, by reduction, is:
2017 	 *
2018 	 *   value2 - value1 + n * period
2019 	 */
2020 	new = ((s64)(new_raw_count << shift) >> shift);
2021 	old = ((s64)(prev_raw_count << shift) >> shift);
2022 	local64_add(new - old + count * period, &event->count);
2023 
2024 	local64_set(&hwc->period_left, -new);
2025 
2026 	perf_event_update_userpage(event);
2027 
2028 	return 0;
2029 }
2030 
2031 static __always_inline void
__intel_pmu_pebs_event(struct perf_event * event,struct pt_regs * iregs,struct perf_sample_data * data,void * base,void * top,int bit,int count,void (* setup_sample)(struct perf_event *,struct pt_regs *,void *,struct perf_sample_data *,struct pt_regs *))2032 __intel_pmu_pebs_event(struct perf_event *event,
2033 		       struct pt_regs *iregs,
2034 		       struct perf_sample_data *data,
2035 		       void *base, void *top,
2036 		       int bit, int count,
2037 		       void (*setup_sample)(struct perf_event *,
2038 					    struct pt_regs *,
2039 					    void *,
2040 					    struct perf_sample_data *,
2041 					    struct pt_regs *))
2042 {
2043 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2044 	struct hw_perf_event *hwc = &event->hw;
2045 	struct x86_perf_regs perf_regs;
2046 	struct pt_regs *regs = &perf_regs.regs;
2047 	void *at = get_next_pebs_record_by_bit(base, top, bit);
2048 	static struct pt_regs dummy_iregs;
2049 
2050 	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
2051 		/*
2052 		 * Now, auto-reload is only enabled in fixed period mode.
2053 		 * The reload value is always hwc->sample_period.
2054 		 * May need to change it, if auto-reload is enabled in
2055 		 * freq mode later.
2056 		 */
2057 		intel_pmu_save_and_restart_reload(event, count);
2058 	} else if (!intel_pmu_save_and_restart(event))
2059 		return;
2060 
2061 	if (!iregs)
2062 		iregs = &dummy_iregs;
2063 
2064 	while (count > 1) {
2065 		setup_sample(event, iregs, at, data, regs);
2066 		perf_event_output(event, data, regs);
2067 		at += cpuc->pebs_record_size;
2068 		at = get_next_pebs_record_by_bit(at, top, bit);
2069 		count--;
2070 	}
2071 
2072 	setup_sample(event, iregs, at, data, regs);
2073 	if (iregs == &dummy_iregs) {
2074 		/*
2075 		 * The PEBS records may be drained in the non-overflow context,
2076 		 * e.g., large PEBS + context switch. Perf should treat the
2077 		 * last record the same as other PEBS records, and doesn't
2078 		 * invoke the generic overflow handler.
2079 		 */
2080 		perf_event_output(event, data, regs);
2081 	} else {
2082 		/*
2083 		 * All but the last records are processed.
2084 		 * The last one is left to be able to call the overflow handler.
2085 		 */
2086 		if (perf_event_overflow(event, data, regs))
2087 			x86_pmu_stop(event, 0);
2088 	}
2089 }
2090 
intel_pmu_drain_pebs_core(struct pt_regs * iregs,struct perf_sample_data * data)2091 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
2092 {
2093 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2094 	struct debug_store *ds = cpuc->ds;
2095 	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
2096 	struct pebs_record_core *at, *top;
2097 	int n;
2098 
2099 	if (!x86_pmu.pebs_active)
2100 		return;
2101 
2102 	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
2103 	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
2104 
2105 	/*
2106 	 * Whatever else happens, drain the thing
2107 	 */
2108 	ds->pebs_index = ds->pebs_buffer_base;
2109 
2110 	if (!test_bit(0, cpuc->active_mask))
2111 		return;
2112 
2113 	WARN_ON_ONCE(!event);
2114 
2115 	if (!event->attr.precise_ip)
2116 		return;
2117 
2118 	n = top - at;
2119 	if (n <= 0) {
2120 		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2121 			intel_pmu_save_and_restart_reload(event, 0);
2122 		return;
2123 	}
2124 
2125 	__intel_pmu_pebs_event(event, iregs, data, at, top, 0, n,
2126 			       setup_pebs_fixed_sample_data);
2127 }
2128 
intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events * cpuc,int size)2129 static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
2130 {
2131 	struct perf_event *event;
2132 	int bit;
2133 
2134 	/*
2135 	 * The drain_pebs() could be called twice in a short period
2136 	 * for auto-reload event in pmu::read(). There are no
2137 	 * overflows have happened in between.
2138 	 * It needs to call intel_pmu_save_and_restart_reload() to
2139 	 * update the event->count for this case.
2140 	 */
2141 	for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
2142 		event = cpuc->events[bit];
2143 		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2144 			intel_pmu_save_and_restart_reload(event, 0);
2145 	}
2146 }
2147 
intel_pmu_drain_pebs_nhm(struct pt_regs * iregs,struct perf_sample_data * data)2148 static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
2149 {
2150 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2151 	struct debug_store *ds = cpuc->ds;
2152 	struct perf_event *event;
2153 	void *base, *at, *top;
2154 	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
2155 	short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
2156 	int bit, i, size;
2157 	u64 mask;
2158 
2159 	if (!x86_pmu.pebs_active)
2160 		return;
2161 
2162 	base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
2163 	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
2164 
2165 	ds->pebs_index = ds->pebs_buffer_base;
2166 
2167 	mask = (1ULL << x86_pmu.max_pebs_events) - 1;
2168 	size = x86_pmu.max_pebs_events;
2169 	if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
2170 		mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
2171 		size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
2172 	}
2173 
2174 	if (unlikely(base >= top)) {
2175 		intel_pmu_pebs_event_update_no_drain(cpuc, size);
2176 		return;
2177 	}
2178 
2179 	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
2180 		struct pebs_record_nhm *p = at;
2181 		u64 pebs_status;
2182 
2183 		pebs_status = p->status & cpuc->pebs_enabled;
2184 		pebs_status &= mask;
2185 
2186 		/* PEBS v3 has more accurate status bits */
2187 		if (x86_pmu.intel_cap.pebs_format >= 3) {
2188 			for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
2189 				counts[bit]++;
2190 
2191 			continue;
2192 		}
2193 
2194 		/*
2195 		 * On some CPUs the PEBS status can be zero when PEBS is
2196 		 * racing with clearing of GLOBAL_STATUS.
2197 		 *
2198 		 * Normally we would drop that record, but in the
2199 		 * case when there is only a single active PEBS event
2200 		 * we can assume it's for that event.
2201 		 */
2202 		if (!pebs_status && cpuc->pebs_enabled &&
2203 			!(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
2204 			pebs_status = p->status = cpuc->pebs_enabled;
2205 
2206 		bit = find_first_bit((unsigned long *)&pebs_status,
2207 					x86_pmu.max_pebs_events);
2208 		if (bit >= x86_pmu.max_pebs_events)
2209 			continue;
2210 
2211 		/*
2212 		 * The PEBS hardware does not deal well with the situation
2213 		 * when events happen near to each other and multiple bits
2214 		 * are set. But it should happen rarely.
2215 		 *
2216 		 * If these events include one PEBS and multiple non-PEBS
2217 		 * events, it doesn't impact PEBS record. The record will
2218 		 * be handled normally. (slow path)
2219 		 *
2220 		 * If these events include two or more PEBS events, the
2221 		 * records for the events can be collapsed into a single
2222 		 * one, and it's not possible to reconstruct all events
2223 		 * that caused the PEBS record. It's called collision.
2224 		 * If collision happened, the record will be dropped.
2225 		 */
2226 		if (pebs_status != (1ULL << bit)) {
2227 			for_each_set_bit(i, (unsigned long *)&pebs_status, size)
2228 				error[i]++;
2229 			continue;
2230 		}
2231 
2232 		counts[bit]++;
2233 	}
2234 
2235 	for_each_set_bit(bit, (unsigned long *)&mask, size) {
2236 		if ((counts[bit] == 0) && (error[bit] == 0))
2237 			continue;
2238 
2239 		event = cpuc->events[bit];
2240 		if (WARN_ON_ONCE(!event))
2241 			continue;
2242 
2243 		if (WARN_ON_ONCE(!event->attr.precise_ip))
2244 			continue;
2245 
2246 		/* log dropped samples number */
2247 		if (error[bit]) {
2248 			perf_log_lost_samples(event, error[bit]);
2249 
2250 			if (iregs && perf_event_account_interrupt(event))
2251 				x86_pmu_stop(event, 0);
2252 		}
2253 
2254 		if (counts[bit]) {
2255 			__intel_pmu_pebs_event(event, iregs, data, base,
2256 					       top, bit, counts[bit],
2257 					       setup_pebs_fixed_sample_data);
2258 		}
2259 	}
2260 }
2261 
intel_pmu_drain_pebs_icl(struct pt_regs * iregs,struct perf_sample_data * data)2262 static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
2263 {
2264 	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
2265 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2266 	int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
2267 	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
2268 	struct debug_store *ds = cpuc->ds;
2269 	struct perf_event *event;
2270 	void *base, *at, *top;
2271 	int bit, size;
2272 	u64 mask;
2273 
2274 	if (!x86_pmu.pebs_active)
2275 		return;
2276 
2277 	base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
2278 	top = (struct pebs_basic *)(unsigned long)ds->pebs_index;
2279 
2280 	ds->pebs_index = ds->pebs_buffer_base;
2281 
2282 	mask = ((1ULL << max_pebs_events) - 1) |
2283 	       (((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
2284 	size = INTEL_PMC_IDX_FIXED + num_counters_fixed;
2285 
2286 	if (unlikely(base >= top)) {
2287 		intel_pmu_pebs_event_update_no_drain(cpuc, size);
2288 		return;
2289 	}
2290 
2291 	for (at = base; at < top; at += cpuc->pebs_record_size) {
2292 		u64 pebs_status;
2293 
2294 		pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
2295 		pebs_status &= mask;
2296 
2297 		for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
2298 			counts[bit]++;
2299 	}
2300 
2301 	for_each_set_bit(bit, (unsigned long *)&mask, size) {
2302 		if (counts[bit] == 0)
2303 			continue;
2304 
2305 		event = cpuc->events[bit];
2306 		if (WARN_ON_ONCE(!event))
2307 			continue;
2308 
2309 		if (WARN_ON_ONCE(!event->attr.precise_ip))
2310 			continue;
2311 
2312 		__intel_pmu_pebs_event(event, iregs, data, base,
2313 				       top, bit, counts[bit],
2314 				       setup_pebs_adaptive_sample_data);
2315 	}
2316 }
2317 
2318 /*
2319  * BTS, PEBS probe and setup
2320  */
2321 
intel_ds_init(void)2322 void __init intel_ds_init(void)
2323 {
2324 	/*
2325 	 * No support for 32bit formats
2326 	 */
2327 	if (!boot_cpu_has(X86_FEATURE_DTES64))
2328 		return;
2329 
2330 	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
2331 	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
2332 	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
2333 	if (x86_pmu.version <= 4)
2334 		x86_pmu.pebs_no_isolation = 1;
2335 
2336 	if (x86_pmu.pebs) {
2337 		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
2338 		char *pebs_qual = "";
2339 		int format = x86_pmu.intel_cap.pebs_format;
2340 
2341 		if (format < 4)
2342 			x86_pmu.intel_cap.pebs_baseline = 0;
2343 
2344 		switch (format) {
2345 		case 0:
2346 			pr_cont("PEBS fmt0%c, ", pebs_type);
2347 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
2348 			/*
2349 			 * Using >PAGE_SIZE buffers makes the WRMSR to
2350 			 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
2351 			 * mysteriously hang on Core2.
2352 			 *
2353 			 * As a workaround, we don't do this.
2354 			 */
2355 			x86_pmu.pebs_buffer_size = PAGE_SIZE;
2356 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
2357 			break;
2358 
2359 		case 1:
2360 			pr_cont("PEBS fmt1%c, ", pebs_type);
2361 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
2362 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
2363 			break;
2364 
2365 		case 2:
2366 			pr_cont("PEBS fmt2%c, ", pebs_type);
2367 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
2368 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
2369 			break;
2370 
2371 		case 3:
2372 			pr_cont("PEBS fmt3%c, ", pebs_type);
2373 			x86_pmu.pebs_record_size =
2374 						sizeof(struct pebs_record_skl);
2375 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
2376 			x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
2377 			break;
2378 
2379 		case 5:
2380 			x86_pmu.pebs_ept = 1;
2381 			fallthrough;
2382 		case 4:
2383 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
2384 			x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
2385 			if (x86_pmu.intel_cap.pebs_baseline) {
2386 				x86_pmu.large_pebs_flags |=
2387 					PERF_SAMPLE_BRANCH_STACK |
2388 					PERF_SAMPLE_TIME;
2389 				x86_pmu.flags |= PMU_FL_PEBS_ALL;
2390 				x86_pmu.pebs_capable = ~0ULL;
2391 				pebs_qual = "-baseline";
2392 				x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
2393 			} else {
2394 				/* Only basic record supported */
2395 				x86_pmu.large_pebs_flags &=
2396 					~(PERF_SAMPLE_ADDR |
2397 					  PERF_SAMPLE_TIME |
2398 					  PERF_SAMPLE_DATA_SRC |
2399 					  PERF_SAMPLE_TRANSACTION |
2400 					  PERF_SAMPLE_REGS_USER |
2401 					  PERF_SAMPLE_REGS_INTR);
2402 			}
2403 			pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
2404 
2405 			if (!is_hybrid() && x86_pmu.intel_cap.pebs_output_pt_available) {
2406 				pr_cont("PEBS-via-PT, ");
2407 				x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
2408 			}
2409 
2410 			break;
2411 
2412 		default:
2413 			pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
2414 			x86_pmu.pebs = 0;
2415 		}
2416 	}
2417 }
2418 
perf_restore_debug_store(void)2419 void perf_restore_debug_store(void)
2420 {
2421 	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2422 
2423 	if (!x86_pmu.bts && !x86_pmu.pebs)
2424 		return;
2425 
2426 	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
2427 }
2428