1 /*
2  * Performance events x86 architecture header
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licencing details see kernel-base/COPYING
13  */
14 
15 #include <linux/perf_event.h>
16 
17 #include <asm/intel_ds.h>
18 
19 /* To enable MSR tracing, please use the generic tracepoints. */
20 
21 /*
22  *          |   NHM/WSM    |      SNB     |
23  * register -------------------------------
24  *          |  HT  | no HT |  HT  | no HT |
25  *-----------------------------------------
26  * offcore  | core | core  | cpu  | core  |
27  * lbr_sel  | core | core  | cpu  | core  |
28  * ld_lat   | cpu  | core  | cpu  | core  |
29  *-----------------------------------------
30  *
31  * Given that there are only a small number of shared regs,
32  * we can pre-allocate their slots in the per-cpu,
33  * per-core reg tables.
34  */
35 enum extra_reg_type {
36 	EXTRA_REG_NONE  = -1,	/* not used */
37 
38 	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
39 	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
40 	EXTRA_REG_LBR   = 2,	/* lbr_select */
41 	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
42 	EXTRA_REG_FE    = 4,    /* fe_* */
43 
44 	EXTRA_REG_MAX		/* number of entries needed */
45 };
46 
47 struct event_constraint {
48 	union {
49 		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
50 		u64		idxmsk64;
51 	};
52 	u64		code;
53 	u64		cmask;
54 	int		weight;
55 	int		overlap;
56 	int		flags;
57 	unsigned int	size;
58 };
59 
60 static inline bool constraint_match(struct event_constraint *c, u64 ecode)
61 {
62 	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
63 }
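
/*
 * Illustrative sketch, not part of the original header: with a range
 * constraint of .code = 0xc0, .size = 0x4 and .cmask = ARCH_PERFMON_EVENTSEL_EVENT
 * (i.e. EVENT_CONSTRAINT_RANGE(0xc0, 0xc4, ...) defined further below),
 * masked event codes 0xc0 through 0xc4 match; any code below 0xc0 wraps
 * around in the unsigned subtraction and fails the comparison.  A plain
 * EVENT_CONSTRAINT() has .size = 0, so only an exact match passes.
 */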
64 
65 /*
66  * struct hw_perf_event.flags flags
67  */
68 #define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
69 #define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
70 #define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
71 #define PERF_X86_EVENT_PEBS_LD_HSW	0x0008 /* haswell style datala, load */
72 #define PERF_X86_EVENT_PEBS_NA_HSW	0x0010 /* haswell style datala, unknown */
73 #define PERF_X86_EVENT_EXCL		0x0020 /* HT exclusivity on counter */
74 #define PERF_X86_EVENT_DYNAMIC		0x0040 /* dynamic alloc'd constraint */
75 #define PERF_X86_EVENT_RDPMC_ALLOWED	0x0080 /* grant rdpmc permission */
76 #define PERF_X86_EVENT_EXCL_ACCT	0x0100 /* accounted EXCL event */
77 #define PERF_X86_EVENT_AUTO_RELOAD	0x0200 /* use PEBS auto-reload */
78 #define PERF_X86_EVENT_LARGE_PEBS	0x0400 /* use large PEBS */
79 
80 struct amd_nb {
81 	int nb_id;  /* NorthBridge id */
82 	int refcnt; /* reference count */
83 	struct perf_event *owners[X86_PMC_IDX_MAX];
84 	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
85 };
86 
87 #define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
88 
89 /*
90  * Flags PEBS can handle without a PMI.
91  *
92  * TID can only be handled by flushing at context switch.
93  * REGS_USER can be handled for events limited to ring 3.
94  *
95  */
96 #define LARGE_PEBS_FLAGS \
97 	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
98 	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
99 	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
100 	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
101 	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
102 	PERF_SAMPLE_PERIOD)
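
/*
 * Illustrative sketch, not part of the original header: multi-entry
 * (large) PEBS can only be used when every requested sample bit is
 * covered by LARGE_PEBS_FLAGS, since anything else needs a PMI per
 * sample.  Roughly (the real code also checks things like freq mode):
 */
#if 0	/* example only */
static inline bool example_can_use_large_pebs(struct perf_event *event)
{
	/* no sample flag outside LARGE_PEBS_FLAGS may be requested */
	return !(event->attr.sample_type & ~LARGE_PEBS_FLAGS);
}
#endif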
103 
104 #define PEBS_GP_REGS			\
105 	((1ULL << PERF_REG_X86_AX)    | \
106 	 (1ULL << PERF_REG_X86_BX)    | \
107 	 (1ULL << PERF_REG_X86_CX)    | \
108 	 (1ULL << PERF_REG_X86_DX)    | \
109 	 (1ULL << PERF_REG_X86_DI)    | \
110 	 (1ULL << PERF_REG_X86_SI)    | \
111 	 (1ULL << PERF_REG_X86_SP)    | \
112 	 (1ULL << PERF_REG_X86_BP)    | \
113 	 (1ULL << PERF_REG_X86_IP)    | \
114 	 (1ULL << PERF_REG_X86_FLAGS) | \
115 	 (1ULL << PERF_REG_X86_R8)    | \
116 	 (1ULL << PERF_REG_X86_R9)    | \
117 	 (1ULL << PERF_REG_X86_R10)   | \
118 	 (1ULL << PERF_REG_X86_R11)   | \
119 	 (1ULL << PERF_REG_X86_R12)   | \
120 	 (1ULL << PERF_REG_X86_R13)   | \
121 	 (1ULL << PERF_REG_X86_R14)   | \
122 	 (1ULL << PERF_REG_X86_R15))
123 
124 #define PEBS_XMM_REGS                   \
125 	((1ULL << PERF_REG_X86_XMM0)  | \
126 	 (1ULL << PERF_REG_X86_XMM1)  | \
127 	 (1ULL << PERF_REG_X86_XMM2)  | \
128 	 (1ULL << PERF_REG_X86_XMM3)  | \
129 	 (1ULL << PERF_REG_X86_XMM4)  | \
130 	 (1ULL << PERF_REG_X86_XMM5)  | \
131 	 (1ULL << PERF_REG_X86_XMM6)  | \
132 	 (1ULL << PERF_REG_X86_XMM7)  | \
133 	 (1ULL << PERF_REG_X86_XMM8)  | \
134 	 (1ULL << PERF_REG_X86_XMM9)  | \
135 	 (1ULL << PERF_REG_X86_XMM10) | \
136 	 (1ULL << PERF_REG_X86_XMM11) | \
137 	 (1ULL << PERF_REG_X86_XMM12) | \
138 	 (1ULL << PERF_REG_X86_XMM13) | \
139 	 (1ULL << PERF_REG_X86_XMM14) | \
140 	 (1ULL << PERF_REG_X86_XMM15))
141 
142 /*
143  * Per register state.
144  */
145 struct er_account {
146 	raw_spinlock_t      lock;	/* per-core: protect structure */
147 	u64                 config;	/* extra MSR config */
148 	u64                 reg;	/* extra MSR number */
149 	atomic_t            ref;	/* reference count */
150 };
151 
152 /*
153  * Per core/cpu state
154  *
155  * Used to coordinate shared registers between HT threads or
156  * among events on a single PMU.
157  */
158 struct intel_shared_regs {
159 	struct er_account       regs[EXTRA_REG_MAX];
160 	int                     refcnt;		/* per-core: #HT threads */
161 	unsigned                core_id;	/* per-core: core id */
162 };
163 
164 enum intel_excl_state_type {
165 	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
166 	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
167 	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
168 };
169 
170 struct intel_excl_states {
171 	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
172 	bool sched_started; /* true if scheduling has started */
173 };
174 
175 struct intel_excl_cntrs {
176 	raw_spinlock_t	lock;
177 
178 	struct intel_excl_states states[2];
179 
180 	union {
181 		u16	has_exclusive[2];
182 		u32	exclusive_present;
183 	};
184 
185 	int		refcnt;		/* per-core: #HT threads */
186 	unsigned	core_id;	/* per-core: core id */
187 };
188 
189 struct x86_perf_task_context;
190 #define MAX_LBR_ENTRIES		32
191 
192 enum {
193 	X86_PERF_KFREE_SHARED = 0,
194 	X86_PERF_KFREE_EXCL   = 1,
195 	X86_PERF_KFREE_MAX
196 };
197 
198 struct cpu_hw_events {
199 	/*
200 	 * Generic x86 PMC bits
201 	 */
202 	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
203 	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
204 	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
205 	int			enabled;
206 
207 	int			n_events; /* the # of events in the below arrays */
208 	int			n_added;  /* the # of most recently added events in the
209 					     below arrays; they've never been enabled yet */
210 	int			n_txn;    /* the # of events in the below arrays added
211 					     in the current transaction */
212 	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
213 	u64			tags[X86_PMC_IDX_MAX];
214 
215 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
216 	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];
217 
218 	int			n_excl; /* the number of exclusive events */
219 
220 	unsigned int		txn_flags;
221 	int			is_fake;
222 
223 	/*
224 	 * Intel DebugStore bits
225 	 */
226 	struct debug_store	*ds;
227 	void			*ds_pebs_vaddr;
228 	void			*ds_bts_vaddr;
229 	u64			pebs_enabled;
230 	int			n_pebs;
231 	int			n_large_pebs;
232 
233 	/* Current superset of the hardware configuration of all events */
234 	u64			pebs_data_cfg;
235 	u64			active_pebs_data_cfg;
236 	int			pebs_record_size;
237 
238 	/*
239 	 * Intel LBR bits
240 	 */
241 	int				lbr_users;
242 	int				lbr_pebs_users;
243 	struct perf_branch_stack	lbr_stack;
244 	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
245 	struct er_account		*lbr_sel;
246 	u64				br_sel;
247 	struct x86_perf_task_context	*last_task_ctx;
248 	int				last_log_id;
249 
250 	/*
251 	 * Intel host/guest exclude bits
252 	 */
253 	u64				intel_ctrl_guest_mask;
254 	u64				intel_ctrl_host_mask;
255 	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];
256 
257 	/*
258 	 * Intel checkpoint mask
259 	 */
260 	u64				intel_cp_status;
261 
262 	/*
263 	 * manage shared (per-core, per-cpu) registers
264 	 * used on Intel NHM/WSM/SNB
265 	 */
266 	struct intel_shared_regs	*shared_regs;
267 	/*
268 	 * manage exclusive counter access between hyperthreads
269 	 */
270 	struct event_constraint *constraint_list; /* in enable order */
271 	struct intel_excl_cntrs		*excl_cntrs;
272 	int excl_thread_id; /* 0 or 1 */
273 
274 	/*
275 	 * SKL TSX_FORCE_ABORT shadow
276 	 */
277 	u64				tfa_shadow;
278 
279 	/*
280 	 * AMD specific bits
281 	 */
282 	struct amd_nb			*amd_nb;
283 	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
284 	u64				perf_ctr_virt_mask;
285 
286 	void				*kfree_on_online[X86_PERF_KFREE_MAX];
287 };
288 
289 #define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
290 	{ .idxmsk64 = (n) },		\
291 	.code = (c),			\
292 	.size = (e) - (c),		\
293 	.cmask = (m),			\
294 	.weight = (w),			\
295 	.overlap = (o),			\
296 	.flags = f,			\
297 }
298 
299 #define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
300 	__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)
301 
302 #define EVENT_CONSTRAINT(c, n, m)	\
303 	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
304 
305 /*
306  * The constraint_match() function only works for 'simple' event codes
307  * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
308  */
309 #define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
310 	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
311 
312 #define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
313 	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
314 			   0, PERF_X86_EVENT_EXCL)
315 
316 /*
317  * The overlap flag marks event constraints with overlapping counter
318  * masks. This is the case if the counter mask of such an event is not
319  * a subset of any other counter mask of a constraint with an equal or
320  * higher weight, e.g.:
321  *
322  *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
323  *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
324  *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
325  *
326  * The event scheduler may not select the correct counter in the first
327  * cycle because it needs to know which subsequent events will be
328  * scheduled. It may fail to schedule the events then. So we set the
329  * overlap flag for such constraints to give the scheduler a hint about
330  * which events to select for counter rescheduling.
331  *
332  * Care must be taken as the rescheduling algorithm is O(n!), which
333  * will increase scheduling cycles for an over-committed system
334  * dramatically.  The number of such EVENT_CONSTRAINT_OVERLAP() macros
335  * and their counter masks must be kept to a minimum.
336  */
337 #define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
338 	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)
339 
340 /*
341  * Constraint on the Event code.
342  */
343 #define INTEL_EVENT_CONSTRAINT(c, n)	\
344 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
345 
346 /*
347  * Constraint on a range of Event codes
348  */
349 #define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
350 	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)
351 
352 /*
353  * Constraint on the Event code + UMask + fixed-mask
354  *
355  * Filter mask to validate fixed counter events.
356  * The following filters disqualify an event from the fixed counters:
357  *  - inv
358  *  - edge
359  *  - cnt-mask
360  *  - in_tx
361  *  - in_tx_checkpointed
362  *  The other filters are supported by fixed counters.
363  *  The any-thread option is supported starting with v3.
364  */
365 #define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
366 #define FIXED_EVENT_CONSTRAINT(c, n)	\
367 	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
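
/*
 * Illustrative sketch, not part of the original header: fixed counters
 * occupy bits 32+ of the index mask, so (1ULL << (32+n)) selects fixed
 * counter n.  For example, FIXED_EVENT_CONSTRAINT(0x00c0, 0) pins the
 * architectural INST_RETIRED.ANY event (0x00c0) to fixed counter 0 and
 * FIXED_EVENT_CONSTRAINT(0x003c, 1) pins CPU_CLK_UNHALTED.CORE to fixed
 * counter 1, as the Intel constraint tables do.
 */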
368 
369 /*
370  * Constraint on the Event code + UMask
371  */
372 #define INTEL_UEVENT_CONSTRAINT(c, n)	\
373 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
374 
375 /* Constraint on specific umask bit only + event */
376 #define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
377 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))
378 
379 /* Like UEVENT_CONSTRAINT, but match flags too */
380 #define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
381 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
382 
383 #define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
384 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
385 			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)
386 
387 #define INTEL_PLD_CONSTRAINT(c, n)	\
388 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
389 			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
390 
391 #define INTEL_PST_CONSTRAINT(c, n)	\
392 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
393 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)
394 
395 /* Event constraint, but match on all event flags too. */
396 #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
397 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
398 
399 #define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
400 	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
401 
402 /* Check only flags, but allow all event/umask */
403 #define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
404 	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)
405 
406 /* Check flags and event code, and set the HSW store flag */
407 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
408 	__EVENT_CONSTRAINT(code, n, 			\
409 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
410 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
411 
412 /* Check flags and event code, and set the HSW load flag */
413 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
414 	__EVENT_CONSTRAINT(code, n,			\
415 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
416 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
417 
418 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
419 	__EVENT_CONSTRAINT_RANGE(code, end, n,				\
420 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
421 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
422 
423 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
424 	__EVENT_CONSTRAINT(code, n,			\
425 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
426 			  HWEIGHT(n), 0, \
427 			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
428 
429 /* Check flags and event code/umask, and set the HSW store flag */
430 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
431 	__EVENT_CONSTRAINT(code, n, 			\
432 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
433 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
434 
435 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
436 	__EVENT_CONSTRAINT(code, n,			\
437 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
438 			  HWEIGHT(n), 0, \
439 			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)
440 
441 /* Check flags and event code/umask, and set the HSW load flag */
442 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
443 	__EVENT_CONSTRAINT(code, n, 			\
444 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
445 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
446 
447 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
448 	__EVENT_CONSTRAINT(code, n,			\
449 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
450 			  HWEIGHT(n), 0, \
451 			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
452 
453 /* Check flags and event code/umask, and set the HSW N/A flag */
454 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
455 	__EVENT_CONSTRAINT(code, n, 			\
456 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
457 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
458 
459 
460 /*
461  * We define the end marker as having a weight of -1 so that
462  * events can still be blacklisted with a counter bitmask
463  * of zero and thus a weight of zero.
464  * A weight of -1 cannot possibly be obtained by counting the
465  * bits in a bitmask, which makes the end marker unambiguous.
466  */
467 #define EVENT_CONSTRAINT_END { .weight = -1 }
468 
469 /*
470  * Check for end marker with weight == -1
471  */
472 #define for_each_event_constraint(e, c)	\
473 	for ((e) = (c); (e)->weight != -1; (e)++)
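
/*
 * Illustrative sketch, not part of the original header: constraint tables
 * are arrays terminated by EVENT_CONSTRAINT_END and walked with
 * for_each_event_constraint().  The event codes and counter masks below
 * are made-up values for demonstration only.
 */
#if 0	/* example only */
static struct event_constraint example_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x3c, 0x3),	/* may only use counters 0-1 */
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* pinned to fixed counter 0 */
	EVENT_CONSTRAINT_END
};

static struct event_constraint *
example_lookup(struct perf_event *event)
{
	struct event_constraint *c;

	for_each_event_constraint(c, example_event_constraints) {
		if (constraint_match(c, event->hw.config))
			return c;
	}

	return NULL;
}
#endif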
474 
475 /*
476  * Extra registers for specific events.
477  *
478  * Some events need large masks and require external MSRs.
479  * Those extra MSRs end up being shared by all events on
480  * a PMU and sometimes between the PMUs of sibling HT threads.
481  * In either case, the kernel needs to handle conflicting
482  * accesses to those extra, shared, regs. The data structure
483  * to manage those registers is stored in struct cpu_hw_events.
484  */
485 struct extra_reg {
486 	unsigned int		event;
487 	unsigned int		msr;
488 	u64			config_mask;
489 	u64			valid_mask;
490 	int			idx;  /* per_xxx->regs[] reg index */
491 	bool			extra_msr_access;
492 };
493 
494 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
495 	.event = (e),			\
496 	.msr = (ms),			\
497 	.config_mask = (m),		\
498 	.valid_mask = (vm),		\
499 	.idx = EXTRA_REG_##i,		\
500 	.extra_msr_access = true,	\
501 	}
502 
503 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
504 	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
505 
506 #define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
507 	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
508 			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)
509 
510 #define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
511 	INTEL_UEVENT_EXTRA_REG(c, \
512 			       MSR_PEBS_LD_LAT_THRESHOLD, \
513 			       0xffff, \
514 			       LDLAT)
515 
516 #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
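
/*
 * Illustrative sketch, not part of the original header: an extra_reg table
 * maps an event to the MSR holding its extended configuration and is
 * terminated by EVENT_EXTRA_END, similar to the Nehalem/Westmere tables
 * in the Intel driver (the valid mask below is illustrative):
 */
#if 0	/* example only */
static struct extra_reg example_extra_regs[] = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
#endif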
517 
518 union perf_capabilities {
519 	struct {
520 		u64	lbr_format:6;
521 		u64	pebs_trap:1;
522 		u64	pebs_arch_reg:1;
523 		u64	pebs_format:4;
524 		u64	smm_freeze:1;
525 		/*
526 		 * PMU supports a separate counter range for writing
527 		 * values > 32 bits.
528 		 */
529 		u64	full_width_write:1;
530 		u64     pebs_baseline:1;
531 	};
532 	u64	capabilities;
533 };
534 
535 struct x86_pmu_quirk {
536 	struct x86_pmu_quirk *next;
537 	void (*func)(void);
538 };
539 
540 union x86_pmu_config {
541 	struct {
542 		u64 event:8,
543 		    umask:8,
544 		    usr:1,
545 		    os:1,
546 		    edge:1,
547 		    pc:1,
548 		    interrupt:1,
549 		    __reserved1:1,
550 		    en:1,
551 		    inv:1,
552 		    cmask:8,
553 		    event2:4,
554 		    __reserved2:4,
555 		    go:1,
556 		    ho:1;
557 	} bits;
558 	u64 value;
559 };
560 
561 #define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
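
/*
 * Illustrative sketch, not part of the original header: X86_CONFIG() builds
 * a raw event-select value from named bit-fields, e.g.:
 *
 *	u64 cfg = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
 *
 * which encodes event 0xc0, umask 0x01, invert, and a counter-mask of 16.
 */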
562 
563 enum {
564 	x86_lbr_exclusive_lbr,
565 	x86_lbr_exclusive_bts,
566 	x86_lbr_exclusive_pt,
567 	x86_lbr_exclusive_max,
568 };
569 
570 /*
571  * struct x86_pmu - generic x86 pmu
572  */
573 struct x86_pmu {
574 	/*
575 	 * Generic x86 PMC bits
576 	 */
577 	const char	*name;
578 	int		version;
579 	int		(*handle_irq)(struct pt_regs *);
580 	void		(*disable_all)(void);
581 	void		(*enable_all)(int added);
582 	void		(*enable)(struct perf_event *);
583 	void		(*disable)(struct perf_event *);
584 	void		(*add)(struct perf_event *);
585 	void		(*del)(struct perf_event *);
586 	void		(*read)(struct perf_event *event);
587 	int		(*hw_config)(struct perf_event *event);
588 	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
589 	unsigned	eventsel;
590 	unsigned	perfctr;
591 	int		(*addr_offset)(int index, bool eventsel);
592 	int		(*rdpmc_index)(int index);
593 	u64		(*event_map)(int);
594 	int		max_events;
595 	int		num_counters;
596 	int		num_counters_fixed;
597 	int		cntval_bits;
598 	u64		cntval_mask;
599 	union {
600 			unsigned long events_maskl;
601 			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
602 	};
603 	int		events_mask_len;
604 	int		apic;
605 	u64		max_period;
606 	struct event_constraint *
607 			(*get_event_constraints)(struct cpu_hw_events *cpuc,
608 						 int idx,
609 						 struct perf_event *event);
610 
611 	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
612 						 struct perf_event *event);
613 
614 	void		(*start_scheduling)(struct cpu_hw_events *cpuc);
615 
616 	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
617 
618 	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);
619 
620 	struct event_constraint *event_constraints;
621 	struct x86_pmu_quirk *quirks;
622 	int		perfctr_second_write;
623 	u64		(*limit_period)(struct perf_event *event, u64 l);
624 
625 	/* PMI handler bits */
626 	unsigned int	late_ack		:1,
627 			counter_freezing	:1;
628 	/*
629 	 * sysfs attrs
630 	 */
631 	int		attr_rdpmc_broken;
632 	int		attr_rdpmc;
633 	struct attribute **format_attrs;
634 	struct attribute **event_attrs;
635 	struct attribute **caps_attrs;
636 
637 	ssize_t		(*events_sysfs_show)(char *page, u64 config);
638 	struct attribute **cpu_events;
639 
640 	unsigned long	attr_freeze_on_smi;
641 	struct attribute **attrs;
642 
643 	/*
644 	 * CPU Hotplug hooks
645 	 */
646 	int		(*cpu_prepare)(int cpu);
647 	void		(*cpu_starting)(int cpu);
648 	void		(*cpu_dying)(int cpu);
649 	void		(*cpu_dead)(int cpu);
650 
651 	void		(*check_microcode)(void);
652 	void		(*sched_task)(struct perf_event_context *ctx,
653 				      bool sched_in);
654 
655 	/*
656 	 * Intel Arch Perfmon v2+
657 	 */
658 	u64			intel_ctrl;
659 	union perf_capabilities intel_cap;
660 
661 	/*
662 	 * Intel DebugStore bits
663 	 */
664 	unsigned int	bts			:1,
665 			bts_active		:1,
666 			pebs			:1,
667 			pebs_active		:1,
668 			pebs_broken		:1,
669 			pebs_prec_dist		:1,
670 			pebs_no_tlb		:1,
671 			pebs_no_isolation	:1,
672 			pebs_no_xmm_regs	:1;
673 	int		pebs_record_size;
674 	int		pebs_buffer_size;
675 	int		max_pebs_events;
676 	void		(*drain_pebs)(struct pt_regs *regs);
677 	struct event_constraint *pebs_constraints;
678 	void		(*pebs_aliases)(struct perf_event *event);
679 	unsigned long	large_pebs_flags;
680 	u64		rtm_abort_event;
681 
682 	/*
683 	 * Intel LBR
684 	 */
685 	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
686 	int		lbr_nr;			   /* hardware stack size */
687 	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
688 	const int	*lbr_sel_map;		   /* lbr_select mappings */
689 	bool		lbr_double_abort;	   /* duplicated lbr aborts */
690 	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */
691 
692 	/*
693 	 * Intel PT/LBR/BTS are exclusive
694 	 */
695 	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];
696 
697 	/*
698 	 * AMD bits
699 	 */
700 	unsigned int	amd_nb_constraints : 1;
701 
702 	/*
703 	 * Extra registers for events
704 	 */
705 	struct extra_reg *extra_regs;
706 	unsigned int flags;
707 
708 	/*
709 	 * Intel host/guest support (KVM)
710 	 */
711 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
712 
713 	/*
714 	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
715 	 */
716 	int (*check_period) (struct perf_event *event, u64 period);
717 };
718 
719 struct x86_perf_task_context {
720 	u64 lbr_from[MAX_LBR_ENTRIES];
721 	u64 lbr_to[MAX_LBR_ENTRIES];
722 	u64 lbr_info[MAX_LBR_ENTRIES];
723 	int tos;
724 	int valid_lbrs;
725 	int lbr_callstack_users;
726 	int lbr_stack_state;
727 	int log_id;
728 };
729 
730 #define x86_add_quirk(func_)						\
731 do {									\
732 	static struct x86_pmu_quirk __quirk __initdata = {		\
733 		.func = func_,						\
734 	};								\
735 	__quirk.next = x86_pmu.quirks;					\
736 	x86_pmu.quirks = &__quirk;					\
737 } while (0)
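
/*
 * Illustrative sketch, not part of the original header: a quirk is a plain
 * void(void) callback chained onto x86_pmu.quirks; the list is run once
 * from init_hw_perf_events().  The example_* names are hypothetical.
 */
#if 0	/* example only */
static __init void example_model_quirk(void)
{
	pr_info("applying example PMU quirk\n");
}

static __init int example_pmu_init(void)
{
	/* register the quirk; it runs later during PMU init */
	x86_add_quirk(example_model_quirk);
	return 0;
}
#endif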
738 
739 /*
740  * x86_pmu flags
741  */
742 #define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
743 #define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs   */
744 #define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
745 #define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
746 #define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
747 #define PMU_FL_TFA		0x20 /* deal with TSX force abort */
748 
749 #define EVENT_VAR(_id)  event_attr_##_id
750 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
751 
752 #define EVENT_ATTR(_name, _id)						\
753 static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
754 	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
755 	.id		= PERF_COUNT_HW_##_id,				\
756 	.event_str	= NULL,						\
757 };
758 
759 #define EVENT_ATTR_STR(_name, v, str)					\
760 static struct perf_pmu_events_attr event_attr_##v = {			\
761 	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
762 	.id		= 0,						\
763 	.event_str	= str,						\
764 };
765 
766 #define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
767 static struct perf_pmu_events_ht_attr event_attr_##v = {		\
768 	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
769 	.id		= 0,						\
770 	.event_str_noht	= noht,						\
771 	.event_str_ht	= ht,						\
772 }
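
/*
 * Illustrative sketch, not part of the original header: EVENT_ATTR_STR()
 * creates a sysfs "events" attribute whose value is a fixed event string,
 * e.g. the mem-loads alias used by several Intel PMU drivers:
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 *
 * which is then exported through an attribute array via EVENT_PTR(mem_ld_nhm).
 */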
773 
774 struct pmu *x86_get_pmu(void);
775 extern struct x86_pmu x86_pmu __read_mostly;
776 
777 static inline bool x86_pmu_has_lbr_callstack(void)
778 {
779 	return  x86_pmu.lbr_sel_map &&
780 		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
781 }
782 
783 DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
784 
785 int x86_perf_event_set_period(struct perf_event *event);
786 
787 /*
788  * Generalized hw caching related hw_event table, filled
789  * in on a per-model basis. A value of 0 means
790  * 'not supported', -1 means 'hw_event makes no sense on
791  * this CPU', and any other value is the raw hw_event
792  * ID.
793  */
794 
795 #define C(x) PERF_COUNT_HW_CACHE_##x
796 
797 extern u64 __read_mostly hw_cache_event_ids
798 				[PERF_COUNT_HW_CACHE_MAX]
799 				[PERF_COUNT_HW_CACHE_OP_MAX]
800 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
801 extern u64 __read_mostly hw_cache_extra_regs
802 				[PERF_COUNT_HW_CACHE_MAX]
803 				[PERF_COUNT_HW_CACHE_OP_MAX]
804 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
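
/*
 * Illustrative sketch, not part of the original header: a generic cache
 * event such as "LLC read accesses" is resolved to a raw hw_event by
 * indexing these tables, roughly:
 *
 *	u64 cfg = hw_cache_event_ids[C(LL)][C(OP_READ)][C(RESULT_ACCESS)];
 *
 * with 0 meaning unsupported and -1 meaning the combination makes no
 * sense on this CPU, as described above.
 */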
805 
806 u64 x86_perf_event_update(struct perf_event *event);
807 
808 static inline unsigned int x86_pmu_config_addr(int index)
809 {
810 	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
811 				   x86_pmu.addr_offset(index, true) : index);
812 }
813 
814 static inline unsigned int x86_pmu_event_addr(int index)
815 {
816 	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
817 				  x86_pmu.addr_offset(index, false) : index);
818 }
819 
820 static inline int x86_pmu_rdpmc_index(int index)
821 {
822 	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
823 }
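
/*
 * Illustrative sketch, not part of the original header: with the
 * architectural perfmon layout (eventsel base MSR 0x186, counter base
 * MSR 0xc1) and no addr_offset hook, counter 2 would be programmed and
 * read via:
 *
 *	wrmsrl(x86_pmu_config_addr(2), config);		(MSR 0x188)
 *	rdmsrl(x86_pmu_event_addr(2), count);		(MSR 0xc3)
 *
 * PMUs with non-contiguous counter MSRs (e.g. AMD Fam15h) supply an
 * addr_offset hook instead.
 */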
824 
825 int x86_add_exclusive(unsigned int what);
826 
827 void x86_del_exclusive(unsigned int what);
828 
829 int x86_reserve_hardware(void);
830 
831 void x86_release_hardware(void);
832 
833 int x86_pmu_max_precise(void);
834 
835 void hw_perf_lbr_event_destroy(struct perf_event *event);
836 
837 int x86_setup_perfctr(struct perf_event *event);
838 
839 int x86_pmu_hw_config(struct perf_event *event);
840 
841 void x86_pmu_disable_all(void);
842 
843 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
844 					  u64 enable_mask)
845 {
846 	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
847 
848 	if (hwc->extra_reg.reg)
849 		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
850 	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
851 }
852 
853 void x86_pmu_enable_all(int added);
854 
855 int perf_assign_events(struct event_constraint **constraints, int n,
856 			int wmin, int wmax, int gpmax, int *assign);
857 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
858 
859 void x86_pmu_stop(struct perf_event *event, int flags);
860 
861 static inline void x86_pmu_disable_event(struct perf_event *event)
862 {
863 	struct hw_perf_event *hwc = &event->hw;
864 
865 	wrmsrl(hwc->config_base, hwc->config);
866 }
867 
868 void x86_pmu_enable_event(struct perf_event *event);
869 
870 int x86_pmu_handle_irq(struct pt_regs *regs);
871 
872 extern struct event_constraint emptyconstraint;
873 
874 extern struct event_constraint unconstrained;
875 
876 static inline bool kernel_ip(unsigned long ip)
877 {
878 #ifdef CONFIG_X86_32
879 	return ip > PAGE_OFFSET;
880 #else
881 	return (long)ip < 0;
882 #endif
883 }
884 
885 /*
886  * Not all PMUs provide the right context information to place the reported IP
887  * into full context. Specifically, segment registers are typically not
888  * supplied.
889  *
890  * Assuming the address is a linear address (it is for IBS), we fake the CS and
891  * vm86 mode using the known zero-based code segment and 'fix up' the registers
892  * to reflect this.
893  *
894  * Intel PEBS/LBR appear to typically provide the effective address; there is
895  * not much we can do about that but pray and treat it like a linear address.
896  */
897 static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
898 {
899 	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
900 	if (regs->flags & X86_VM_MASK)
901 		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
902 	regs->ip = ip;
903 }
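
/*
 * Illustrative sketch, not part of the original header: PEBS and IBS drain
 * handlers use this to report the sampled linear address as the perf IP,
 * roughly:
 *
 *	set_linear_ip(regs, pebs->ip);
 */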
904 
905 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
906 ssize_t intel_event_sysfs_show(char *page, u64 config);
907 
908 struct attribute **merge_attr(struct attribute **a, struct attribute **b);
909 
910 ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
911 			  char *page);
912 ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
913 			  char *page);
914 
915 #ifdef CONFIG_CPU_SUP_AMD
916 
917 int amd_pmu_init(void);
918 
919 #else /* CONFIG_CPU_SUP_AMD */
920 
921 static inline int amd_pmu_init(void)
922 {
923 	return 0;
924 }
925 
926 #endif /* CONFIG_CPU_SUP_AMD */
927 
928 #ifdef CONFIG_CPU_SUP_INTEL
929 
930 static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
931 {
932 	struct hw_perf_event *hwc = &event->hw;
933 	unsigned int hw_event, bts_event;
934 
935 	if (event->attr.freq)
936 		return false;
937 
938 	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
939 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
940 
941 	return hw_event == bts_event && period == 1;
942 }
943 
944 static inline bool intel_pmu_has_bts(struct perf_event *event)
945 {
946 	struct hw_perf_event *hwc = &event->hw;
947 
948 	return intel_pmu_has_bts_period(event, hwc->sample_period);
949 }
950 
951 int intel_pmu_save_and_restart(struct perf_event *event);
952 
953 struct event_constraint *
954 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
955 			  struct perf_event *event);
956 
957 extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
958 extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
959 
960 int intel_pmu_init(void);
961 
962 void init_debug_store_on_cpu(int cpu);
963 
964 void fini_debug_store_on_cpu(int cpu);
965 
966 void release_ds_buffers(void);
967 
968 void reserve_ds_buffers(void);
969 
970 extern struct event_constraint bts_constraint;
971 
972 void intel_pmu_enable_bts(u64 config);
973 
974 void intel_pmu_disable_bts(void);
975 
976 int intel_pmu_drain_bts_buffer(void);
977 
978 extern struct event_constraint intel_core2_pebs_event_constraints[];
979 
980 extern struct event_constraint intel_atom_pebs_event_constraints[];
981 
982 extern struct event_constraint intel_slm_pebs_event_constraints[];
983 
984 extern struct event_constraint intel_glm_pebs_event_constraints[];
985 
986 extern struct event_constraint intel_glp_pebs_event_constraints[];
987 
988 extern struct event_constraint intel_nehalem_pebs_event_constraints[];
989 
990 extern struct event_constraint intel_westmere_pebs_event_constraints[];
991 
992 extern struct event_constraint intel_snb_pebs_event_constraints[];
993 
994 extern struct event_constraint intel_ivb_pebs_event_constraints[];
995 
996 extern struct event_constraint intel_hsw_pebs_event_constraints[];
997 
998 extern struct event_constraint intel_bdw_pebs_event_constraints[];
999 
1000 extern struct event_constraint intel_skl_pebs_event_constraints[];
1001 
1002 extern struct event_constraint intel_icl_pebs_event_constraints[];
1003 
1004 struct event_constraint *intel_pebs_constraints(struct perf_event *event);
1005 
1006 void intel_pmu_pebs_add(struct perf_event *event);
1007 
1008 void intel_pmu_pebs_del(struct perf_event *event);
1009 
1010 void intel_pmu_pebs_enable(struct perf_event *event);
1011 
1012 void intel_pmu_pebs_disable(struct perf_event *event);
1013 
1014 void intel_pmu_pebs_enable_all(void);
1015 
1016 void intel_pmu_pebs_disable_all(void);
1017 
1018 void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
1019 
1020 void intel_pmu_auto_reload_read(struct perf_event *event);
1021 
1022 void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);
1023 
1024 void intel_ds_init(void);
1025 
1026 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
1027 
1028 u64 lbr_from_signext_quirk_wr(u64 val);
1029 
1030 void intel_pmu_lbr_reset(void);
1031 
1032 void intel_pmu_lbr_add(struct perf_event *event);
1033 
1034 void intel_pmu_lbr_del(struct perf_event *event);
1035 
1036 void intel_pmu_lbr_enable_all(bool pmi);
1037 
1038 void intel_pmu_lbr_disable_all(void);
1039 
1040 void intel_pmu_lbr_read(void);
1041 
1042 void intel_pmu_lbr_init_core(void);
1043 
1044 void intel_pmu_lbr_init_nhm(void);
1045 
1046 void intel_pmu_lbr_init_atom(void);
1047 
1048 void intel_pmu_lbr_init_slm(void);
1049 
1050 void intel_pmu_lbr_init_snb(void);
1051 
1052 void intel_pmu_lbr_init_hsw(void);
1053 
1054 void intel_pmu_lbr_init_skl(void);
1055 
1056 void intel_pmu_lbr_init_knl(void);
1057 
1058 void intel_pmu_pebs_data_source_nhm(void);
1059 
1060 void intel_pmu_pebs_data_source_skl(bool pmem);
1061 
1062 int intel_pmu_setup_lbr_filter(struct perf_event *event);
1063 
1064 void intel_pt_interrupt(void);
1065 
1066 int intel_bts_interrupt(void);
1067 
1068 void intel_bts_enable_local(void);
1069 
1070 void intel_bts_disable_local(void);
1071 
1072 int p4_pmu_init(void);
1073 
1074 int p6_pmu_init(void);
1075 
1076 int knc_pmu_init(void);
1077 
1078 static inline int is_ht_workaround_enabled(void)
1079 {
1080 	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
1081 }
1082 
1083 #else /* CONFIG_CPU_SUP_INTEL */
1084 
1085 static inline void reserve_ds_buffers(void)
1086 {
1087 }
1088 
1089 static inline void release_ds_buffers(void)
1090 {
1091 }
1092 
1093 static inline int intel_pmu_init(void)
1094 {
1095 	return 0;
1096 }
1097 
1098 static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
1099 {
1100 	return 0;
1101 }
1102 
1103 static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
1104 {
1105 }
1106 
1107 static inline int is_ht_workaround_enabled(void)
1108 {
1109 	return 0;
1110 }
1111 #endif /* CONFIG_CPU_SUP_INTEL */
1112