/*
 * Performance events x86 architecture header
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
	EXTRA_REG_FE    = 4,    /* fe_* */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
	int	flags;
};
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED	0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING	0x0800 /* use freerunning PEBS */
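
/*
 * Illustrative sketch (not a definition from this file): the bits above
 * live in event->hw.flags, so the PEBS/LBR code can test them directly,
 * e.g.
 *
 *	if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
 *		... program the load latency threshold for this event ...
 *
 * The authoritative users are in events/intel/ds.c and events/intel/core.c.
 */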


struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		8
#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 *
 */
#define PEBS_FREERUNNING_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)
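
/*
 * Illustrative sketch (assumption: the authoritative check lives in the
 * Intel hw_config/PEBS code, not here): an event may only use the large
 * freerunning PEBS buffer when every requested sample_type bit can be
 * reconstructed from the PEBS record, i.e. roughly
 *
 *	if (!(event->attr.sample_type & ~(u64)PEBS_FREERUNNING_FLAGS))
 *		event->hw.flags |= PERF_X86_EVENT_FREERUNNING;
 */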

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};
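
/*
 * Sketch of how the PEBS part of the DS area is typically parameterised
 * (the authoritative setup lives in events/intel/ds.c; buffer, max_records
 * and threshold_records are placeholders, only the field and x86_pmu names
 * come from this header):
 *
 *	ds->pebs_buffer_base         = (u64)(unsigned long)buffer;
 *	ds->pebs_index               = ds->pebs_buffer_base;
 *	ds->pebs_absolute_maximum    = ds->pebs_buffer_base +
 *				       max_records * x86_pmu.pebs_record_size;
 *	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
 *				       threshold_records * x86_pmu.pebs_record_size;
 *
 * Hardware appends one record per PEBS assist at pebs_index and raises a
 * PMI once the index crosses the interrupt threshold.
 */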

#define PEBS_REGS \
	(PERF_REG_X86_AX | \
	 PERF_REG_X86_BX | \
	 PERF_REG_X86_CX | \
	 PERF_REG_X86_DX | \
	 PERF_REG_X86_DI | \
	 PERF_REG_X86_SI | \
	 PERF_REG_X86_SP | \
	 PERF_REG_X86_BP | \
	 PERF_REG_X86_IP | \
	 PERF_REG_X86_FLAGS | \
	 PERF_REG_X86_R8 | \
	 PERF_REG_X86_R9 | \
	 PERF_REG_X86_R10 | \
	 PERF_REG_X86_R11 | \
	 PERF_REG_X86_R12 | \
	 PERF_REG_X86_R13 | \
	 PERF_REG_X86_R14 | \
	 PERF_REG_X86_R15)

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t      lock;	/* per-core: protect structure */
	u64                 config;	/* extra MSR config */
	u64                 reg;	/* extra MSR number */
	atomic_t            ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account       regs[EXTRA_REG_MAX];
	int                     refcnt;		/* per-core: #HT threads */
	unsigned                core_id;	/* per-core: core id */
};
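
/*
 * Illustrative sketch of how a shared extra MSR is claimed (the real logic
 * lives in the Intel constraint code; "desired_config" is a placeholder
 * for the event's extra_reg configuration):
 *
 *	struct er_account *era = &cpuc->shared_regs->regs[EXTRA_REG_RSP_0];
 *
 *	raw_spin_lock_irqsave(&era->lock, flags);
 *	if (!atomic_read(&era->ref) || era->config == desired_config) {
 *		era->config = desired_config;
 *		atomic_inc(&era->ref);
 *		// programming is compatible, the constraint can be satisfied
 *	}
 *	raw_spin_unlock_irqrestore(&era->lock, flags);
 */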

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		32

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;
	int			n_pebs;
	int			n_large_pebs;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthreads
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs		*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
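
/*
 * Example (illustrative values): constrain an event with code 0xc0 to
 * generic counters 0 and 1:
 *
 *	static struct event_constraint c =
 *		EVENT_CONSTRAINT(0xc0, 0x3, ARCH_PERFMON_EVENTSEL_EVENT);
 *
 * which expands to idxmsk64 = 0x3, code = 0xc0,
 * cmask = ARCH_PERFMON_EVENTSEL_EVENT and weight = HWEIGHT(0x3) = 2.
 */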

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically.  The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
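
/*
 * Example: the architectural "unhalted core cycles" event pinned to fixed
 * counter 1 is expressed as
 *
 *	FIXED_EVENT_CONSTRAINT(0x003c, 1)
 *
 * i.e. idxmsk64 has only bit 32 + 1 set, which is where fixed counter 1
 * sits in the generic counter index space (INTEL_PMC_IDX_FIXED == 32).
 */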

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n, 			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n, 			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n, 			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n, 			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
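
/*
 * Sketch of how a constraint table is walked (this mirrors the generic
 * lookup in the core/Intel event code, trimmed down):
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.event_constraints) {
 *		if ((event->hw.config & c->cmask) == c->code)
 *			return c;
 *	}
 *	return &unconstrained;
 */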

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between the PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
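
/*
 * Example (illustrative; "intel_foo_extra_regs" and the valid_mask value
 * are placeholders): a model's extra_regs table is a list of these entries
 * terminated by EVENT_EXTRA_END, e.g.
 *
 *	static struct extra_reg intel_foo_extra_regs[] __read_mostly = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 *		EVENT_EXTRA_END
 *	};
 *
 * which gets hooked up through x86_pmu.extra_regs at init time.
 */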

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
	};
	u64	capabilities;
};
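
/*
 * Sketch: this union mirrors the IA32_PERF_CAPABILITIES MSR, so the init
 * code can read it in one go and then use the bit-fields, roughly
 *
 *	u64 capabilities;
 *
 *	rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
 *	x86_pmu.intel_cap.capabilities = capabilities;
 *
 * after which e.g. x86_pmu.intel_cap.pebs_format selects the PEBS layout.
 */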

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
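
/*
 * Example: build a raw event config with designated initializers for the
 * bit-fields above (values are illustrative):
 *
 *	u64 cfg = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
 *
 * Any field left out is zero, so only the interesting bits need naming.
 */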

enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	bool		late_ack;
	unsigned	(*limit_period)(struct perf_event *event, unsigned l);

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;
	struct attribute **caps_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	unsigned long	attr_freeze_on_smi;
	struct attribute **attrs;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts		:1,
			bts_active	:1,
			pebs		:1,
			pebs_active	:1,
			pebs_broken	:1,
			pebs_prec_dist	:1,
			pebs_no_tlb	:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	int		max_pebs_events;
	unsigned long	free_running_flags;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};
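
/*
 * Sketch (a made-up "foo" PMU, heavily trimmed): a model-specific driver
 * fills in one static instance of the ops/limits above and the core code
 * installs it as the global x86_pmu:
 *
 *	static __initconst const struct x86_pmu foo_pmu = {
 *		.name		= "foo",
 *		.handle_irq	= x86_pmu_handle_irq,
 *		.disable_all	= x86_pmu_disable_all,
 *		.enable_all	= x86_pmu_enable_all,
 *		.enable		= x86_pmu_enable_event,
 *		.disable	= x86_pmu_disable_event,
 *		.hw_config	= x86_pmu_hw_config,
 *		.schedule_events = x86_schedule_events,
 *		.eventsel	= MSR_ARCH_PERFMON_EVENTSEL0,
 *		.perfctr	= MSR_ARCH_PERFMON_PERFCTR0,
 *		.num_counters	= 4,
 *		.cntval_bits	= 48,
 *		.cntval_mask	= (1ULL << 48) - 1,
 *	};
 *
 * See the real tables in events/amd/core.c and events/intel/core.c.
 */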

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
	int tos;
	int lbr_callstack_users;
	int lbr_stack_state;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
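
/*
 * Example (illustrative; "foo_quirk" is a placeholder): model init code
 * registers an init-time fixup like
 *
 *	static __init void foo_quirk(void)
 *	{
 *		... patch event/constraint tables for a known erratum ...
 *	}
 *
 *	x86_add_quirk(foo_quirk);
 *
 * The quirk list is run once by the generic init code after the model
 * setup has picked its tables.
 */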

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs   */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
static struct perf_pmu_events_ht_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str_noht	= noht,						\
	.event_str_ht	= ht,						\
}
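
/*
 * Representative use (the exact string lives in the model code and is
 * quoted here from memory, not from this file):
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 *
 * defines event_attr_mem_ld_nhm, which the model then exposes by listing
 * EVENT_PTR(mem_ld_nhm) in its cpu_events / attrs array.
 */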

extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return  x86_pmu.lbr_sel_map &&
		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
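
/*
 * The per-CPU instance is normally reached as, e.g.,
 *
 *	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 */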

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
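
/*
 * Worked example: with the architectural MSR layout (x86_pmu.eventsel ==
 * MSR_ARCH_PERFMON_EVENTSEL0 == 0x186, x86_pmu.perfctr ==
 * MSR_ARCH_PERFMON_PERFCTR0 == 0xc1) and no addr_offset hook, counter 1 is
 * programmed through x86_pmu_config_addr(1) == 0x187 and read through
 * x86_pmu_event_addr(1) == 0xc2. PMUs with non-contiguous MSR spacing
 * (e.g. recent AMD cores) install addr_offset/rdpmc_index to remap the
 * index.
 */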

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

int x86_pmu_max_precise(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

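/*
 * Kernel vs. user IP: on 32-bit kernels everything above PAGE_OFFSET is
 * kernel space; on 64-bit the kernel lives in the upper canonical half,
 * so the sign bit of the address is sufficient.
 */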
static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !event->attr.freq && event->hw.sample_period == 1)
		return true;

	return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_glp_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

void intel_pmu_pebs_data_source_skl(bool pmem);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */