xref: /openbmc/linux/arch/x86/events/perf_event.h (revision c127f98ba9aba1818a6ca3a1da5a24653a10d966)
/*
 * Performance events x86 architecture header
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#include <asm/intel_ds.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
	EXTRA_REG_FE    = 4,    /* fe_* */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
	int	flags;
};
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED	0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING	0x0800 /* use freerunning PEBS */


struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 */
#define PEBS_FREERUNNING_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)

#define PEBS_REGS \
	(PERF_REG_X86_AX | \
	 PERF_REG_X86_BX | \
	 PERF_REG_X86_CX | \
	 PERF_REG_X86_DX | \
	 PERF_REG_X86_DI | \
	 PERF_REG_X86_SI | \
	 PERF_REG_X86_SP | \
	 PERF_REG_X86_BP | \
	 PERF_REG_X86_IP | \
	 PERF_REG_X86_FLAGS | \
	 PERF_REG_X86_R8 | \
	 PERF_REG_X86_R9 | \
	 PERF_REG_X86_R10 | \
	 PERF_REG_X86_R11 | \
	 PERF_REG_X86_R12 | \
	 PERF_REG_X86_R13 | \
	 PERF_REG_X86_R14 | \
	 PERF_REG_X86_R15)

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t      lock;	/* per-core: protect structure */
	u64                 config;	/* extra MSR config */
	u64                 reg;	/* extra MSR number */
	atomic_t            ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account       regs[EXTRA_REG_MAX];
	int                     refcnt;		/* per-core: #HT threads */
	unsigned                core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		32

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	void			*ds_pebs_vaddr;
	void			*ds_bts_vaddr;
	u64			pebs_enabled;
	int			n_pebs;
	int			n_large_pebs;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthreads
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs		*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically.  The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and its counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
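
/*
 * Illustrative use, mirroring how the model-specific constraint tables
 * elsewhere in the kernel use this macro (the entries below show the
 * architectural fixed events; this is a sketch, not part of this header):
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	fixed counter 0: INST_RETIRED.ANY
 *	FIXED_EVENT_CONSTRAINT(0x003c, 1),	fixed counter 1: CPU_CLK_UNHALTED.CORE
 *	FIXED_EVENT_CONSTRAINT(0x0300, 2),	fixed counter 2: CPU_CLK_UNHALTED.REF
 *
 * The (1ULL << (32+n)) index mask selects fixed counter n, since the fixed
 * counters start at bit 32 (X86_PMC_IDX_FIXED) of the counter index space.
 */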

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
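
/*
 * Typical (illustrative) use: a model-specific table is terminated with
 * EVENT_CONSTRAINT_END and walked with for_each_event_constraint().  The
 * table name and event codes below are examples only:
 *
 *	static struct event_constraint model_event_constraints[] = {
 *		INTEL_EVENT_CONSTRAINT(0x2e, 0x3),	example event, counters 0-1
 *		FIXED_EVENT_CONSTRAINT(0x00c0, 0),	INST_RETIRED.ANY on fixed counter 0
 *		EVENT_CONSTRAINT_END
 *	};
 *
 *	struct event_constraint *c;
 *	for_each_event_constraint(c, model_event_constraints) {
 *		if ((event->hw.config & c->cmask) == c->code)
 *			return c;
 *	}
 */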

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
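
/*
 * Illustrative use, modelled on the Nehalem offcore-response setup (the
 * table below is a sketch, not a definition provided by this header):
 *
 *	static struct extra_reg model_extra_regs[] = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 *		INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
 *		EVENT_EXTRA_END
 *	};
 *
 * 0x01b7 is OFFCORE_RESPONSE (event 0xb7, umask 0x01); its large
 * request/response mask does not fit in the event select register and
 * therefore lives in the extra MSR, validated against .valid_mask.
 */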

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
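
/*
 * Example (illustrative; the field values are only for demonstration):
 *
 *	u64 config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=0, .cmask=0);
 *
 * builds a raw config value by filling the designated bit-fields of the
 * union above and reading the aggregate u64 back out.
 */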

enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	bool		late_ack;
	unsigned	(*limit_period)(struct perf_event *event, unsigned l);

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;
	struct attribute **caps_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	unsigned long	attr_freeze_on_smi;
	struct attribute **attrs;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts		:1,
			bts_active	:1,
			pebs		:1,
			pebs_active	:1,
			pebs_broken	:1,
			pebs_prec_dist	:1,
			pebs_no_tlb	:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	int		max_pebs_events;
	unsigned long	free_running_flags;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
	int tos;
	int lbr_callstack_users;
	int lbr_stack_state;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
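
/*
 * Illustrative use from an __init path (the quirk name is an example):
 *
 *	static __init void model_quirk(void)
 *	{
 *		...
 *	}
 *
 *	x86_add_quirk(model_quirk);
 *
 * Quirks are chained onto x86_pmu.quirks and run once at PMU init time.
 */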

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs   */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
static struct perf_pmu_events_ht_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str_noht	= noht,						\
	.event_str_ht	= ht,						\
}
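
/*
 * Illustrative use (event name and encoding are examples in the style of
 * the model-specific files):
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 *
 * exposes a "mem-loads" sysfs event whose string is printed verbatim by
 * events_sysfs_show(), while EVENT_PTR(mem_ld_nhm) references it from an
 * attribute array.
 */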

extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return  x86_pmu.lbr_sel_map &&
		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
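
/*
 * Worked example for the helpers above, assuming the architectural perfmon
 * layout with no addr_offset() callback (as on Intel):
 *
 *	x86_pmu.eventsel == MSR_ARCH_PERFMON_EVENTSEL0 (0x186)
 *	x86_pmu.perfctr  == MSR_ARCH_PERFMON_PERFCTR0  (0xc1)
 *
 *	x86_pmu_config_addr(2) == 0x188		select MSR for counter 2
 *	x86_pmu_event_addr(2)  == 0xc3		count  MSR for counter 2
 *
 * AMD fam15h+ instead supplies addr_offset() because its counter MSRs are
 * spaced two apart.
 */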

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

int x86_pmu_max_precise(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}
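
/*
 * Illustration for kernel_ip() above (addresses are examples): on 64-bit,
 * kernel text lives in the upper canonical half, so the sign of the address
 * is enough -- kernel_ip(0xffffffff81000000) is true, while a user address
 * such as 0x00007f0012345678 is not.  On 32-bit, everything above
 * PAGE_OFFSET is kernel space.
 */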

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !event->attr.freq && event->hw.sample_period == 1)
		return true;

	return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_glp_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

void intel_pmu_pebs_data_source_skl(bool pmem);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */
1009