xref: /openbmc/linux/arch/x86/events/perf_event.h (revision f87deada)
1 /*
2  * Performance events x86 architecture header
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licensing details see kernel-base/COPYING
13  */
14 
15 #include <linux/perf_event.h>
16 
17 #include <asm/intel_ds.h>
18 
19 /* To enable MSR tracing please use the generic trace points. */
20 
21 /*
22  *          |   NHM/WSM    |      SNB     |
23  * register -------------------------------
24  *          |  HT  | no HT |  HT  | no HT |
25  *-----------------------------------------
26  * offcore  | core | core  | cpu  | core  |
27  * lbr_sel  | core | core  | cpu  | core  |
28  * ld_lat   | cpu  | core  | cpu  | core  |
29  *-----------------------------------------
30  *
31  * Given that there is a small number of shared regs,
32  * we can pre-allocate their slots in the per-cpu
33  * per-core reg tables.
34  */
35 enum extra_reg_type {
36 	EXTRA_REG_NONE  = -1,	/* not used */
37 
38 	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
39 	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
40 	EXTRA_REG_LBR   = 2,	/* lbr_select */
41 	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
42 	EXTRA_REG_FE    = 4,    /* fe_* */
43 
44 	EXTRA_REG_MAX		/* number of entries needed */
45 };
46 
47 struct event_constraint {
48 	union {
49 		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
50 		u64		idxmsk64;
51 	};
52 	u64	code;
53 	u64	cmask;
54 	int	weight;
55 	int	overlap;
56 	int	flags;
57 };
58 /*
59  * struct hw_perf_event.flags flags
60  */
61 #define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
62 #define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
63 #define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
64 #define PERF_X86_EVENT_COMMITTED	0x0008 /* event passed commit_txn */
65 #define PERF_X86_EVENT_PEBS_LD_HSW	0x0010 /* haswell style datala, load */
66 #define PERF_X86_EVENT_PEBS_NA_HSW	0x0020 /* haswell style datala, unknown */
67 #define PERF_X86_EVENT_EXCL		0x0040 /* HT exclusivity on counter */
68 #define PERF_X86_EVENT_DYNAMIC		0x0080 /* dynamic alloc'd constraint */
69 #define PERF_X86_EVENT_RDPMC_ALLOWED	0x0100 /* grant rdpmc permission */
70 #define PERF_X86_EVENT_EXCL_ACCT	0x0200 /* accounted EXCL event */
71 #define PERF_X86_EVENT_AUTO_RELOAD	0x0400 /* use PEBS auto-reload */
72 #define PERF_X86_EVENT_FREERUNNING	0x0800 /* use freerunning PEBS */
73 
74 
75 struct amd_nb {
76 	int nb_id;  /* NorthBridge id */
77 	int refcnt; /* reference count */
78 	struct perf_event *owners[X86_PMC_IDX_MAX];
79 	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
80 };
81 
82 #define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
83 
84 /*
85  * Flags PEBS can handle without a PMI.
86  *
87  * TID can only be handled by flushing at context switch.
88  * REGS_USER can be handled for events limited to ring 3.
89  *
90  */
91 #define PEBS_FREERUNNING_FLAGS \
92 	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
93 	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
94 	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
95 	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
96 	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
97 	PERF_SAMPLE_PERIOD)
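/*
 * A rough sketch (not the exact upstream code) of how this mask is applied
 * when configuring a PEBS event: auto-reload may only be combined with the
 * free-running flag when every requested sample field can be recovered from
 * the PEBS record itself, i.e. nothing outside x86_pmu.free_running_flags
 * is requested:
 *
 *	if (event->attr.precise_ip && !event->attr.freq) {
 *		event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
 *		if (!(event->attr.sample_type & ~x86_pmu.free_running_flags))
 *			event->hw.flags |= PERF_X86_EVENT_FREERUNNING;
 *	}
 */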
98 
99 #define PEBS_REGS \
100 	((1ULL << PERF_REG_X86_AX) | \
101 	 (1ULL << PERF_REG_X86_BX) | \
102 	 (1ULL << PERF_REG_X86_CX) | \
103 	 (1ULL << PERF_REG_X86_DX) | \
104 	 (1ULL << PERF_REG_X86_DI) | \
105 	 (1ULL << PERF_REG_X86_SI) | \
106 	 (1ULL << PERF_REG_X86_SP) | \
107 	 (1ULL << PERF_REG_X86_BP) | \
108 	 (1ULL << PERF_REG_X86_IP) | \
109 	 (1ULL << PERF_REG_X86_FLAGS) | \
110 	 (1ULL << PERF_REG_X86_R8) | \
111 	 (1ULL << PERF_REG_X86_R9) | \
112 	 (1ULL << PERF_REG_X86_R10) | \
113 	 (1ULL << PERF_REG_X86_R11) | \
114 	 (1ULL << PERF_REG_X86_R12) | \
115 	 (1ULL << PERF_REG_X86_R13) | \
116 	 (1ULL << PERF_REG_X86_R14) | \
117 	 (1ULL << PERF_REG_X86_R15))
118 
119 /*
120  * Per register state.
121  */
122 struct er_account {
123 	raw_spinlock_t      lock;	/* per-core: protect structure */
124 	u64                 config;	/* extra MSR config */
125 	u64                 reg;	/* extra MSR number */
126 	atomic_t            ref;	/* reference count */
127 };
128 
129 /*
130  * Per core/cpu state
131  *
132  * Used to coordinate shared registers between HT threads or
133  * among events on a single PMU.
134  */
135 struct intel_shared_regs {
136 	struct er_account       regs[EXTRA_REG_MAX];
137 	int                     refcnt;		/* per-core: #HT threads */
138 	unsigned                core_id;	/* per-core: core id */
139 };
140 
141 enum intel_excl_state_type {
142 	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
143 	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
144 	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
145 };
146 
147 struct intel_excl_states {
148 	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
149 	bool sched_started; /* true if scheduling has started */
150 };
151 
152 struct intel_excl_cntrs {
153 	raw_spinlock_t	lock;
154 
155 	struct intel_excl_states states[2];
156 
157 	union {
158 		u16	has_exclusive[2];
159 		u32	exclusive_present;
160 	};
161 
162 	int		refcnt;		/* per-core: #HT threads */
163 	unsigned	core_id;	/* per-core: core id */
164 };
165 
166 #define MAX_LBR_ENTRIES		32
167 
168 enum {
169 	X86_PERF_KFREE_SHARED = 0,
170 	X86_PERF_KFREE_EXCL   = 1,
171 	X86_PERF_KFREE_MAX
172 };
173 
174 struct cpu_hw_events {
175 	/*
176 	 * Generic x86 PMC bits
177 	 */
178 	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
179 	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
180 	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
181 	int			enabled;
182 
183 	int			n_events; /* the # of events in the below arrays */
184 	int			n_added;  /* the # of last-added events in the below arrays;
185 					     they've never been enabled yet */
186 	int			n_txn;    /* the # of last-added events in the below arrays;
187 					     added in the current transaction */
188 	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
189 	u64			tags[X86_PMC_IDX_MAX];
190 
191 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
192 	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];
193 
194 	int			n_excl; /* the number of exclusive events */
195 
196 	unsigned int		txn_flags;
197 	int			is_fake;
198 
199 	/*
200 	 * Intel DebugStore bits
201 	 */
202 	struct debug_store	*ds;
203 	void			*ds_pebs_vaddr;
204 	void			*ds_bts_vaddr;
205 	u64			pebs_enabled;
206 	int			n_pebs;
207 	int			n_large_pebs;
208 
209 	/*
210 	 * Intel LBR bits
211 	 */
212 	int				lbr_users;
213 	struct perf_branch_stack	lbr_stack;
214 	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
215 	struct er_account		*lbr_sel;
216 	u64				br_sel;
217 
218 	/*
219 	 * Intel host/guest exclude bits
220 	 */
221 	u64				intel_ctrl_guest_mask;
222 	u64				intel_ctrl_host_mask;
223 	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];
224 
225 	/*
226 	 * Intel checkpoint mask
227 	 */
228 	u64				intel_cp_status;
229 
230 	/*
231 	 * manage shared (per-core, per-cpu) registers
232 	 * used on Intel NHM/WSM/SNB
233 	 */
234 	struct intel_shared_regs	*shared_regs;
235 	/*
236 	 * manage exclusive counter access between hyperthreads
237 	 */
238 	struct event_constraint *constraint_list; /* in enable order */
239 	struct intel_excl_cntrs		*excl_cntrs;
240 	int excl_thread_id; /* 0 or 1 */
241 
242 	/*
243 	 * AMD specific bits
244 	 */
245 	struct amd_nb			*amd_nb;
246 	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
247 	u64				perf_ctr_virt_mask;
248 
249 	void				*kfree_on_online[X86_PERF_KFREE_MAX];
250 };
251 
252 #define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
253 	{ .idxmsk64 = (n) },		\
254 	.code = (c),			\
255 	.cmask = (m),			\
256 	.weight = (w),			\
257 	.overlap = (o),			\
258 	.flags = f,			\
259 }
260 
261 #define EVENT_CONSTRAINT(c, n, m)	\
262 	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
263 
264 #define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
265 	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
266 			   0, PERF_X86_EVENT_EXCL)
267 
268 /*
269  * The overlap flag marks event constraints with overlapping counter
270  * masks. This is the case if the counter mask of such an event is not
271  * a subset of any other counter mask of a constraint with an equal or
272  * higher weight, e.g.:
273  *
274  *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
275  *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
276  *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
277  *
278  * The event scheduler may not select the correct counter in the first
279  * cycle because it does not yet know which subsequent events will be
280  * scheduled. It may then fail to schedule the events. So we set the
281  * overlap flag for such constraints to give the scheduler a hint about
282  * which events to select for counter rescheduling.
283  *
284  * Care must be taken as the rescheduling algorithm is O(n!), which
285  * will dramatically increase scheduling cycles on an over-committed
286  * system.  The number of such EVENT_CONSTRAINT_OVERLAP() macros and
287  * their counter masks must be kept to a minimum.
288  */
289 #define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
290 	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)
291 
292 /*
293  * Constraint on the Event code.
294  */
295 #define INTEL_EVENT_CONSTRAINT(c, n)	\
296 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
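/*
 * For illustration only, a hypothetical model-specific constraint table
 * built from these macros (the event code and counter mask are made up):
 * event 0x3c is restricted to general-purpose counters 0 and 1 (bitmask
 * 0x3), and the table is terminated with the EVENT_CONSTRAINT_END marker
 * defined further down.
 *
 *	static struct event_constraint example_event_constraints[] = {
 *		INTEL_EVENT_CONSTRAINT(0x3c, 0x3),
 *		EVENT_CONSTRAINT_END
 *	};
 */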
297 
298 /*
299  * Constraint on the Event code + UMask + fixed-mask
300  *
301  * Filter mask used to validate fixed counter events.
302  * The following filters disqualify an event from the fixed counters:
303  *  - inv
304  *  - edge
305  *  - cnt-mask
306  *  - in_tx
307  *  - in_tx_checkpointed
308  *  The other filters are supported by fixed counters.
309  *  The any-thread option is supported starting with v3.
310  */
311 #define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
312 #define FIXED_EVENT_CONSTRAINT(c, n)	\
313 	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
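/*
 * Fixed counters live above the general-purpose counters in the index
 * space, starting at bit 32, so e.g. INST_RETIRED.ANY on fixed counter 0
 * would be expressed as:
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0)
 *
 * which sets idxmsk64 bit 32 and validates the config against
 * FIXED_EVENT_FLAGS (illustrative; see the model tables for real entries).
 */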
314 
315 /*
316  * Constraint on the Event code + UMask
317  */
318 #define INTEL_UEVENT_CONSTRAINT(c, n)	\
319 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
320 
321 /* Constraint on specific umask bit only + event */
322 #define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
323 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))
324 
325 /* Like UEVENT_CONSTRAINT, but match flags too */
326 #define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
327 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
328 
329 #define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
330 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
331 			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)
332 
333 #define INTEL_PLD_CONSTRAINT(c, n)	\
334 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
335 			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
336 
337 #define INTEL_PST_CONSTRAINT(c, n)	\
338 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
339 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)
340 
341 /* Event constraint, but match on all event flags too. */
342 #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
343 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
344 
345 /* Check only flags, but allow all event/umask */
346 #define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
347 	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)
348 
349 /* Check flags and event code, and set the HSW store flag */
350 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
351 	__EVENT_CONSTRAINT(code, n, 			\
352 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
353 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
354 
355 /* Check flags and event code, and set the HSW load flag */
356 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
357 	__EVENT_CONSTRAINT(code, n,			\
358 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
359 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
360 
361 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
362 	__EVENT_CONSTRAINT(code, n,			\
363 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
364 			  HWEIGHT(n), 0, \
365 			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
366 
367 /* Check flags and event code/umask, and set the HSW store flag */
368 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
369 	__EVENT_CONSTRAINT(code, n, 			\
370 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
371 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
372 
373 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
374 	__EVENT_CONSTRAINT(code, n,			\
375 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
376 			  HWEIGHT(n), 0, \
377 			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)
378 
379 /* Check flags and event code/umask, and set the HSW load flag */
380 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
381 	__EVENT_CONSTRAINT(code, n, 			\
382 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
383 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
384 
385 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
386 	__EVENT_CONSTRAINT(code, n,			\
387 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
388 			  HWEIGHT(n), 0, \
389 			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
390 
391 /* Check flags and event code/umask, and set the HSW N/A flag */
392 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
393 	__EVENT_CONSTRAINT(code, n, 			\
394 			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
395 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
396 
397 
398 /*
399  * We define the end marker as having a weight of -1
400  * so that events can be blacklisted with a counter bitmask
401  * of zero and thus a weight of zero.
402  * The end marker's weight of -1 cannot possibly be
403  * obtained by counting the bits in any bitmask.
404  */
405 #define EVENT_CONSTRAINT_END { .weight = -1 }
406 
407 /*
408  * Check for end marker with weight == -1
409  */
410 #define for_each_event_constraint(e, c)	\
411 	for ((e) = (c); (e)->weight != -1; (e)++)
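/*
 * A minimal sketch of the typical lookup loop (the real code in
 * x86_get_event_constraints() adds flag handling and a fallback to
 * "unconstrained"): walk the table until the end marker and return the
 * first constraint whose code/cmask pair matches the event's config.
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.event_constraints) {
 *		if ((event->hw.config & c->cmask) == c->code)
 *			return c;
 *	}
 */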
412 
413 /*
414  * Extra registers for specific events.
415  *
416  * Some events need large masks and require external MSRs.
417  * a PMU and sometimes between PMUs of sibling HT threads.
418  * In either case, the kernel needs to handle conflicting
419  * accesses to those extra, shared regs. The data structure
420  * to manage those registers is stored in cpu_hw_events.
421  * to manage those registers is stored in cpu_hw_event.
422  */
423 struct extra_reg {
424 	unsigned int		event;
425 	unsigned int		msr;
426 	u64			config_mask;
427 	u64			valid_mask;
428 	int			idx;  /* per_xxx->regs[] reg index */
429 	bool			extra_msr_access;
430 };
431 
432 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
433 	.event = (e),			\
434 	.msr = (ms),			\
435 	.config_mask = (m),		\
436 	.valid_mask = (vm),		\
437 	.idx = EXTRA_REG_##i,		\
438 	.extra_msr_access = true,	\
439 	}
440 
441 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
442 	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
443 
444 #define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
445 	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
446 			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)
447 
448 #define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
449 	INTEL_UEVENT_EXTRA_REG(c, \
450 			       MSR_PEBS_LD_LAT_THRESHOLD, \
451 			       0xffff, \
452 			       LDLAT)
453 
454 #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
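/*
 * For illustration, an offcore-response style extra_reg table entry (the
 * valid mask here is made up): event 0xb7 with umask 0x01 selects
 * MSR_OFFCORE_RSP_0, the valid mask limits which bits may be written, and
 * the table is terminated by EVENT_EXTRA_END.
 *
 *	static struct extra_reg example_extra_regs[] __read_mostly = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 *		EVENT_EXTRA_END
 *	};
 */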
455 
456 union perf_capabilities {
457 	struct {
458 		u64	lbr_format:6;
459 		u64	pebs_trap:1;
460 		u64	pebs_arch_reg:1;
461 		u64	pebs_format:4;
462 		u64	smm_freeze:1;
463 		/*
464 		 * PMU supports a separate counter range for writing
465 		 * values > 32 bits.
466 		 */
467 		u64	full_width_write:1;
468 	};
469 	u64	capabilities;
470 };
471 
472 struct x86_pmu_quirk {
473 	struct x86_pmu_quirk *next;
474 	void (*func)(void);
475 };
476 
477 union x86_pmu_config {
478 	struct {
479 		u64 event:8,
480 		    umask:8,
481 		    usr:1,
482 		    os:1,
483 		    edge:1,
484 		    pc:1,
485 		    interrupt:1,
486 		    __reserved1:1,
487 		    en:1,
488 		    inv:1,
489 		    cmask:8,
490 		    event2:4,
491 		    __reserved2:4,
492 		    go:1,
493 		    ho:1;
494 	} bits;
495 	u64 value;
496 };
497 
498 #define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
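/*
 * Example (illustrative values): build a raw config word for event 0xc0,
 * umask 0x01 with an inverted counter-mask of 16, without open-coding the
 * bit positions:
 *
 *	u64 cfg = X86_CONFIG(.event = 0xc0, .umask = 0x01,
 *			     .inv = 1, .cmask = 16);
 */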
499 
500 enum {
501 	x86_lbr_exclusive_lbr,
502 	x86_lbr_exclusive_bts,
503 	x86_lbr_exclusive_pt,
504 	x86_lbr_exclusive_max,
505 };
506 
507 /*
508  * struct x86_pmu - generic x86 pmu
509  */
510 struct x86_pmu {
511 	/*
512 	 * Generic x86 PMC bits
513 	 */
514 	const char	*name;
515 	int		version;
516 	int		(*handle_irq)(struct pt_regs *);
517 	void		(*disable_all)(void);
518 	void		(*enable_all)(int added);
519 	void		(*enable)(struct perf_event *);
520 	void		(*disable)(struct perf_event *);
521 	void		(*add)(struct perf_event *);
522 	void		(*del)(struct perf_event *);
523 	int		(*hw_config)(struct perf_event *event);
524 	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
525 	unsigned	eventsel;
526 	unsigned	perfctr;
527 	int		(*addr_offset)(int index, bool eventsel);
528 	int		(*rdpmc_index)(int index);
529 	u64		(*event_map)(int);
530 	int		max_events;
531 	int		num_counters;
532 	int		num_counters_fixed;
533 	int		cntval_bits;
534 	u64		cntval_mask;
535 	union {
536 			unsigned long events_maskl;
537 			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
538 	};
539 	int		events_mask_len;
540 	int		apic;
541 	u64		max_period;
542 	struct event_constraint *
543 			(*get_event_constraints)(struct cpu_hw_events *cpuc,
544 						 int idx,
545 						 struct perf_event *event);
546 
547 	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
548 						 struct perf_event *event);
549 
550 	void		(*start_scheduling)(struct cpu_hw_events *cpuc);
551 
552 	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
553 
554 	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);
555 
556 	struct event_constraint *event_constraints;
557 	struct x86_pmu_quirk *quirks;
558 	int		perfctr_second_write;
559 	bool		late_ack;
560 	unsigned	(*limit_period)(struct perf_event *event, unsigned l);
561 
562 	/*
563 	 * sysfs attrs
564 	 */
565 	int		attr_rdpmc_broken;
566 	int		attr_rdpmc;
567 	struct attribute **format_attrs;
568 	struct attribute **event_attrs;
569 	struct attribute **caps_attrs;
570 
571 	ssize_t		(*events_sysfs_show)(char *page, u64 config);
572 	struct attribute **cpu_events;
573 
574 	unsigned long	attr_freeze_on_smi;
575 	struct attribute **attrs;
576 
577 	/*
578 	 * CPU Hotplug hooks
579 	 */
580 	int		(*cpu_prepare)(int cpu);
581 	void		(*cpu_starting)(int cpu);
582 	void		(*cpu_dying)(int cpu);
583 	void		(*cpu_dead)(int cpu);
584 
585 	void		(*check_microcode)(void);
586 	void		(*sched_task)(struct perf_event_context *ctx,
587 				      bool sched_in);
588 
589 	/*
590 	 * Intel Arch Perfmon v2+
591 	 */
592 	u64			intel_ctrl;
593 	union perf_capabilities intel_cap;
594 
595 	/*
596 	 * Intel DebugStore bits
597 	 */
598 	unsigned int	bts		:1,
599 			bts_active	:1,
600 			pebs		:1,
601 			pebs_active	:1,
602 			pebs_broken	:1,
603 			pebs_prec_dist	:1,
604 			pebs_no_tlb	:1;
605 	int		pebs_record_size;
606 	int		pebs_buffer_size;
607 	void		(*drain_pebs)(struct pt_regs *regs);
608 	struct event_constraint *pebs_constraints;
609 	void		(*pebs_aliases)(struct perf_event *event);
610 	int 		max_pebs_events;
611 	unsigned long	free_running_flags;
612 
613 	/*
614 	 * Intel LBR
615 	 */
616 	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
617 	int		lbr_nr;			   /* hardware stack size */
618 	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
619 	const int	*lbr_sel_map;		   /* lbr_select mappings */
620 	bool		lbr_double_abort;	   /* duplicated lbr aborts */
621 	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */
622 
623 	/*
624 	 * Intel PT/LBR/BTS are exclusive
625 	 */
626 	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];
627 
628 	/*
629 	 * AMD bits
630 	 */
631 	unsigned int	amd_nb_constraints : 1;
632 
633 	/*
634 	 * Extra registers for events
635 	 */
636 	struct extra_reg *extra_regs;
637 	unsigned int flags;
638 
639 	/*
640 	 * Intel host/guest support (KVM)
641 	 */
642 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
643 };
644 
645 struct x86_perf_task_context {
646 	u64 lbr_from[MAX_LBR_ENTRIES];
647 	u64 lbr_to[MAX_LBR_ENTRIES];
648 	u64 lbr_info[MAX_LBR_ENTRIES];
649 	int tos;
650 	int lbr_callstack_users;
651 	int lbr_stack_state;
652 };
653 
654 #define x86_add_quirk(func_)						\
655 do {									\
656 	static struct x86_pmu_quirk __quirk __initdata = {		\
657 		.func = func_,						\
658 	};								\
659 	__quirk.next = x86_pmu.quirks;					\
660 	x86_pmu.quirks = &__quirk;					\
661 } while (0)
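/*
 * Sketch of how a model-specific init path would register a quirk;
 * "example_quirk" is a hypothetical function, not part of this file.
 * All registered quirks are run once during PMU initialization.
 *
 *	static __init void example_quirk(void)
 *	{
 *		pr_warn("working around erratum XYZ\n");
 *	}
 *	...
 *	x86_add_quirk(example_quirk);
 */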
662 
663 /*
664  * x86_pmu flags
665  */
666 #define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
667 #define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs   */
668 #define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements  */
669 #define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
670 
671 #define EVENT_VAR(_id)  event_attr_##_id
672 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
673 
674 #define EVENT_ATTR(_name, _id)						\
675 static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
676 	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
677 	.id		= PERF_COUNT_HW_##_id,				\
678 	.event_str	= NULL,						\
679 };
680 
681 #define EVENT_ATTR_STR(_name, v, str)					\
682 static struct perf_pmu_events_attr event_attr_##v = {			\
683 	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
684 	.id		= 0,						\
685 	.event_str	= str,						\
686 };
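/*
 * Typical use in the model-specific code, e.g. exporting a "mem-loads"
 * alias (the encoding string is model dependent and only illustrative
 * here); the attribute is then referenced via EVENT_PTR(mem_ld_example):
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_example, "event=0x0b,umask=0x10,ldlat=3");
 */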
687 
688 #define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
689 static struct perf_pmu_events_ht_attr event_attr_##v = {		\
690 	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
691 	.id		= 0,						\
692 	.event_str_noht	= noht,						\
693 	.event_str_ht	= ht,						\
694 }
695 
696 extern struct x86_pmu x86_pmu __read_mostly;
697 
698 static inline bool x86_pmu_has_lbr_callstack(void)
699 {
700 	return  x86_pmu.lbr_sel_map &&
701 		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
702 }
703 
704 DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
705 
706 int x86_perf_event_set_period(struct perf_event *event);
707 
708 /*
709  * Generalized hw caching related hw_event table, filled
710  * in on a per-model basis. A value of 0 means
711  * 'not supported', -1 means 'hw_event makes no sense on
712  * this CPU', any other value means the raw hw_event
713  * ID.
714  */
715 
716 #define C(x) PERF_COUNT_HW_CACHE_##x
717 
718 extern u64 __read_mostly hw_cache_event_ids
719 				[PERF_COUNT_HW_CACHE_MAX]
720 				[PERF_COUNT_HW_CACHE_OP_MAX]
721 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
722 extern u64 __read_mostly hw_cache_extra_regs
723 				[PERF_COUNT_HW_CACHE_MAX]
724 				[PERF_COUNT_HW_CACHE_OP_MAX]
725 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
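/*
 * Example lookup (indices expand through the C() helper above): the raw
 * event id for a last-level-cache read miss is
 *
 *	hw_cache_event_ids[C(LL)][C(OP_READ)][C(RESULT_MISS)]
 *
 * which is 0 if the model does not support it and -1 if the combination
 * makes no sense on this CPU.
 */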
726 
727 u64 x86_perf_event_update(struct perf_event *event);
728 
729 static inline unsigned int x86_pmu_config_addr(int index)
730 {
731 	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
732 				   x86_pmu.addr_offset(index, true) : index);
733 }
734 
735 static inline unsigned int x86_pmu_event_addr(int index)
736 {
737 	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
738 				  x86_pmu.addr_offset(index, false) : index);
739 }
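/*
 * Example: on a PMU without an addr_offset() method, such as the core
 * Intel PMU, these helpers reduce to base + index, e.g. with
 * eventsel == MSR_ARCH_PERFMON_EVENTSEL0 (0x186) and
 * perfctr == MSR_ARCH_PERFMON_PERFCTR0 (0xc1):
 *
 *	x86_pmu_config_addr(1) == 0x187
 *	x86_pmu_event_addr(1)  == 0xc2
 *
 * AMD Fam15h+ provides addr_offset() because its counter MSRs are
 * interleaved rather than contiguous.
 */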
740 
741 static inline int x86_pmu_rdpmc_index(int index)
742 {
743 	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
744 }
745 
746 int x86_add_exclusive(unsigned int what);
747 
748 void x86_del_exclusive(unsigned int what);
749 
750 int x86_reserve_hardware(void);
751 
752 void x86_release_hardware(void);
753 
754 int x86_pmu_max_precise(void);
755 
756 void hw_perf_lbr_event_destroy(struct perf_event *event);
757 
758 int x86_setup_perfctr(struct perf_event *event);
759 
760 int x86_pmu_hw_config(struct perf_event *event);
761 
762 void x86_pmu_disable_all(void);
763 
764 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
765 					  u64 enable_mask)
766 {
767 	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
768 
769 	if (hwc->extra_reg.reg)
770 		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
771 	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
772 }
773 
774 void x86_pmu_enable_all(int added);
775 
776 int perf_assign_events(struct event_constraint **constraints, int n,
777 			int wmin, int wmax, int gpmax, int *assign);
778 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
779 
780 void x86_pmu_stop(struct perf_event *event, int flags);
781 
782 static inline void x86_pmu_disable_event(struct perf_event *event)
783 {
784 	struct hw_perf_event *hwc = &event->hw;
785 
786 	wrmsrl(hwc->config_base, hwc->config);
787 }
788 
789 void x86_pmu_enable_event(struct perf_event *event);
790 
791 int x86_pmu_handle_irq(struct pt_regs *regs);
792 
793 extern struct event_constraint emptyconstraint;
794 
795 extern struct event_constraint unconstrained;
796 
797 static inline bool kernel_ip(unsigned long ip)
798 {
799 #ifdef CONFIG_X86_32
800 	return ip > PAGE_OFFSET;
801 #else
802 	return (long)ip < 0;
803 #endif
804 }
805 
806 /*
807  * Not all PMUs provide the right context information to place the reported IP
808  * into full context. Specifically, segment registers are typically not
809  * supplied.
810  *
811  * Assuming the address is a linear address (it is for IBS), we fake the CS and
812  * vm86 mode using the known zero-based code segment and 'fix up' the registers
813  * to reflect this.
814  *
815  * Intel PEBS/LBR appear to typically provide the effective address; there is
816  * not much we can do about that but pray and treat it like a linear address.
817  */
818 static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
819 {
820 	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
821 	if (regs->flags & X86_VM_MASK)
822 		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
823 	regs->ip = ip;
824 }
825 
826 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
827 ssize_t intel_event_sysfs_show(char *page, u64 config);
828 
829 struct attribute **merge_attr(struct attribute **a, struct attribute **b);
830 
831 ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
832 			  char *page);
833 ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
834 			  char *page);
835 
836 #ifdef CONFIG_CPU_SUP_AMD
837 
838 int amd_pmu_init(void);
839 
840 #else /* CONFIG_CPU_SUP_AMD */
841 
842 static inline int amd_pmu_init(void)
843 {
844 	return 0;
845 }
846 
847 #endif /* CONFIG_CPU_SUP_AMD */
848 
849 #ifdef CONFIG_CPU_SUP_INTEL
850 
851 static inline bool intel_pmu_has_bts(struct perf_event *event)
852 {
853 	if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
854 	    !event->attr.freq && event->hw.sample_period == 1)
855 		return true;
856 
857 	return false;
858 }
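/*
 * In perf tool terms (illustrative): "perf record -e branches -c 1" creates
 * exactly such an event (PERF_COUNT_HW_BRANCH_INSTRUCTIONS with a fixed
 * sample_period of 1 and attr.freq == 0), which is then steered to the BTS
 * machinery instead of occupying a generic counter.
 */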
859 
860 int intel_pmu_save_and_restart(struct perf_event *event);
861 
862 struct event_constraint *
863 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
864 			  struct perf_event *event);
865 
866 struct intel_shared_regs *allocate_shared_regs(int cpu);
867 
868 int intel_pmu_init(void);
869 
870 void init_debug_store_on_cpu(int cpu);
871 
872 void fini_debug_store_on_cpu(int cpu);
873 
874 void release_ds_buffers(void);
875 
876 void reserve_ds_buffers(void);
877 
878 extern struct event_constraint bts_constraint;
879 
880 void intel_pmu_enable_bts(u64 config);
881 
882 void intel_pmu_disable_bts(void);
883 
884 int intel_pmu_drain_bts_buffer(void);
885 
886 extern struct event_constraint intel_core2_pebs_event_constraints[];
887 
888 extern struct event_constraint intel_atom_pebs_event_constraints[];
889 
890 extern struct event_constraint intel_slm_pebs_event_constraints[];
891 
892 extern struct event_constraint intel_glm_pebs_event_constraints[];
893 
894 extern struct event_constraint intel_glp_pebs_event_constraints[];
895 
896 extern struct event_constraint intel_nehalem_pebs_event_constraints[];
897 
898 extern struct event_constraint intel_westmere_pebs_event_constraints[];
899 
900 extern struct event_constraint intel_snb_pebs_event_constraints[];
901 
902 extern struct event_constraint intel_ivb_pebs_event_constraints[];
903 
904 extern struct event_constraint intel_hsw_pebs_event_constraints[];
905 
906 extern struct event_constraint intel_bdw_pebs_event_constraints[];
907 
908 extern struct event_constraint intel_skl_pebs_event_constraints[];
909 
910 struct event_constraint *intel_pebs_constraints(struct perf_event *event);
911 
912 void intel_pmu_pebs_add(struct perf_event *event);
913 
914 void intel_pmu_pebs_del(struct perf_event *event);
915 
916 void intel_pmu_pebs_enable(struct perf_event *event);
917 
918 void intel_pmu_pebs_disable(struct perf_event *event);
919 
920 void intel_pmu_pebs_enable_all(void);
921 
922 void intel_pmu_pebs_disable_all(void);
923 
924 void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
925 
926 void intel_ds_init(void);
927 
928 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
929 
930 u64 lbr_from_signext_quirk_wr(u64 val);
931 
932 void intel_pmu_lbr_reset(void);
933 
934 void intel_pmu_lbr_add(struct perf_event *event);
935 
936 void intel_pmu_lbr_del(struct perf_event *event);
937 
938 void intel_pmu_lbr_enable_all(bool pmi);
939 
940 void intel_pmu_lbr_disable_all(void);
941 
942 void intel_pmu_lbr_read(void);
943 
944 void intel_pmu_lbr_init_core(void);
945 
946 void intel_pmu_lbr_init_nhm(void);
947 
948 void intel_pmu_lbr_init_atom(void);
949 
950 void intel_pmu_lbr_init_slm(void);
951 
952 void intel_pmu_lbr_init_snb(void);
953 
954 void intel_pmu_lbr_init_hsw(void);
955 
956 void intel_pmu_lbr_init_skl(void);
957 
958 void intel_pmu_lbr_init_knl(void);
959 
960 void intel_pmu_pebs_data_source_nhm(void);
961 
962 void intel_pmu_pebs_data_source_skl(bool pmem);
963 
964 int intel_pmu_setup_lbr_filter(struct perf_event *event);
965 
966 void intel_pt_interrupt(void);
967 
968 int intel_bts_interrupt(void);
969 
970 void intel_bts_enable_local(void);
971 
972 void intel_bts_disable_local(void);
973 
974 int p4_pmu_init(void);
975 
976 int p6_pmu_init(void);
977 
978 int knc_pmu_init(void);
979 
980 static inline int is_ht_workaround_enabled(void)
981 {
982 	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
983 }
984 
985 #else /* CONFIG_CPU_SUP_INTEL */
986 
987 static inline void reserve_ds_buffers(void)
988 {
989 }
990 
991 static inline void release_ds_buffers(void)
992 {
993 }
994 
995 static inline int intel_pmu_init(void)
996 {
997 	return 0;
998 }
999 
1000 static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
1001 {
1002 	return NULL;
1003 }
1004 
1005 static inline int is_ht_workaround_enabled(void)
1006 {
1007 	return 0;
1008 }
1009 #endif /* CONFIG_CPU_SUP_INTEL */
1010