#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define X86_PMC_MAX_GENERIC					8
#define X86_PMC_MAX_FIXED					3

#define X86_PMC_IDX_GENERIC				        0
#define X86_PMC_IDX_FIXED				       32
#define X86_PMC_IDX_MAX					       64

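/*
 * A sketch (not part of this header) of how the index constants above
 * partition a 64-bit counter bitmap: generic counters occupy the low
 * bits, fixed counters start at bit 32.  used_mask below is a
 * hypothetical bitmap of busy counters:
 *
 *	u64 used_mask = 0;
 *	used_mask |= 1ULL << (X86_PMC_IDX_GENERIC + 0);	generic PMC0
 *	used_mask |= 1ULL << (X86_PMC_IDX_FIXED + 1);	fixed counter 1
 */
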
#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL0_ENABLE			  (1 << 22)
#define ARCH_PERFMON_EVENTSEL_INT			  (1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS			  (1 << 17)
#define ARCH_PERFMON_EVENTSEL_USR			  (1 << 16)

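/*
 * Illustrative sketch, not part of this header: programming generic
 * counter 0 to count unhalted core cycles in user and kernel mode,
 * with a PMI on overflow.  The event select/umask values are the
 * architectural cycles event defined further below; wrmsrl()/rdmsrl()
 * are the usual <asm/msr.h> accessors:
 *
 *	u64 evtsel = ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL |
 *		     ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK |
 *		     ARCH_PERFMON_EVENTSEL_USR |
 *		     ARCH_PERFMON_EVENTSEL_OS |
 *		     ARCH_PERFMON_EVENTSEL_INT |
 *		     ARCH_PERFMON_EVENTSEL0_ENABLE;
 *	u64 count;
 *
 *	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, evtsel);
 *	...
 *	rdmsrl(MSR_ARCH_PERFMON_PERFCTR0, count);
 */
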
/*
 * Covers both the event select (bits 0-7) and the unit mask (bits 8-15):
 */
#define ARCH_PERFMON_EVENT_MASK				    0xffff

/*
 * Filter mask used to validate events for fixed counters.
 * The following filters disqualify an event from fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define ARCH_PERFMON_EVENT_FILTER_MASK			0xff840000

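/*
 * Illustrative sketch of the intended use: an event qualifies for a
 * fixed counter only if none of the disqualifying filter bits are set
 * in its configuration (config below is a hypothetical raw
 * EVENTSEL-format value):
 *
 *	bool fixed_ok = !(config & ARCH_PERFMON_EVENT_FILTER_MASK);
 */
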
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		      0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX			 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED			 6

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_events:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_events_fixed:4;
		unsigned int reserved:28;
	} split;
	unsigned int full;
};


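/*
 * Illustrative enumeration sketch (assumes the cpuid() helper from
 * <asm/processor.h>): CPUID leaf 0xa describes the PMU, and EBX sets
 * one bit per architectural event that is NOT available:
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_edx edx;
 *	unsigned int ebx, unused;
 *
 *	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
 *
 *	The unhalted core cycles event is usable when:
 *
 *	eax.split.version_id &&
 *	!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)
 */
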
/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL			0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0			0x309
#define X86_PMC_IDX_FIXED_INSTRUCTIONS			(X86_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1			0x30a
#define X86_PMC_IDX_FIXED_CPU_CYCLES			(X86_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2			0x30b
#define X86_PMC_IDX_FIXED_BUS_CYCLES			(X86_PMC_IDX_FIXED + 2)

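/*
 * Illustrative sketch: each fixed counter gets a 4-bit control field
 * in MSR_ARCH_PERFMON_FIXED_CTR_CTRL (bit 0: enable in kernel mode,
 * bit 1: enable in user mode, bit 3: PMI on overflow).  Enabling
 * fixed counter 0 (instructions retired) for user+kernel with a PMI,
 * then reading it:
 *
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, 0xbULL << (4 * 0));
 *	...
 *	u64 instructions;
 *	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0, instructions);
 *
 * A real implementation would read-modify-write the control MSR so
 * the fields of the other fixed counters are preserved.
 */
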
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS MSR.
 */
#define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)


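/*
 * Illustrative sketch: an overflow handler reads PERF_GLOBAL_STATUS
 * (MSR_CORE_PERF_GLOBAL_STATUS from <asm/msr-index.h>) and tests it
 * against the same index space, e.g. for fixed counter 0
 * (handle_overflow is a hypothetical handler):
 *
 *	u64 status;
 *	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 *	if (status & (1ULL << X86_PMC_IDX_FIXED_INSTRUCTIONS))
 *		handle_overflow();
 */
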
#ifdef CONFIG_PERF_EVENTS
extern void init_hw_perf_events(void);
extern void perf_events_lapic_init(void);

#define PERF_EVENT_INDEX_OFFSET			0

#else
static inline void init_hw_perf_events(void)		{ }
static inline void perf_events_lapic_init(void)	{ }
#endif

#endif /* _ASM_X86_PERF_EVENT_H */