#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED					3
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
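
/*
 * Illustrative sketch, not part of this header's API: composing an
 * event select word from the bits above for UNHALTED_CORE_CYCLES (the
 * selector and umask values are defined further down in this file),
 * counted in both user and kernel mode with the counter enabled.
 * Assumes wrmsrl() from <asm/msr.h>:
 *
 *	u64 evtsel = ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	|
 *		     ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	|
 *		     ARCH_PERFMON_EVENTSEL_USR			|
 *		     ARCH_PERFMON_EVENTSEL_OS			|
 *		     ARCH_PERFMON_EVENTSEL_ENABLE;
 *
 *	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, evtsel);
 */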

#define AMD_PERFMON_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD_PERFMON_EVENTSEL_HOSTONLY			(1ULL << 41)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |  \
	 AMD64_EVENTSEL_EVENT)
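
/*
 * Illustrative sketch, not kernel API: the raw event masks above are
 * meant for stripping reserved and privileged bits from a user-supplied
 * raw config before it is programmed into an event select MSR, e.g.:
 *
 *	u64 config = attr->config & X86_RAW_EVENT_MASK;
 *
 * An AMD-specific path would use AMD64_RAW_EVENT_MASK instead, which
 * additionally keeps the extended event select bits 35:32.
 */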
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};
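
/*
 * Illustrative sketch, not the exact kernel code: how the unions above
 * map onto CPUID leaf 0xa when filling struct x86_pmu_capability (the
 * in-kernel helper for this is perf_get_x86_pmu_capability(), declared
 * further down). Assumes cpuid() from <asm/processor.h>:
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *	union cpuid10_edx edx;
 *	unsigned int unused;
 *
 *	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
 *
 *	cap->version		= eax.split.version_id;
 *	cap->num_counters_gp	= eax.split.num_counters;
 *	cap->bit_width_gp	= eax.split.bit_width;
 *	cap->events_mask	= ebx.full;
 *	cap->events_mask_len	= eax.split.mask_length;
 *	cap->num_counters_fixed	= edx.split.num_counters_fixed;
 *	cap->bit_width_fixed	= edx.split.bit_width_fixed;
 */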

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
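
/*
 * Illustrative sketch (control field layout per the Intel SDM, not
 * defined in this file): each fixed counter i owns a 4-bit field in
 * FIXED_CTR_CTRL at bits [4*i+3:4*i] -- bit 0 enables ring-0 counting,
 * bit 1 ring-3, bit 3 the overflow interrupt. Enabling
 * Instr_Retired.Any for both user and kernel mode could look like:
 *
 *	u64 ctrl;
 *
 *	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 *	ctrl |= 0x3ULL << (4 * (INTEL_PMC_IDX_FIXED_INSTRUCTIONS -
 *				INTEL_PMC_IDX_FIXED));
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 */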

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS				(INTEL_PMC_IDX_FIXED + 16)

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * These caps use the same bit positions as the IBS cpuid feature flags
 * (Fn8000_001B_EAX), except that bit 0 here indicates the existence of
 * IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)
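
/*
 * Illustrative sketch, not kernel API (use_dispatched_op_counting() is
 * a made-up placeholder): callers test these caps against the value
 * returned by get_ibs_caps(), declared further down:
 *
 *	u32 caps = get_ibs_caps();
 *
 *	if (!(caps & IBS_CAPS_AVAIL))
 *		return -ENODEV;
 *	if (caps & IBS_CAPS_OPCNT)
 *		use_dispatched_op_counting();
 */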

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)
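
/*
 * Illustrative sketch, not kernel API (assumes MSR_AMD64_IBSOPCTL from
 * <asm/msr-index.h> and wrmsrl() from <asm/msr.h>): arming IBS op
 * sampling with a period of 0x10000 ops. The max count field encodes
 * the period with its lower 4 bits dropped, hence the shift:
 *
 *	u64 op_ctl = ((0x10000ULL >> 4) & IBS_OP_MAX_CNT) | IBS_OP_ENABLE;
 *
 *	wrmsrl(MSR_AMD64_IBSOPCTL, op_ctl);
 */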

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)
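
/*
 * Illustrative sketch, mirroring what the x86 perf_misc_flags()
 * implementation does with PERF_EFLAGS_EXACT when it builds the misc
 * field of a sample (PERF_RECORD_MISC_EXACT_IP is the generic flag
 * from <linux/perf_event.h>):
 *
 *	if (regs->flags & PERF_EFLAGS_EXACT)
 *		misc |= PERF_RECORD_MISC_EXACT_IP;
 */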

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	/* flags start out clear; perf sets PERF_EFLAGS_* later */ \
	(regs)->flags = 0;					\
	/* snapshot the current stack pointer into regs->sp */	\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};
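
/*
 * Illustrative sketch, not kernel API: a KVM-style consumer of
 * perf_guest_get_msrs(), declared below, switches each listed MSR
 * between its host and guest values around VM entry:
 *
 *	int i, nr;
 *	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);
 *
 *	for (i = 0; i < nr; i++)
 *		wrmsrl(msrs[i].msr, msrs[i].guest);
 *
 * and symmetrically restores msrs[i].host after VM exit.
 */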

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */