/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

#include <linux/static_call.h>

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				32
#define INTEL_PMC_MAX_FIXED				16
#define INTEL_PMC_IDX_FIXED				32

#define X86_PMC_IDX_MAX					64

#define MSR_ARCH_PERFMON_PERFCTR0			0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
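/*
 * Worked example (illustrative, not used by the driver): counting
 * unhalted core cycles (event 0x3c, umask 0x00) in both user and
 * kernel mode, with PMI on overflow, composes as
 *
 *	0x3c | ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_OS |
 *	ARCH_PERFMON_EVENTSEL_INT | ARCH_PERFMON_EVENTSEL_ENABLE == 0x53003c
 */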

#define INTEL_FIXED_BITS_MASK				0xFULL
#define INTEL_FIXED_BITS_STRIDE				4
#define INTEL_FIXED_0_KERNEL				(1ULL << 0)
#define INTEL_FIXED_0_USER				(1ULL << 1)
#define INTEL_FIXED_0_ANYTHREAD				(1ULL << 2)
#define INTEL_FIXED_0_ENABLE_PMI			(1ULL << 3)

#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE				(1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE				(1ULL << 32)

#define intel_fixed_bits_by_idx(_idx, _bits)		\
	((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))
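
/*
 * Illustrative sketch (this helper is not part of the kernel API): each
 * fixed counter owns a 4-bit field at idx * INTEL_FIXED_BITS_STRIDE in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL, so enabling fixed counter 2 for both
 * kernel and user yields 0x3ULL << 8 == 0x300.
 */
static inline u64 intel_fixed_ctrl_example(unsigned int idx)
{
	return intel_fixed_bits_by_idx(idx, INTEL_FIXED_0_KERNEL |
					    INTEL_FIXED_0_USER);
}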

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK		\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT				48
#define AMD64_L3_SLICE_MASK				\
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK				\
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT				56
#define AMD64_L3_THREAD_MASK				\
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK			\
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES				BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES				BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT				42
#define AMD64_L3_COREID_MASK				\
	(0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK			\
	(ARCH_PERFMON_EVENTSEL_EVENT |		\
	 ARCH_PERFMON_EVENTSEL_UMASK |		\
	 ARCH_PERFMON_EVENTSEL_EDGE  |		\
	 ARCH_PERFMON_EVENTSEL_INV   |		\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK			\
	(X86_RAW_EVENT_MASK |			\
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB			\
	(AMD64_EVENTSEL_EVENT |			\
	 ARCH_PERFMON_EVENTSEL_UMASK)

#define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB	\
	(AMD64_EVENTSEL_EVENT |			\
	 GENMASK_ULL(37, 36))

#define AMD64_PERFMON_V2_EVENTSEL_UMASK_NB	\
	(ARCH_PERFMON_EVENTSEL_UMASK |		\
	 GENMASK_ULL(27, 24))

#define AMD64_PERFMON_V2_RAW_EVENT_MASK_NB	\
	(AMD64_PERFMON_V2_EVENTSEL_EVENT_NB |	\
	 AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)
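
/*
 * Worked example (illustrative): AMD event-select codes are 12 bits wide.
 * Per AMD64_EVENTSEL_EVENT, the low byte stays in bits 7:0 and the upper
 * nibble moves to bits 35:32, so event 0x123 encodes as
 *
 *	(0x1ULL << 32) | 0x23 == 0x100000023
 */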

#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT	\
	(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7
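
/*
 * Note: an architectural event is only usable when its "not available"
 * bit in CPUID.0xA EBX (see union cpuid10_ebx below) is clear;
 * ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX is that bit position for the
 * core-cycles event above.
 */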

#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GP		BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT	24

/* Steal the highest bit of pebs_data_cfg for SW usage */
#define PEBS_UPDATE_DS_SW	BIT_ULL(63)
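
/*
 * E.g. a pebs_data_cfg of (PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP)
 * requests the memory-info and GPR groups in each adaptive PEBS record;
 * when PEBS_DATACFG_LBRS is set, the LBR-entry count field (encoded per
 * the SDM) starts at PEBS_DATACFG_LBR_SHIFT.
 */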

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved1:2;
		unsigned int anythread_deprecated:1;
		unsigned int reserved2:16;
	} split;
	unsigned int full;
};
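
/*
 * Illustrative sketch of how these unions are consumed, roughly what the
 * core driver does at init time (cpuid() comes from <asm/processor.h>):
 *
 *	union cpuid10_eax eax;
 *	unsigned int ebx, ecx, edx;
 *
 *	cpuid(0x0a, &eax.full, &ebx, &ecx, &edx);
 *	if (eax.split.version_id)
 *		nr_gp_counters = eax.split.num_counters;
 */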

/*
 * Intel "Architectural Performance Monitoring extension" CPUID
 * detection/enumeration details:
 */
#define ARCH_PERFMON_EXT_LEAF			0x00000023
#define ARCH_PERFMON_NUM_COUNTER_LEAF		0x1

union cpuid35_eax {
	struct {
		unsigned int	leaf0:1;
		/* Counters Sub-Leaf */
		unsigned int	cntr_subleaf:1;
		/* Auto Counter Reload Sub-Leaf */
		unsigned int	acr_subleaf:1;
		/* Events Sub-Leaf */
		unsigned int	events_subleaf:1;
		unsigned int	reserved:28;
	} split;
	unsigned int		full;
};

union cpuid35_ebx {
	struct {
		/* UnitMask2 Supported */
		unsigned int	umask2:1;
		/* EQ-bit Supported */
		unsigned int	eq:1;
		unsigned int	reserved:30;
	} split;
	unsigned int		full;
};

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
	struct {
		/* Supported LBR depth values */
		unsigned int	lbr_depth_mask:8;
		unsigned int	reserved:22;
		/* Deep C-state Reset */
		unsigned int	lbr_deep_c_reset:1;
		/* IP values contain LIP */
		unsigned int	lbr_lip:1;
	} split;
	unsigned int		full;
};

union cpuid28_ebx {
	struct {
		/* CPL Filtering Supported */
		unsigned int	lbr_cpl:1;
		/* Branch Filtering Supported */
		unsigned int	lbr_filter:1;
		/* Call-stack Mode Supported */
		unsigned int	lbr_call_stack:1;
	} split;
	unsigned int		full;
};

union cpuid28_ecx {
	struct {
		/* Mispredict Bit Supported */
		unsigned int	lbr_mispred:1;
		/* Timed LBRs Supported */
		unsigned int	lbr_timed_lbr:1;
		/* Branch Type Field Supported */
		unsigned int	lbr_br_type:1;
	} split;
	unsigned int		full;
};

/*
 * AMD "Extended Performance Monitoring and Debug" CPUID
 * detection/enumeration details:
 */
union cpuid_0x80000022_ebx {
	struct {
		/* Number of Core Performance Counters */
		unsigned int	num_core_pmc:4;
		/* Number of available LBR Stack Entries */
		unsigned int	lbr_v2_stack_sz:6;
		/* Number of Data Fabric Counters */
		unsigned int	num_df_pmc:6;
	} split;
	unsigned int		full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
	unsigned int	pebs_ept	:1;
};

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE		(1 << 30)
#define INTEL_PMC_FIXED_RDPMC_METRICS		(1 << 29)

/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL		0x38d
/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC that has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode
 * PMC, e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC that doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC is always 0x00.
 * The pseudo umask-code is the index of the fixed counter + 1, e.g.,
 * fixed counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0		0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1		0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES		(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2		0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES		(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES		(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3		0x30c
#define INTEL_PMC_IDX_FIXED_SLOTS		(INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS		(1ULL << INTEL_PMC_IDX_FIXED_SLOTS)

static inline bool use_fixed_pseudo_encoding(u64 code)
{
	return !(code & 0xff);
}
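
/*
 * E.g. use_fixed_pseudo_encoding(0x0300) is true (the CPU_CLK_Unhalted.Ref
 * pseudo-encoding of fixed counter 2), while a general-purpose encoding
 * such as 0x003c carries a non-zero event-code and yields false.
 */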

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS			(INTEL_PMC_IDX_FIXED + 15)

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE		(INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING		(INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BAD_SPEC		(INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND		(INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND		(INTEL_PMC_IDX_METRIC_BASE + 3)
#define INTEL_PMC_IDX_TD_HEAVY_OPS		(INTEL_PMC_IDX_METRIC_BASE + 4)
#define INTEL_PMC_IDX_TD_BR_MISPREDICT		(INTEL_PMC_IDX_METRIC_BASE + 5)
#define INTEL_PMC_IDX_TD_FETCH_LAT		(INTEL_PMC_IDX_METRIC_BASE + 6)
#define INTEL_PMC_IDX_TD_MEM_BOUND		(INTEL_PMC_IDX_METRIC_BASE + 7)
#define INTEL_PMC_IDX_METRIC_END		INTEL_PMC_IDX_TD_MEM_BOUND
#define INTEL_PMC_MSK_TOPDOWN			((0xffull << INTEL_PMC_IDX_METRIC_BASE) | \
						INTEL_PMC_MSK_FIXED_SLOTS)

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo-encoding of fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS				0x0400	/* TOPDOWN.SLOTS */
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING		0x8000	/* Retiring metric */
#define INTEL_TD_METRIC_BAD_SPEC		0x8100	/* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND		0x8200	/* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND		0x8300	/* BE bound metric */
/* Level 2 metrics */
#define INTEL_TD_METRIC_HEAVY_OPS		0x8400	/* Heavy Operations metric */
#define INTEL_TD_METRIC_BR_MISPREDICT		0x8500	/* Branch Mispredict metric */
#define INTEL_TD_METRIC_FETCH_LAT		0x8600	/* Fetch Latency metric */
#define INTEL_TD_METRIC_MEM_BOUND		0x8700	/* Memory bound metric */

#define INTEL_TD_METRIC_MAX			INTEL_TD_METRIC_MEM_BOUND
#define INTEL_TD_METRIC_NUM			8
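
/*
 * E.g. INTEL_TD_METRIC_BAD_SPEC (0x8100) is event=0x00, umask=0x81; its
 * metric index is (0x8100 - INTEL_TD_METRIC_RETIRING) >> 8 == 1, i.e.
 * INTEL_PMC_IDX_TD_BAD_SPEC relative to INTEL_PMC_IDX_METRIC_BASE.
 */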

static inline bool is_metric_idx(int idx)
{
	return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
	return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}

#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit)	\
			(~(0x1ull << bit) & INTEL_PMC_MSK_TOPDOWN)
#define GLOBAL_STATUS_COND_CHG			BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF_BIT		62
#define GLOBAL_STATUS_BUFFER_OVF		BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
#define GLOBAL_STATUS_UNC_OVF			BIT_ULL(61)
#define GLOBAL_STATUS_ASIF			BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN		BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT		58
#define GLOBAL_STATUS_LBRS_FROZEN		BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT		55
#define GLOBAL_STATUS_TRACE_TOPAPMI		BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT	48

#define GLOBAL_CTRL_EN_PERF_METRICS		48
/*
 * We model guest LBR event tracing as another fixed-mode PMC, like BTS.
 *
 * We choose bit 58 because it's used to indicate the LBR stack frozen
 * state for architectural perfmon v4, and we unconditionally mask that
 * bit in handle_pmi_common(), so it will never be set in the overflow
 * handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM)
 * can program the LBR registers on its own, and we don't actually do
 * anything with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR	(GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT	0x1b00
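
/*
 * Worked out: bit 58 is INTEL_PMC_IDX_FIXED + 26, i.e. "Fixed26", and the
 * fixed-counter pseudo umask is index + 1, so 26 + 1 == 27 == 0x1b, which
 * gives the event=0x00, umask=0x1b encoding above.
 */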

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_size;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	u64 latency;
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};
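
/*
 * Layout note (per the adaptive PEBS record format): a record always
 * begins with the basic group; the remaining groups appear only when the
 * corresponding PEBS_DATACFG_* bit was set, in the order meminfo, GPRs,
 * XMMs, then LBR entries.
 */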

/*
 * AMD Extended Performance Monitoring and Debug cpuid feature detection
 */
#define EXT_PERFMON_DEBUG_FEATURES	0x80000022

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)
#define IBS_CAPS_ZEN4			(1U<<11)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_L3MISSONLY	(1ULL<<59)
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_L3MISSONLY	(1ULL<<16)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK	(0x7FULL<<20)	/* separate upper 7 bits */
#define IBS_RIP_INVALID		(1ULL<<38)
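
/*
 * E.g. with IBS_CAPS_OPCNTEXT the op maximum count widens from the 16 bits
 * of IBS_OP_MAX_CNT to the 23-bit value IBS_OP_MAX_CNT_EXT (0x7FFFFF, not
 * a register layout): per the AMD IBS register layout, the low 16 bits
 * stay in the MaxCnt field and the upper 7 bits are placed separately at
 * IBS_OP_MAX_CNT_EXT_MASK, i.e. bits 26:20 of the op control register.
 */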

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
extern int forward_event_to_ibs(struct perf_event *event);
#else
static inline u32 get_ibs_caps(void) { return 0; }
static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *	   event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
struct x86_perf_regs {
	struct pt_regs	regs;
	u64		*xmm_regs;
};

extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)
#include <asm/stacktrace.h>

/*
 * We abuse bit 3 of flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)	{	\
	(regs)->ip = (__ip);				\
	(regs)->sp = (unsigned long)__builtin_frame_address(0);	\
	(regs)->cs = __KERNEL_CS;			\
	(regs)->flags = 0;				\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

struct x86_pmu_lbr {
	unsigned int	nr;
	unsigned int	from;
	unsigned int	to;
	unsigned int	info;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern u64 perf_get_hw_event_config(int hw_event);
extern void perf_check_microcode(void);
extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline u64 perf_get_hw_event_config(int hw_event)
{
	return 0;
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	memset(lbr, 0, sizeof(*lbr));
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{

}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);

#if defined(CONFIG_PERF_EVENTS_AMD_BRS)

#define PERF_NEEDS_LOPWR_CB 1

/*
 * architectural low power callback impacts
 * drivers/acpi/processor_idle.c
 * drivers/acpi/acpi_pad.c
 */
extern void perf_amd_brs_lopwr_cb(bool lopwr_in);

DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);

static __always_inline void perf_lopwr_cb(bool lopwr_in)
{
	static_call_mod(perf_lopwr_cb)(lopwr_in);
}

#endif /* PERF_NEEDS_LOPWR_CB */

#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */