/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

#include <linux/static_call.h>

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED				       16
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL

#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE				(1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE				(1ULL << 32)

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT \
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK \
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT				48
#define AMD64_L3_SLICE_MASK \
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK \
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT				56
#define AMD64_L3_THREAD_MASK \
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK \
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES				BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES				BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT				42
#define AMD64_L3_COREID_MASK \
	(0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |	\
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |	\
	 ARCH_PERFMON_EVENTSEL_UMASK)

#define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB	\
	(AMD64_EVENTSEL_EVENT |			\
	 GENMASK_ULL(37, 36))

#define AMD64_PERFMON_V2_EVENTSEL_UMASK_NB	\
	(ARCH_PERFMON_EVENTSEL_UMASK |		\
	 GENMASK_ULL(27, 24))

#define AMD64_PERFMON_V2_RAW_EVENT_MASK_NB	\
	(AMD64_PERFMON_V2_EVENTSEL_EVENT_NB |	\
	 AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)

#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4
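
/*
 * Illustrative example (editor's sketch, not part of this header): the
 * architectural "LLC Misses" event is event_select=0x2e, umask=0x41, so a
 * raw EVENTSEL value counting in user and kernel mode with the counter
 * enabled would be assembled as:
 *
 *	u64 config = (0x2e   & ARCH_PERFMON_EVENTSEL_EVENT) |
 *		     (0x4100 & ARCH_PERFMON_EVENTSEL_UMASK) |
 *		     ARCH_PERFMON_EVENTSEL_USR |
 *		     ARCH_PERFMON_EVENTSEL_OS  |
 *		     ARCH_PERFMON_EVENTSEL_ENABLE;
 *
 * i.e. 0x43412e. Note that X86_RAW_EVENT_MASK deliberately excludes the
 * USR/OS/INT/ENABLE bits: user-supplied raw configs may only select the
 * event, while the kernel owns the control bits.
 */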

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
	(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GP		BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT	24

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved1:2;
		unsigned int anythread_deprecated:1;
		unsigned int reserved2:16;
	} split;
	unsigned int full;
};

/*
 * Intel "Architectural Performance Monitoring extension" CPUID
 * detection/enumeration details:
 */
#define ARCH_PERFMON_EXT_LEAF			0x00000023
#define ARCH_PERFMON_NUM_COUNTER_LEAF_BIT	0x1
#define ARCH_PERFMON_NUM_COUNTER_LEAF		0x1

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
	struct {
		/* Supported LBR depth values */
		unsigned int	lbr_depth_mask:8;
		unsigned int	reserved:22;
		/* Deep C-state Reset */
		unsigned int	lbr_deep_c_reset:1;
		/* IP values contain LIP */
		unsigned int	lbr_lip:1;
	} split;
	unsigned int		full;
};

union cpuid28_ebx {
	struct {
		/* CPL Filtering Supported */
		unsigned int	lbr_cpl:1;
		/* Branch Filtering Supported */
		unsigned int	lbr_filter:1;
		/* Call-stack Mode Supported */
		unsigned int	lbr_call_stack:1;
	} split;
	unsigned int		full;
};

union cpuid28_ecx {
	struct {
		/* Mispredict Bit Supported */
		unsigned int	lbr_mispred:1;
		/* Timed LBRs Supported */
		unsigned int	lbr_timed_lbr:1;
		/* Branch Type Field Supported */
		unsigned int	lbr_br_type:1;
	} split;
	unsigned int		full;
};

/*
 * AMD "Extended Performance Monitoring and Debug" CPUID
 * detection/enumeration details:
 */
union cpuid_0x80000022_ebx {
	struct {
		/* Number of Core Performance Counters */
		unsigned int	num_core_pmc:4;
		/* Number of available LBR Stack Entries */
		unsigned int	lbr_v2_stack_sz:6;
		/* Number of Data Fabric Counters */
		unsigned int	num_df_pmc:6;
	} split;
	unsigned int		full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
	unsigned int	pebs_ept	:1;
};

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE		(1 << 30)
#define INTEL_PMC_FIXED_RDPMC_METRICS		(1 << 29)
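
/*
 * Illustrative example (editor's sketch, mirroring what the perf core does
 * at init): the unions above decode CPUID leaf 0xa directly:
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *	union cpuid10_edx edx;
 *	unsigned int unused;
 *
 *	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
 *
 * eax.split.version_id is the architectural perfmon version,
 * eax.split.num_counters the number of general-purpose counters, and
 * edx.split.num_counters_fixed the number of fixed counters (version 2+);
 * a set ebx.split.no_* bit means that event is NOT available.
 */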

/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL		0x38d

/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC that has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode
 * PMC, e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC that doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC must be 0x00.
 * The pseudo umask-code is 0xX, where X equals the index of the fixed
 * counter + 1, e.g., the fixed counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0		0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1		0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES		(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2		0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES		(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES		(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3		0x30c
#define INTEL_PMC_IDX_FIXED_SLOTS		(INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS		(1ULL << INTEL_PMC_IDX_FIXED_SLOTS)

static inline bool use_fixed_pseudo_encoding(u64 code)
{
	return !(code & 0xff);
}
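
/*
 * Illustrative examples of the encoding rules above (for exposition only):
 *
 *	use_fixed_pseudo_encoding(0x0300)	true:  CPU_CLK_Unhalted.Ref is
 *						       event=0x00, umask=0x3
 *	use_fixed_pseudo_encoding(0x00c0)	false: Instr_Retired.Any uses
 *						       the real event-code 0xc0
 */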

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS		(INTEL_PMC_IDX_FIXED + 15)

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE	(INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING	(INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BAD_SPEC	(INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND	(INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND	(INTEL_PMC_IDX_METRIC_BASE + 3)
#define INTEL_PMC_IDX_TD_HEAVY_OPS	(INTEL_PMC_IDX_METRIC_BASE + 4)
#define INTEL_PMC_IDX_TD_BR_MISPREDICT	(INTEL_PMC_IDX_METRIC_BASE + 5)
#define INTEL_PMC_IDX_TD_FETCH_LAT	(INTEL_PMC_IDX_METRIC_BASE + 6)
#define INTEL_PMC_IDX_TD_MEM_BOUND	(INTEL_PMC_IDX_METRIC_BASE + 7)
#define INTEL_PMC_IDX_METRIC_END	INTEL_PMC_IDX_TD_MEM_BOUND
#define INTEL_PMC_MSK_TOPDOWN		((0xffull << INTEL_PMC_IDX_METRIC_BASE) | \
					 INTEL_PMC_MSK_FIXED_SLOTS)

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo code of the fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS			0x0400	/* TOPDOWN.SLOTS */
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING	0x8000	/* Retiring metric */
#define INTEL_TD_METRIC_BAD_SPEC	0x8100	/* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND	0x8200	/* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND	0x8300	/* BE bound metric */
/* Level 2 metrics */
#define INTEL_TD_METRIC_HEAVY_OPS	0x8400	/* Heavy Operations metric */
#define INTEL_TD_METRIC_BR_MISPREDICT	0x8500	/* Branch Mispredict metric */
#define INTEL_TD_METRIC_FETCH_LAT	0x8600	/* Fetch Latency metric */
#define INTEL_TD_METRIC_MEM_BOUND	0x8700	/* Memory bound metric */

#define INTEL_TD_METRIC_MAX		INTEL_TD_METRIC_MEM_BOUND
#define INTEL_TD_METRIC_NUM		8

static inline bool is_metric_idx(int idx)
{
	return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
	return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}

#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit)	\
	(~(0x1ull << bit) & INTEL_PMC_MSK_TOPDOWN)

#define GLOBAL_STATUS_COND_CHG			BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF_BIT		62
#define GLOBAL_STATUS_BUFFER_OVF		BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
#define GLOBAL_STATUS_UNC_OVF			BIT_ULL(61)
#define GLOBAL_STATUS_ASIF			BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN		BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT		58
#define GLOBAL_STATUS_LBRS_FROZEN		BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT		55
#define GLOBAL_STATUS_TRACE_TOPAPMI		BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT	48

#define GLOBAL_CTRL_EN_PERF_METRICS		48
/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it's used to indicate LBR stack frozen state
 * for architectural perfmon v4, also we unconditionally mask that bit in
 * the handle_pmi_common(), so it'll never be set in the overflow handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM)
 * can program the LBR registers on its own, and we don't actually do
 * anything with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR	(GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT	0x1b00

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_size;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	u64 latency;
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};
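
/*
 * Illustrative example (editor's sketch): an adaptive PEBS record always
 * begins with struct pebs_basic; the optional groups that follow it are
 * selected per event via the PEBS_DATACFG_* bits written to
 * MSR_PEBS_DATA_CFG, e.g.:
 *
 *	u64 data_cfg = PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP;
 *
 * appends struct pebs_meminfo and struct pebs_gprs after the basic group.
 * Requested groups appear in the fixed order basic, meminfo, gprs, xmms,
 * lbrs; the number of LBR entries to capture is encoded in the field that
 * starts at bit PEBS_DATACFG_LBR_SHIFT.
 */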

/*
 * AMD Extended Performance Monitoring and Debug cpuid feature detection
 */
#define EXT_PERFMON_DEBUG_FEATURES	0x80000022

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)
#define IBS_CAPS_ZEN4			(1U<<11)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_L3MISSONLY	(1ULL<<59)
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_L3MISSONLY	(1ULL<<16)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK	(0x7FULL<<20)	/* separate upper 7 bits */
#define IBS_RIP_INVALID		(1ULL<<38)

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif
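
/*
 * Illustrative example (editor's sketch, not kernel code): programming the
 * IBS op control register for a sampling period of 0x10000 ops, counting
 * dispatched ops when supported. MaxCnt is specified in units of 16, so
 * the period is shifted down by 4 before masking:
 *
 *	u32 caps = get_ibs_caps();
 *	u64 ctl  = IBS_OP_ENABLE | ((0x10000ULL >> 4) & IBS_OP_MAX_CNT);
 *
 *	if (caps & IBS_CAPS_OPCNT)
 *		ctl |= IBS_OP_CNT_CTL;
 *
 * With IBS_CAPS_OPCNTEXT, larger periods are possible: the upper bits of
 * (period >> 4) then live in IBS_OP_MAX_CNT_EXT_MASK.
 */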

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *	   event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
struct x86_perf_regs {
	struct pt_regs	regs;
	u64		*xmm_regs;
};

extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 of regs->flags to pass exact information, see
 * perf_misc_flags() and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->sp = (unsigned long)__builtin_frame_address(0);	\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

struct x86_pmu_lbr {
	unsigned int	nr;
	unsigned int	from;
	unsigned int	to;
	unsigned int	info;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern u64 perf_get_hw_event_config(int hw_event);
extern void perf_check_microcode(void);
extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline u64 perf_get_hw_event_config(int hw_event)
{
	return 0;
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	memset(lbr, 0, sizeof(*lbr));
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{

}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);

#if defined(CONFIG_PERF_EVENTS_AMD_BRS)

#define PERF_NEEDS_LOPWR_CB 1

/*
 * architectural low power callback impacts
 * drivers/acpi/processor_idle.c
 * drivers/acpi/acpi_pad.c
 */
extern void perf_amd_brs_lopwr_cb(bool lopwr_in);

DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);

static __always_inline void perf_lopwr_cb(bool lopwr_in)
{
	static_call_mod(perf_lopwr_cb)(lopwr_in);
}

#endif /* PERF_NEEDS_LOPWR_CB */

#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */