/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED					3
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL

#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK		\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |	\
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |	\
	 ARCH_PERFMON_EVENTSEL_UMASK)
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};
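
/*
 * Illustrative sketch (not part of this header): decoding the
 * architectural PMU enumeration from CPUID leaf 0xa into the unions
 * above, assuming cpuid() from <asm/processor.h>:
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *	union cpuid10_edx edx;
 *	unsigned int unused;
 *
 *	cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);
 *	pr_info("PMU v%d: %d GP counters of %d bits\n",
 *		eax.split.version_id, eax.split.num_counters,
 *		eax.split.bit_width);
 */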

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS		(INTEL_PMC_IDX_FIXED + 16)

#define GLOBAL_STATUS_COND_CHG		BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF	BIT_ULL(62)
#define GLOBAL_STATUS_UNC_OVF		BIT_ULL(61)
#define GLOBAL_STATUS_ASIF		BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN	BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN	BIT_ULL(58)
#define GLOBAL_STATUS_TRACE_TOPAPMI	BIT_ULL(55)
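
/*
 * Illustrative sketch (not part of this header): enabling fixed
 * counter 0 (Instr_Retired.Any) for both kernel and user mode. Per the
 * Intel SDM, each fixed counter owns a 4-bit field in FIXED_CTR_CTRL:
 * bit 0 enables ring-0 counting, bit 1 ring-3 counting, and bit 3 the
 * overflow PMI. Assumes wrmsrl() from <asm/msr.h>.
 *
 *	int idx = INTEL_PMC_IDX_FIXED_INSTRUCTIONS - INTEL_PMC_IDX_FIXED;
 *
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, 0x3ULL << (idx * 4));
 */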

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN		(1ULL<<57)
#define IBS_FETCH_VAL			(1ULL<<49)
#define IBS_FETCH_ENABLE		(1ULL<<48)
#define IBS_FETCH_CNT			0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT		0x0000FFFFULL

/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT			(0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL			(1ULL<<19)
#define IBS_OP_VAL			(1ULL<<18)
#define IBS_OP_ENABLE			(1ULL<<17)
#define IBS_OP_MAX_CNT			0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT		0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID			(1ULL<<38)

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif
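
/*
 * Illustrative sketch (not part of this header): probing IBS and
 * composing a fetch-sampling control value from the masks above. The
 * stored maximum count is in units of 16 fetches, hence the >> 4.
 *
 *	u32 caps = get_ibs_caps();
 *	u64 ctl;
 *
 *	if (caps & IBS_CAPS_FETCHSAM)
 *		ctl = IBS_FETCH_ENABLE |
 *		      ((0x10000ULL >> 4) & IBS_FETCH_MAX_CNT);
 */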

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 of the flags to pass exact information, see
 * perf_misc_flags() and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */