// SPDX-License-Identifier: GPL-2.0
/*
 * Local APIC based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 */

#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/kprobes.h>

#include <asm/nmi.h>
#include <asm/apic.h>
#include <asm/perf_event.h>

/*
 * This number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.
 *
 * It is the maximum for all platforms (for now).
 */
#define NMI_MAX_COUNTER_BITS 66

/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers;
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 * Different performance counters/event selection registers may be reserved
 * by different subsystems; this reservation system just tries to coordinate
 * things a little.
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

/*
 * converts an MSR to an appropriate reservation bit;
 * returns the bit offset of the performance counter register
 */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		if (msr >= MSR_F15H_PERF_CTR)
			return (msr - MSR_F15H_PERF_CTR) >> 1;
		return msr - MSR_K7_PERFCTR0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_PERFCTR0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_PERFCTR0;
		case 11:
			return msr - MSR_KNC_PERFCTR0;
		case 15:
			return msr - MSR_P4_BPU_PERFCTR0;
		}
		fallthrough;
	case X86_VENDOR_ZHAOXIN:
	case X86_VENDOR_CENTAUR:
		return msr - MSR_ARCH_PERFMON_PERFCTR0;
	}
	return 0;
}

/*
 * converts an MSR to an appropriate reservation bit;
 * returns the bit offset of the event selection register
 */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		if (msr >= MSR_F15H_PERF_CTL)
			return (msr - MSR_F15H_PERF_CTL) >> 1;
		return msr - MSR_K7_EVNTSEL0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_EVENTSEL0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_EVNTSEL0;
		case 11:
			return msr - MSR_KNC_EVNTSEL0;
		case 15:
			return msr - MSR_P4_BSU_ESCR0;
		}
		fallthrough;
	case X86_VENDOR_ZHAOXIN:
	case X86_VENDOR_CENTAUR:
		return msr - MSR_ARCH_PERFMON_EVENTSEL0;
	}
	return 0;
}
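/*
 * Worked example of the mapping above (illustrative only; the exact MSR
 * values live in <asm/msr-index.h>): with architectural perfmon,
 * MSR_ARCH_PERFMON_PERFCTR0/EVENTSEL0 map to bit 0, the next counter and
 * event select pair to bit 1, and so on.  With the AMD F15H-style layout
 * the counter and event select MSRs are interleaved, so
 * MSR_F15H_PERF_CTR + 2 * n and MSR_F15H_PERF_CTL + 2 * n both map to
 * bit n, which is why those cases shift the offset right by one.
 */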
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return !test_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, perfctr_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_perfctr_nmi);

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(release_perfctr_nmi);

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, evntsel_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_evntsel_nmi);

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(release_evntsel_nmi);
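/*
 * Illustrative usage sketch, not part of the original file: a subsystem
 * that wants to drive a performance counter from NMI context would
 * typically claim both the event select and the counter MSR through this
 * allocator before programming them, and release both again on teardown.
 * The reserve_*() helpers return 1 when the caller may use the register
 * and 0 when it is already owned.  The function names below are
 * hypothetical and assume the architectural perfmon MSRs.
 */
#if 0
static int example_claim_counter0(void)
{
	if (!reserve_evntsel_nmi(MSR_ARCH_PERFMON_EVENTSEL0))
		return -EBUSY;

	if (!reserve_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0)) {
		release_evntsel_nmi(MSR_ARCH_PERFMON_EVENTSEL0);
		return -EBUSY;
	}

	/* ... program the event select and counter with wrmsrl() ... */
	return 0;
}

static void example_release_counter0(void)
{
	release_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0);
	release_evntsel_nmi(MSR_ARCH_PERFMON_EVENTSEL0);
}
#endif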