/*
 * Defines x86 CPU feature bits
 */
#ifndef _ASM_X86_CPUFEATURE_H
#define _ASM_X86_CPUFEATURE_H

#ifndef _ASM_X86_REQUIRED_FEATURES_H
#include <asm/required-features.h>
#endif

#ifndef _ASM_X86_DISABLED_FEATURES_H
#include <asm/disabled-features.h>
#endif

#define NCAPINTS	11	/* N 32-bit words worth of info */
#define NBUGINTS	1	/* N 32-bit bug flags */

/*
 * Note: If the comment begins with a quoted string, that string is used
 * in /proc/cpuinfo instead of the macro name.  If the string is "",
 * this feature bit is not displayed in /proc/cpuinfo at all.
 */

/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
#define X86_FEATURE_FPU		( 0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME		( 0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE		( 0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE		( 0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC		( 0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR		( 0*32+ 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE		( 0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE		( 0*32+ 7) /* Machine Check Exception */
#define X86_FEATURE_CX8		( 0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC	( 0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP		( 0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR	( 0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE		( 0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA		( 0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV	( 0*32+15) /* CMOV instructions */
					   /* (plus FCMOVcc, FCOMI with FPU) */
#define X86_FEATURE_PAT		( 0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36	( 0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN		( 0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLUSH	( 0*32+19) /* CLFLUSH instruction */
#define X86_FEATURE_DS		( 0*32+21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI	( 0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX		( 0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR	( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
#define X86_FEATURE_XMM		( 0*32+25) /* "sse" */
#define X86_FEATURE_XMM2	( 0*32+26) /* "sse2" */
#define X86_FEATURE_SELFSNOOP	( 0*32+27) /* "ss" CPU self snoop */
#define X86_FEATURE_HT		( 0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC		( 0*32+29) /* "tm" Automatic clock control */
#define X86_FEATURE_IA64	( 0*32+30) /* IA-64 processor */
#define X86_FEATURE_PBE		( 0*32+31) /* Pending Break Enable */

/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL	( 1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MP		( 1*32+19) /* MP Capable. */
#define X86_FEATURE_NX		( 1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT	( 1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT	( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
#define X86_FEATURE_GBPAGES	( 1*32+26) /* "pdpe1gb" GB pages */
#define X86_FEATURE_RDTSCP	( 1*32+27) /* RDTSCP */
#define X86_FEATURE_LM		( 1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT	( 1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW	( 1*32+31) /* 3DNow! */
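
/*
 * Worked example of the encoding used above: X86_FEATURE_NX is 1*32+20 = 52,
 * so it lives in word 52 >> 5 == 1 of x86_capability[], at bit 52 & 31 == 20
 * within that word.  A minimal sketch of helpers that would make the split
 * explicit (hypothetical names, shown for illustration only):
 *
 *	#define X86_FEATURE_WORD(f)	((f) >> 5)
 *	#define X86_FEATURE_BIT(f)	((f) & 31)
 *
 * where X86_FEATURE_WORD() would be the capability word index and
 * X86_FEATURE_BIT() the bit position within that word.
 */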

/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY	( 2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN	( 2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI	( 2*32+ 3) /* LongRun table interface */

/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX	( 3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR	( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR	( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR	( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
/* cpu types for specific tunings: */
#define X86_FEATURE_K8		( 3*32+ 4) /* "" Opteron, Athlon64 */
#define X86_FEATURE_K7		( 3*32+ 5) /* "" Athlon */
#define X86_FEATURE_P3		( 3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4		( 3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP		( 3*32+ 9) /* smp kernel running on up */
/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS	( 3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS		( 3*32+13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32	( 3*32+14) /* "" syscall in ia32 userspace */
#define X86_FEATURE_SYSENTER32	( 3*32+15) /* "" sysenter in ia32 userspace */
#define X86_FEATURE_REP_GOOD	( 3*32+16) /* rep microcode works well */
#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
#define X86_FEATURE_NOPL	( 3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS	( 3*32+21) /* "" Always-present feature */
#define X86_FEATURE_XTOPOLOGY	( 3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC	( 3*32+24) /* TSC does not stop in C states */
/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
#define X86_FEATURE_EXTD_APICID	( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM	( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF	( 3*32+28) /* APERFMPERF */
#define X86_FEATURE_EAGER_FPU	( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
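
/*
 * The word 3 bits above are not read from CPUID; they are synthesized by the
 * kernel's CPU setup code.  A minimal sketch of how such a bit gets set,
 * using the set_cpu_cap() helper defined later in this file (the function
 * below is illustrative only; the test mirrors the invariant-TSC check done
 * during CPU init):
 *
 *	static void example_tsc_init(struct cpuinfo_x86 *c)
 *	{
 *		if (c->x86_power & (1 << 8))
 *			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 *	}
 */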

/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3	( 4*32+ 0) /* "pni" SSE-3 */
#define X86_FEATURE_PCLMULQDQ	( 4*32+ 1) /* PCLMULQDQ instruction */
#define X86_FEATURE_DTES64	( 4*32+ 2) /* 64-bit Debug Store */
#define X86_FEATURE_MWAIT	( 4*32+ 3) /* "monitor" Monitor/Mwait support */
#define X86_FEATURE_DSCPL	( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
#define X86_FEATURE_VMX		( 4*32+ 5) /* Hardware virtualization */
#define X86_FEATURE_SMX		( 4*32+ 6) /* Safer mode */
#define X86_FEATURE_EST		( 4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2		( 4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3	( 4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID		( 4*32+10) /* Context ID */
#define X86_FEATURE_FMA		( 4*32+12) /* Fused multiply-add */
#define X86_FEATURE_CX16	( 4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR	( 4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM	( 4*32+15) /* Performance Capabilities */
#define X86_FEATURE_PCID	( 4*32+17) /* Process Context Identifiers */
#define X86_FEATURE_DCA		( 4*32+18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1	( 4*32+19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2	( 4*32+20) /* "sse4_2" SSE-4.2 */
#define X86_FEATURE_X2APIC	( 4*32+21) /* x2APIC */
#define X86_FEATURE_MOVBE	( 4*32+22) /* MOVBE instruction */
#define X86_FEATURE_POPCNT	( 4*32+23) /* POPCNT instruction */
#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
#define X86_FEATURE_AES		( 4*32+25) /* AES instructions */
#define X86_FEATURE_XSAVE	( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define X86_FEATURE_OSXSAVE	( 4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX		( 4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C	( 4*32+29) /* 16-bit fp conversions */
#define X86_FEATURE_RDRAND	( 4*32+30) /* The RDRAND instruction */
#define X86_FEATURE_HYPERVISOR	( 4*32+31) /* Running on a hypervisor */

/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE	( 5*32+ 2) /* "rng" RNG present (xstore) */
#define X86_FEATURE_XSTORE_EN	( 5*32+ 3) /* "rng_en" RNG enabled */
#define X86_FEATURE_XCRYPT	( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
#define X86_FEATURE_XCRYPT_EN	( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
#define X86_FEATURE_ACE2	( 5*32+ 8) /* Advanced Cryptography Engine v2 */
#define X86_FEATURE_ACE2_EN	( 5*32+ 9) /* ACE v2 enabled */
#define X86_FEATURE_PHE		( 5*32+10) /* PadLock Hash Engine */
#define X86_FEATURE_PHE_EN	( 5*32+11) /* PHE enabled */
#define X86_FEATURE_PMM		( 5*32+12) /* PadLock Montgomery Multiplier */
#define X86_FEATURE_PMM_EN	( 5*32+13) /* PMM enabled */
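
/*
 * The word 5 bits come in present/enabled pairs: the *_EN bit reports that
 * the corresponding PadLock unit is not only present but has also been
 * enabled.  Consumers therefore usually test both, e.g. (sketch, using the
 * cpu_has_* shorthands defined later in this file):
 *
 *	if (cpu_has_xcrypt && cpu_has_xcrypt_enabled)
 *		... use the on-CPU crypto unit ...
 */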

/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM	( 6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY	( 6*32+ 1) /* If yes, HyperThreading is not valid */
#define X86_FEATURE_SVM		( 6*32+ 2) /* Secure virtual machine */
#define X86_FEATURE_EXTAPIC	( 6*32+ 3) /* Extended APIC space */
#define X86_FEATURE_CR8_LEGACY	( 6*32+ 4) /* CR8 in 32-bit mode */
#define X86_FEATURE_ABM		( 6*32+ 5) /* Advanced bit manipulation */
#define X86_FEATURE_SSE4A	( 6*32+ 6) /* SSE-4A */
#define X86_FEATURE_MISALIGNSSE	( 6*32+ 7) /* Misaligned SSE mode */
#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW	( 6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS		( 6*32+10) /* Instruction Based Sampling */
#define X86_FEATURE_XOP		( 6*32+11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT	( 6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT		( 6*32+13) /* Watchdog timer */
#define X86_FEATURE_LWP		( 6*32+15) /* Light Weight Profiling */
#define X86_FEATURE_FMA4	( 6*32+16) /* 4-operand MAC instructions */
#define X86_FEATURE_TCE		( 6*32+17) /* translation cache extension */
#define X86_FEATURE_NODEID_MSR	( 6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM		( 6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT	( 6*32+22) /* topology extensions CPUID leaves */
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB	( 6*32+24) /* NB performance counter extensions */
#define X86_FEATURE_PERFCTR_L2	( 6*32+28) /* L2 performance counter extensions */

/*
 * Auxiliary flags: Linux defined - For features scattered in various
 * CPUID levels like 0x6, 0xA etc., word 7
 */
#define X86_FEATURE_IDA		( 7*32+ 0) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT	( 7*32+ 1) /* Always Running APIC Timer */
#define X86_FEATURE_CPB		( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB		( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
#define X86_FEATURE_PLN		( 7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS		( 7*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_DTHERM	( 7*32+ 7) /* Digital Thermal Sensor */
#define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_HWP		( 7*32+10) /* "hwp" Intel HWP */
#define X86_FEATURE_HWP_NOITFY	( 7*32+11) /* Intel HWP_NOTIFY */
#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+12) /* Intel HWP_ACT_WINDOW */
#define X86_FEATURE_HWP_EPP	( 7*32+13) /* Intel HWP_EPP */
#define X86_FEATURE_HWP_PKG_REQ	( 7*32+14) /* Intel HWP_PKG_REQ */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW	( 8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_VNMI	( 8*32+ 1) /* Intel Virtual NMI */
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
#define X86_FEATURE_EPT		( 8*32+ 3) /* Intel Extended Page Table */
#define X86_FEATURE_VPID	( 8*32+ 4) /* Intel Virtual Processor ID */
#define X86_FEATURE_NPT		( 8*32+ 5) /* AMD Nested Page Table support */
#define X86_FEATURE_LBRV	( 8*32+ 6) /* AMD LBR Virtualization support */
#define X86_FEATURE_SVML	( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
#define X86_FEATURE_NRIPS	( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR	( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
#define X86_FEATURE_VMCBCLEAN	( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID	( 8*32+11) /* AMD flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
#define X86_FEATURE_PAUSEFILTER	( 8*32+13) /* AMD filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD	( 8*32+14) /* AMD pause filter threshold */
#define X86_FEATURE_VMMCALL	( 8*32+15) /* Prefer vmmcall to vmcall */
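
/*
 * The word 8 bits are also Linux-defined: CPU setup code fills them in from
 * the VMX capability MSRs and the SVM CPUID leaf, respectively, so that
 * virtualization code can test them like any other feature bit.  A minimal
 * usage sketch, with boot_cpu_has() as defined later in this file:
 *
 *	if (boot_cpu_has(X86_FEATURE_EPT))
 *		... enable the EPT-based code path ...
 */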

/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE	( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions */
#define X86_FEATURE_TSC_ADJUST	( 9*32+ 1) /* TSC adjustment MSR 0x3b */
#define X86_FEATURE_BMI1	( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE		( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2	( 9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP	( 9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2	( 9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS	( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID	( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM		( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_MPX		( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_AVX512F	( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_RDSEED	( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX		( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP	( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_CLFLUSHOPT	( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_AVX512PF	( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER	( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD	( 9*32+28) /* AVX-512 Conflict Detection */

/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
#define X86_FEATURE_XSAVEOPT	(10*32+ 0) /* XSAVEOPT */
#define X86_FEATURE_XSAVEC	(10*32+ 1) /* XSAVEC */
#define X86_FEATURE_XGETBV1	(10*32+ 2) /* XGETBV with ECX = 1 */
#define X86_FEATURE_XSAVES	(10*32+ 3) /* XSAVES/XRSTORS */

/*
 * BUG word(s)
 */
#define X86_BUG(x)		(NCAPINTS*32 + (x))

#define X86_BUG_F00F		X86_BUG(0) /* Intel F00F */
#define X86_BUG_FDIV		X86_BUG(1) /* FPU FDIV */
#define X86_BUG_COMA		X86_BUG(2) /* Cyrix 6x86 coma */
#define X86_BUG_AMD_TLB_MMATCH	X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
#define X86_BUG_AMD_APIC_C1E	X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
#define X86_BUG_11AP		X86_BUG(5) /* Bad local APIC aka 11AP */
#define X86_BUG_FXSAVE_LEAK	X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR	X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
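
/*
 * Worked example: bug bits are stored in x86_capability[] directly after the
 * NCAPINTS feature words, so with NCAPINTS == 11 the first bug bit is
 * X86_BUG(0) == 11*32 + 0 == 352.  This is why the cpu_has_bug()/set_cpu_bug()
 * wrappers below can simply reuse cpu_has()/set_cpu_cap(), and why
 * x86_bug_flags[] is indexed with X86_BUG_<name> - NCAPINTS*32.
 */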

#if defined(__KERNEL__) && !defined(__ASSEMBLY__)

#include <asm/asm.h>
#include <linux/bitops.h>

#ifdef CONFIG_X86_FEATURE_NAMES
extern const char * const x86_cap_flags[NCAPINTS*32];
extern const char * const x86_power_flags[32];
#define X86_CAP_FMT "%s"
#define x86_cap_flag(flag) x86_cap_flags[flag]
#else
#define X86_CAP_FMT "%d:%d"
#define x86_cap_flag(flag) ((flag) >> 5), ((flag) & 31)
#endif

/*
 * In order to save room, we index into this array by doing
 * X86_BUG_<name> - NCAPINTS*32.
 */
extern const char * const x86_bug_flags[NBUGINTS*32];

#define test_cpu_cap(c, bit)						\
	 test_bit(bit, (unsigned long *)((c)->x86_capability))

#define REQUIRED_MASK_BIT_SET(bit)					\
	 ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) ||	\
	   (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) ||	\
	   (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) ||	\
	   (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) ||	\
	   (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) ||	\
	   (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) ||	\
	   (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) ||	\
	   (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ||	\
	   (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) ||	\
	   (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )

#define DISABLED_MASK_BIT_SET(bit)					\
	 ( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0)) ||	\
	   (((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1)) ||	\
	   (((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2)) ||	\
	   (((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3)) ||	\
	   (((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4)) ||	\
	   (((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5)) ||	\
	   (((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6)) ||	\
	   (((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7)) ||	\
	   (((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8)) ||	\
	   (((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) )

#define cpu_has(c, bit)							\
	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
	 test_cpu_cap(c, bit))

#define this_cpu_has(bit)						\
	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
	 x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
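
/*
 * When the bit is a compile-time constant, REQUIRED_MASK_BIT_SET() reduces to
 * a constant expression, so cpu_has() folds to a plain 1 for any feature the
 * kernel configuration already requires.  Sketch: on a 64-bit build
 * X86_FEATURE_LM is normally part of REQUIRED_MASK1, so
 *
 *	if (cpu_has(c, X86_FEATURE_LM))
 *		...
 *
 * compiles to an unconditional path, while a non-required bit such as
 * X86_FEATURE_AVX2 falls back to the runtime test_cpu_cap() check.
 */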

/*
 * This macro is for detection of features which need kernel
 * infrastructure to be used.  It may *not* directly test the CPU
 * itself.  Use the cpu_has() family if you want true runtime
 * testing of CPU features, like in hypervisor code where you are
 * supporting a possible guest feature where host support for it
 * is not relevant.
 */
#define cpu_feature_enabled(bit)					\
	(__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 :	\
	 cpu_has(&boot_cpu_data, bit))

#define boot_cpu_has(bit)	cpu_has(&boot_cpu_data, bit)

#define set_cpu_cap(c, bit)	set_bit(bit, (unsigned long *)((c)->x86_capability))
#define clear_cpu_cap(c, bit)	clear_bit(bit, (unsigned long *)((c)->x86_capability))
#define setup_clear_cpu_cap(bit) do {					\
	clear_cpu_cap(&boot_cpu_data, bit);				\
	set_bit(bit, (unsigned long *)cpu_caps_cleared);		\
} while (0)
#define setup_force_cpu_cap(bit) do {					\
	set_cpu_cap(&boot_cpu_data, bit);				\
	set_bit(bit, (unsigned long *)cpu_caps_set);			\
} while (0)
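
/*
 * setup_clear_cpu_cap()/setup_force_cpu_cap() also record the bit in
 * cpu_caps_cleared/cpu_caps_set so that the override can be re-applied when
 * capabilities are re-read later (e.g. on secondary CPUs).  A typical caller
 * is a boot-parameter handler; a minimal sketch (the handler name is
 * illustrative):
 *
 *	static int __init example_noxsave_setup(char *s)
 *	{
 *		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
 *		return 1;
 *	}
 *	__setup("noxsave", example_noxsave_setup);
 */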

#define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
#define cpu_has_de		boot_cpu_has(X86_FEATURE_DE)
#define cpu_has_pse		boot_cpu_has(X86_FEATURE_PSE)
#define cpu_has_tsc		boot_cpu_has(X86_FEATURE_TSC)
#define cpu_has_pge		boot_cpu_has(X86_FEATURE_PGE)
#define cpu_has_apic		boot_cpu_has(X86_FEATURE_APIC)
#define cpu_has_sep		boot_cpu_has(X86_FEATURE_SEP)
#define cpu_has_mtrr		boot_cpu_has(X86_FEATURE_MTRR)
#define cpu_has_mmx		boot_cpu_has(X86_FEATURE_MMX)
#define cpu_has_fxsr		boot_cpu_has(X86_FEATURE_FXSR)
#define cpu_has_xmm		boot_cpu_has(X86_FEATURE_XMM)
#define cpu_has_xmm2		boot_cpu_has(X86_FEATURE_XMM2)
#define cpu_has_xmm3		boot_cpu_has(X86_FEATURE_XMM3)
#define cpu_has_ssse3		boot_cpu_has(X86_FEATURE_SSSE3)
#define cpu_has_aes		boot_cpu_has(X86_FEATURE_AES)
#define cpu_has_avx		boot_cpu_has(X86_FEATURE_AVX)
#define cpu_has_avx2		boot_cpu_has(X86_FEATURE_AVX2)
#define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
#define cpu_has_xstore		boot_cpu_has(X86_FEATURE_XSTORE)
#define cpu_has_xstore_enabled	boot_cpu_has(X86_FEATURE_XSTORE_EN)
#define cpu_has_xcrypt		boot_cpu_has(X86_FEATURE_XCRYPT)
#define cpu_has_xcrypt_enabled	boot_cpu_has(X86_FEATURE_XCRYPT_EN)
#define cpu_has_ace2		boot_cpu_has(X86_FEATURE_ACE2)
#define cpu_has_ace2_enabled	boot_cpu_has(X86_FEATURE_ACE2_EN)
#define cpu_has_phe		boot_cpu_has(X86_FEATURE_PHE)
#define cpu_has_phe_enabled	boot_cpu_has(X86_FEATURE_PHE_EN)
#define cpu_has_pmm		boot_cpu_has(X86_FEATURE_PMM)
#define cpu_has_pmm_enabled	boot_cpu_has(X86_FEATURE_PMM_EN)
#define cpu_has_ds		boot_cpu_has(X86_FEATURE_DS)
#define cpu_has_pebs		boot_cpu_has(X86_FEATURE_PEBS)
#define cpu_has_clflush		boot_cpu_has(X86_FEATURE_CLFLUSH)
#define cpu_has_bts		boot_cpu_has(X86_FEATURE_BTS)
#define cpu_has_gbpages		boot_cpu_has(X86_FEATURE_GBPAGES)
#define cpu_has_arch_perfmon	boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_pat		boot_cpu_has(X86_FEATURE_PAT)
#define cpu_has_xmm4_1		boot_cpu_has(X86_FEATURE_XMM4_1)
#define cpu_has_xmm4_2		boot_cpu_has(X86_FEATURE_XMM4_2)
#define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_xsaveopt	boot_cpu_has(X86_FEATURE_XSAVEOPT)
#define cpu_has_xsaves		boot_cpu_has(X86_FEATURE_XSAVES)
#define cpu_has_osxsave		boot_cpu_has(X86_FEATURE_OSXSAVE)
#define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
#define cpu_has_perfctr_nb	boot_cpu_has(X86_FEATURE_PERFCTR_NB)
#define cpu_has_perfctr_l2	boot_cpu_has(X86_FEATURE_PERFCTR_L2)
#define cpu_has_cx8		boot_cpu_has(X86_FEATURE_CX8)
#define cpu_has_cx16		boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext		boot_cpu_has(X86_FEATURE_TOPOEXT)

#if __GNUC__ >= 4
extern void warn_pre_alternatives(void);
extern bool __static_cpu_has_safe(u16 bit);

/*
 * Static testing of CPU features.  Used the same as boot_cpu_has().
 * These are only valid after alternatives have run, but will statically
 * patch the target code for additional performance.
 */
static __always_inline __pure bool __static_cpu_has(u16 bit)
{
#ifdef CC_HAVE_ASM_GOTO

#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS

	/*
	 * Catch too early usage of this before alternatives
	 * have run.
	 */
	asm_volatile_goto("1: jmp %l[t_warn]\n"
		"2:\n"
		".section .altinstructions,\"a\"\n"
		" .long 1b - .\n"
		" .long 0\n"			/* no replacement */
		" .word %P0\n"			/* 1: do replace */
		" .byte 2b - 1b\n"		/* source len */
		" .byte 0\n"			/* replacement len */
		".previous\n"
		/* skipping size check since replacement size = 0 */
		: : "i" (X86_FEATURE_ALWAYS) : : t_warn);

#endif

	asm_volatile_goto("1: jmp %l[t_no]\n"
		"2:\n"
		".section .altinstructions,\"a\"\n"
		" .long 1b - .\n"
		" .long 0\n"			/* no replacement */
		" .word %P0\n"			/* feature bit */
		" .byte 2b - 1b\n"		/* source len */
		" .byte 0\n"			/* replacement len */
		".previous\n"
		/* skipping size check since replacement size = 0 */
		: : "i" (bit) : : t_no);
	return true;
t_no:
	return false;

#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
t_warn:
	warn_pre_alternatives();
	return false;
#endif

#else /* CC_HAVE_ASM_GOTO */

	u8 flag;
	/* Open-coded due to __stringify() in ALTERNATIVE() */
	asm volatile("1: movb $0,%0\n"
		     "2:\n"
		     ".section .altinstructions,\"a\"\n"
		     " .long 1b - .\n"
		     " .long 3f - .\n"
		     " .word %P1\n"		/* feature bit */
		     " .byte 2b - 1b\n"		/* source len */
		     " .byte 4f - 3f\n"		/* replacement len */
		     ".previous\n"
		     ".section .discard,\"aw\",@progbits\n"
		     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
		     ".previous\n"
		     ".section .altinstr_replacement,\"ax\"\n"
		     "3: movb $1,%0\n"
		     "4:\n"
		     ".previous\n"
		     : "=qm" (flag) : "i" (bit));
	return flag;

#endif /* CC_HAVE_ASM_GOTO */
}

#define static_cpu_has(bit)					\
(								\
	__builtin_constant_p(boot_cpu_has(bit)) ?		\
		boot_cpu_has(bit) :				\
	__builtin_constant_p(bit) ?				\
		__static_cpu_has(bit) :				\
		boot_cpu_has(bit)				\
)
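
/*
 * static_cpu_has() is intended for hot paths that only run after alternatives
 * patching, where a single patched branch is cheaper than loading and testing
 * the x86_capability bitmap.  A minimal usage sketch:
 *
 *	if (static_cpu_has(X86_FEATURE_XMM2))
 *		... SSE2 fast path ...
 *	else
 *		... generic fallback ...
 *
 * Code that may run before alternatives have been applied should use
 * static_cpu_has_safe() or boot_cpu_has() instead.
 */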

static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
{
#ifdef CC_HAVE_ASM_GOTO
	/*
	 * We need to spell the jumps to the compiler because, depending on
	 * the offset, the replacement jump can be bigger than the original
	 * jump, and this we cannot have.  Thus, we force the jump to the
	 * widest, 4-byte, signed relative offset even though the latter
	 * would often fit in fewer bytes.
	 */
	asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
		"2:\n"
		".section .altinstructions,\"a\"\n"
		" .long 1b - .\n"		/* src offset */
		" .long 3f - .\n"		/* repl offset */
		" .word %P1\n"			/* always replace */
		" .byte 2b - 1b\n"		/* src len */
		" .byte 4f - 3f\n"		/* repl len */
		".previous\n"
		".section .altinstr_replacement,\"ax\"\n"
		"3: .byte 0xe9\n .long %l[t_no] - 2b\n"
		"4:\n"
		".previous\n"
		".section .altinstructions,\"a\"\n"
		" .long 1b - .\n"		/* src offset */
		" .long 0\n"			/* no replacement */
		" .word %P0\n"			/* feature bit */
		" .byte 2b - 1b\n"		/* src len */
		" .byte 0\n"			/* repl len */
		".previous\n"
		: : "i" (bit), "i" (X86_FEATURE_ALWAYS)
		: : t_dynamic, t_no);
	return true;
t_no:
	return false;
t_dynamic:
	return __static_cpu_has_safe(bit);
#else
	u8 flag;
	/* Open-coded due to __stringify() in ALTERNATIVE() */
	asm volatile("1: movb $2,%0\n"
		     "2:\n"
		     ".section .altinstructions,\"a\"\n"
		     " .long 1b - .\n"		/* src offset */
		     " .long 3f - .\n"		/* repl offset */
		     " .word %P2\n"		/* always replace */
		     " .byte 2b - 1b\n"		/* source len */
		     " .byte 4f - 3f\n"		/* replacement len */
		     ".previous\n"
		     ".section .discard,\"aw\",@progbits\n"
		     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
		     ".previous\n"
		     ".section .altinstr_replacement,\"ax\"\n"
		     "3: movb $0,%0\n"
		     "4:\n"
		     ".previous\n"
		     ".section .altinstructions,\"a\"\n"
		     " .long 1b - .\n"		/* src offset */
		     " .long 5f - .\n"		/* repl offset */
		     " .word %P1\n"		/* feature bit */
		     " .byte 4b - 3b\n"		/* src len */
		     " .byte 6f - 5f\n"		/* repl len */
		     ".previous\n"
		     ".section .discard,\"aw\",@progbits\n"
		     " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
		     ".previous\n"
		     ".section .altinstr_replacement,\"ax\"\n"
		     "5: movb $1,%0\n"
		     "6:\n"
		     ".previous\n"
		     : "=qm" (flag)
		     : "i" (bit), "i" (X86_FEATURE_ALWAYS));
	return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
#endif /* CC_HAVE_ASM_GOTO */
}

#define static_cpu_has_safe(bit)				\
(								\
	__builtin_constant_p(boot_cpu_has(bit)) ?		\
		boot_cpu_has(bit) :				\
		_static_cpu_has_safe(bit)			\
)
#else
/*
 * gcc 3.x is too stupid to do the static test; fall back to dynamic.
 */
#define static_cpu_has(bit)		boot_cpu_has(bit)
#define static_cpu_has_safe(bit)	boot_cpu_has(bit)
#endif

#define cpu_has_bug(c, bit)		cpu_has(c, (bit))
#define set_cpu_bug(c, bit)		set_cpu_cap(c, (bit))
#define clear_cpu_bug(c, bit)		clear_cpu_cap(c, (bit))

#define static_cpu_has_bug(bit)		static_cpu_has((bit))
#define static_cpu_has_bug_safe(bit)	static_cpu_has_safe((bit))
#define boot_cpu_has_bug(bit)		cpu_has_bug(&boot_cpu_data, (bit))

#define MAX_CPU_FEATURES	(NCAPINTS * 32)
#define cpu_have_feature	boot_cpu_has

#define CPU_FEATURE_TYPEFMT	"x86,ven%04Xfam%04Xmod%04X"
#define CPU_FEATURE_TYPEVAL	boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
				boot_cpu_data.x86_model

#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
#endif /* _ASM_X86_CPUFEATURE_H */