/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

#include <asm/cpucaps.h>
#include <asm/fpsimd.h>
#include <asm/hwcap.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>

/*
 * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
 * in the kernel and for user space to keep track of which optional features
 * are supported by the current system. So let's map feature 'x' to HWCAP_x.
 * Note that HWCAP_x constants are bit fields so we need to take the log.
 */

#define MAX_CPU_FEATURES	(8 * sizeof(elf_hwcap))
#define cpu_feature(x)		ilog2(HWCAP_ ## x)

#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>

/*
 * CPU feature register tracking
 *
 * The safe value of a CPUID feature field is dependent on the implications
 * of the values assigned to it by the architecture. Based on the relationship
 * between the values, the features are classified into 3 types - LOWER_SAFE,
 * HIGHER_SAFE and EXACT.
 *
 * The lowest value of all the CPUs is chosen for LOWER_SAFE and the highest
 * for HIGHER_SAFE. It is expected that all CPUs have the same value for
 * a field when EXACT is specified, failing which, the safe value specified
 * in the table is chosen.
 */

enum ftr_type {
	FTR_EXACT,	/* Use a predefined safe value */
	FTR_LOWER_SAFE,	/* Smaller value is safe */
	FTR_HIGHER_SAFE,/* Bigger value is safe */
};

#define FTR_STRICT	true	/* SANITY check strict matching required */
#define FTR_NONSTRICT	false	/* SANITY check ignored */

#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

#define FTR_VISIBLE	true	/* Feature visible to the user space */
#define FTR_HIDDEN	false	/* Feature is hidden from the user */

#define FTR_VISIBLE_IF_IS_ENABLED(config)		\
	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)

struct arm64_ftr_bits {
	bool		sign;	/* Value is signed ? */
	bool		visible;
	bool		strict;	/* CPU Sanity check: strict matching required ? */
	enum ftr_type	type;
	u8		shift;
	u8		width;
	s64		safe_val; /* safe value for FTR_EXACT features */
};
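/*
 * Illustrative sketch only (the real per-register tables live in
 * arch/arm64/kernel/cpufeature.c): a descriptor for a hypothetical unsigned
 * 4-bit ID register field at bit 20 where a larger value only ever adds
 * functionality, so the lowest value observed across all CPUs is the safe
 * system-wide value:
 *
 *	{
 *		.sign		= FTR_UNSIGNED,
 *		.visible	= FTR_HIDDEN,
 *		.strict		= FTR_STRICT,
 *		.type		= FTR_LOWER_SAFE,
 *		.shift		= 20,
 *		.width		= 4,
 *		.safe_val	= 0,	(only consulted for FTR_EXACT)
 *	}
 */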
/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask		Bits which should match across all CPUs for sanity.
 * @sys_val		Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;
	u64				user_mask;
	u64				sys_val;
	u64				user_val;
	const struct arm64_ftr_bits	*ftr_bits;
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;

/* scope of capability check */
enum {
	SCOPE_SYSTEM,
	SCOPE_LOCAL_CPU,
};

struct arm64_cpu_capabilities {
	const char *desc;
	u16 capability;
	int def_scope;			/* default scope */
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	int (*enable)(void *);		/* Called on all active CPUs */
	union {
		struct {	/* To be used for erratum handling only */
			u32 midr_model;
			u32 midr_range_min, midr_range_max;
		};

		struct {	/* Feature register checking */
			u32 sys_reg;
			u8 field_pos;
			u8 min_field_value;
			u8 hwcap_type;
			bool sign;
			unsigned long hwcap;
		};
	};
};

extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;

bool this_cpu_has_cap(unsigned int cap);

static inline bool cpu_have_feature(unsigned int num)
{
	return elf_hwcap & (1UL << num);
}

/* System capability check for constant caps */
static inline bool __cpus_have_const_cap(int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return static_branch_unlikely(&cpu_hwcap_keys[num]);
}

static inline bool cpus_have_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return test_bit(num, cpu_hwcaps);
}

static inline bool cpus_have_const_cap(int num)
{
	if (static_branch_likely(&arm64_const_caps_ready))
		return __cpus_have_const_cap(num);
	else
		return cpus_have_cap(num);
}

static inline void cpus_set_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS) {
		pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
			num, ARM64_NCAPS);
	} else {
		__set_bit(num, cpu_hwcaps);
	}
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
	return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
	return cpuid_feature_extract_signed_field_width(features, field, 4);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	return (u64)(features << (64 - width - field)) >> (64 - width);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
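/*
 * Worked example (illustrative values, not tied to any particular ID
 * register): extracting the unsigned 4-bit field at bit 20 from
 * features = 0x0000000000f00000:
 *
 *	features << (64 - 4 - 20)	moves the field up to bits [63:60]
 *	... >> (64 - 4)			moves it back down to bits [3:0]
 *
 * so cpuid_feature_extract_unsigned_field_width(features, 20, 4) == 0xf.
 * The signed variant does the same shifts but casts to s64 first, so the
 * arithmetic right shift sign-extends the field's top bit (0xf becomes -1).
 */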
static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}

static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
{
	return (reg->user_val | (reg->sys_val & reg->user_mask));
}

static inline int __attribute_const__
cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
{
	return (sign) ?
		cpuid_feature_extract_signed_field_width(features, field, width) :
		cpuid_feature_extract_unsigned_field_width(features, field, width);
}

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
	return cpuid_feature_extract_field_width(features, field, 4, sign);
}

static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
}

static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);

	return val == ID_AA64PFR0_EL0_32BIT_64BIT;
}

static inline bool id_aa64pfr0_sve(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);

	return val > 0;
}

void __init setup_cpu_features(void);

void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			     const char *info);
void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
void check_local_cpu_capabilities(void);

void update_cpu_errata_workarounds(void);
void __init enable_errata_workarounds(void);
void verify_local_cpu_errata_workarounds(void);

u64 read_sanitised_ftr_reg(u32 id);

static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

static inline bool system_supports_32bit_el0(void)
{
	return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
}

static inline bool system_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}

static inline bool system_supports_fpsimd(void)
{
	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}

static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
		!cpus_have_const_cap(ARM64_HAS_PAN);
}

static inline bool system_supports_sve(void)
{
	return IS_ENABLED(CONFIG_ARM64_SVE) &&
		cpus_have_const_cap(ARM64_SVE);
}

/*
 * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
 * vector length.
 *
 * Use only if SVE is present.
 * This function clobbers the SVE vector length.
 */
static inline u64 read_zcr_features(void)
{
	u64 zcr;
	unsigned int vq_max;

	/*
	 * Set the maximum possible VL, and write zeroes to all other
	 * bits to see if they stick.
	 */
	sve_kernel_enable(NULL);
	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);

	zcr = read_sysreg_s(SYS_ZCR_EL1);
	zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
	vq_max = sve_vq_from_vl(sve_get_vl());
	zcr |= vq_max - 1; /* set LEN field to maximum effective value */

	return zcr;
}

#endif /* __ASSEMBLY__ */

#endif