/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "CPU features: " fmt

#include <linux/bsearch.h>
#include <linux/cpumask.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys);

#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)	\
	{								\
		.sign = SIGNED,						\
		.strict = STRICT,					\
		.type = TYPE,						\
		.shift = SHIFT,						\
		.width = WIDTH,						\
		.safe_val = SAFE_VAL,					\
	}

/* Define a feature with an unsigned value */
#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)		\
	__ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Define a feature with a signed value */
#define S_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)		\
	__ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

#define ARM64_FTR_END					\
	{						\
		.width = 0,				\
	}

/* meta feature for alternatives */
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
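
/*
 * How to read the tables below: each arm64_ftr_bits entry describes one
 * field of an ID register - its position (shift/width), whether the field
 * is signed, whether a mismatch between CPUs is treated strictly (and so
 * taints the kernel), and how a safe system-wide value is chosen (see
 * arm64_ftr_safe_value()). A FTR_LOWER_SAFE field takes the lowest value
 * seen across all CPUs, while a FTR_EXACT field falls back to safe_val
 * whenever CPUs disagree.
 */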

static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),	/* RAZ */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
	/* Linux doesn't care about EL3 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
	/* Linux shouldn't care about secure memory */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
	/*
	 * Differing PARange is fine as long as all peripherals and memory are mapped
	 * within the minimum PARange of all CPUs
	 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_ctr[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will
	 * make use of *minLine.
	 * If we have differing I-cache policies, report it as the weakest - AIVIVT.
	 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_AIVIVT),	/* L1Ip */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
	ARM64_FTR_END,
};

struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
	.name		= "SYS_CTR_EL0",
	.ftr_bits	= ftr_ctr
};

static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0xf),	/* InnerShr */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),	/* FCSE */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* AuxReg */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 4, 0),	/* TCM */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),	/* ShareLvl */
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0xf),	/* OuterShr */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* PMSA */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* VMSA */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_mvfr2[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* FPMisc */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* SIMDMisc */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_dczid[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 5, 27, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 1, 1),		/* DZP */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* BS */
	ARM64_FTR_END,
};


static const struct arm64_ftr_bits ftr_id_isar5[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 20, 4, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* ac2 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* RAZ */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_pfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 16, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),	/* State3 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),		/* State2 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* State1 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* State0 */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_dfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),	/* PerfMon */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
 */
static const struct arm64_ftr_bits ftr_generic_32bits[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_generic[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_generic32[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 32, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_aa64raz[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
	ARM64_FTR_END,
};

#define ARM64_FTR_REG(id, table) {			\
	.sys_id = id,					\
	.reg = &(struct arm64_ftr_reg){			\
		.name = #id,				\
		.ftr_bits = &((table)[0]),		\
	}}

static const struct __ftr_reg_entry {
	u32			sys_id;
	struct arm64_ftr_reg	*reg;
} arm64_ftr_regs[] = {

	/* Op1 = 0, CRn = 0, CRm = 1 */
	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

	/* Op1 = 0, CRn = 0, CRm = 2 */
	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),

	/* Op1 = 0, CRn = 0, CRm = 3 */
	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),

	/* Op1 = 0, CRn = 0, CRm = 4 */
	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_aa64raz),

	/* Op1 = 0, CRn = 0, CRm = 5 */
	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
	ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_generic),

	/* Op1 = 0, CRn = 0, CRm = 6 */
	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
	ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_aa64raz),

	/* Op1 = 0, CRn = 0, CRm = 7 */
	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),

	/* Op1 = 3, CRn = 0, CRm = 0 */
	{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

	/* Op1 = 3, CRn = 14, CRm = 0 */
	ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_generic32),
};

static int search_cmp_ftr_reg(const void *id, const void *regp)
{
	return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
}

/*
 * get_arm64_ftr_reg - Lookup a feature register entry using its
 * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 * ascending order of sys_id, we use binary search to find a matching
 * entry.
 *
 * returns - Upon success, matching ftr_reg entry for id.
 *         - NULL on failure. It is up to the caller to decide
 *	     the impact of a failure.
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
	const struct __ftr_reg_entry *ret;

	ret = bsearch((const void *)(unsigned long)sys_id,
			arm64_ftr_regs,
			ARRAY_SIZE(arm64_ftr_regs),
			sizeof(arm64_ftr_regs[0]),
			search_cmp_ftr_reg);
	if (ret)
		return ret->reg;
	return NULL;
}

static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
			       s64 ftr_val)
{
	u64 mask = arm64_ftr_mask(ftrp);

	reg &= ~mask;
	reg |= (ftr_val << ftrp->shift) & mask;
	return reg;
}

static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
				s64 cur)
{
	s64 ret = 0;

	switch (ftrp->type) {
	case FTR_EXACT:
		ret = ftrp->safe_val;
		break;
	case FTR_LOWER_SAFE:
		ret = new < cur ? new : cur;
		break;
	case FTR_HIGHER_SAFE:
		ret = new > cur ? new : cur;
		break;
	default:
		BUG();
	}

	return ret;
}
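
/*
 * For example: if the boot CPU reports ID_AA64ISAR0_EL1.CRC32 = 1 but a
 * secondary CPU reports 0, the field is FTR_LOWER_SAFE, so the sanitised
 * system-wide value becomes 0 and HWCAP_CRC32 is not advertised to
 * userspace. An FTR_EXACT field instead collapses to the table's safe_val
 * on any mismatch.
 */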

static void __init sort_ftr_regs(void)
{
	int i;

	/* Check that the array is sorted so that we can do the binary search */
	for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
		BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
}

/*
 * Initialise the CPU feature register from Boot CPU values.
 * Also initialises the strict_mask for the register.
 */
static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
	u64 val = 0;
	u64 strict_mask = ~0x0ULL;
	const struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

	BUG_ON(!reg);

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		val = arm64_ftr_set_value(ftrp, val, ftr_new);
		if (!ftrp->strict)
			strict_mask &= ~arm64_ftr_mask(ftrp);
	}
	reg->sys_val = val;
	reg->strict_mask = strict_mask;
}

void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
	/* Before we start using the tables, make sure the array is sorted */
	sort_ftr_regs();

	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);

	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
		init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
		init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
		init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
		init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
		init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
	}

}

static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
{
	const struct arm64_ftr_bits *ftrp;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		if (ftr_cur == ftr_new)
			continue;
		/* Find a safe value */
		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
	}

}

static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	BUG_ON(!regp);
	update_cpu_ftr_reg(regp, val);
	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
		return 0;
	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
			regp->name, boot, cpu, val);
	return 1;
}
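
/*
 * Note that fields marked FTR_NONSTRICT are cleared from strict_mask (see
 * init_cpu_ftr_reg()), so the comparison above ignores them - PARange, for
 * example - and the SANITY CHECK warning fires only when a strictly
 * checked field differs from the boot CPU's value.
 */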

/*
 * Update system wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any insane variations from that of the boot CPU.
 */
void update_cpu_features(int cpu,
			 struct cpuinfo_arm64 *info,
			 struct cpuinfo_arm64 *boot)
{
	int taint = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
				      info->reg_ctr, boot->reg_ctr);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
				      info->reg_dczid, boot->reg_dczid);

	/* If different, timekeeping will be broken (especially with KVM) */
	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
				      info->reg_cntfrq, boot->reg_cntfrq);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
	/*
	 * Even in big.LITTLE, processors should be identical instruction-set
	 * wise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
				      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);

	/*
	 * EL3 is not our concern.
	 * ID_AA64PFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);

	/*
	 * If we have AArch32, we care about 32-bit features for compat.
	 * If the system doesn't support AArch32, don't update them.
	 */
	if (id_aa64pfr0_32bit_el0(read_system_reg(SYS_ID_AA64PFR0_EL1)) &&
		id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {

		taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
					info->reg_id_dfr0, boot->reg_id_dfr0);
		taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
					info->reg_id_isar0, boot->reg_id_isar0);
		taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
					info->reg_id_isar1, boot->reg_id_isar1);
		taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
					info->reg_id_isar2, boot->reg_id_isar2);
		taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
					info->reg_id_isar3, boot->reg_id_isar3);
		taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
					info->reg_id_isar4, boot->reg_id_isar4);
		taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
					info->reg_id_isar5, boot->reg_id_isar5);

		/*
		 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
		 * ACTLR formats could differ across CPUs and therefore would have to
		 * be trapped for virtualization anyway.
		 */
		taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
					info->reg_id_mmfr0, boot->reg_id_mmfr0);
		taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
					info->reg_id_mmfr1, boot->reg_id_mmfr1);
		taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
					info->reg_id_mmfr2, boot->reg_id_mmfr2);
		taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
					info->reg_id_mmfr3, boot->reg_id_mmfr3);
		taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
					info->reg_id_pfr0, boot->reg_id_pfr0);
		taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
					info->reg_id_pfr1, boot->reg_id_pfr1);
		taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
					info->reg_mvfr0, boot->reg_mvfr0);
		taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
					info->reg_mvfr1, boot->reg_mvfr1);
		taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
					info->reg_mvfr2, boot->reg_mvfr2);
	}

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC,
			"Unsupported CPU feature variation.\n");
}

u64 read_system_reg(u32 id)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

	/* We shouldn't get a request for an unsupported register */
	BUG_ON(!regp);
	return regp->sys_val;
}

/*
 * __raw_read_system_reg() - Used by a STARTING cpu before cpuinfo is populated.
 * Read the system register on the current CPU
 */
static u64 __raw_read_system_reg(u32 sys_id)
{
	switch (sys_id) {
	case SYS_ID_PFR0_EL1:		return read_cpuid(ID_PFR0_EL1);
	case SYS_ID_PFR1_EL1:		return read_cpuid(ID_PFR1_EL1);
	case SYS_ID_DFR0_EL1:		return read_cpuid(ID_DFR0_EL1);
	case SYS_ID_MMFR0_EL1:		return read_cpuid(ID_MMFR0_EL1);
	case SYS_ID_MMFR1_EL1:		return read_cpuid(ID_MMFR1_EL1);
	case SYS_ID_MMFR2_EL1:		return read_cpuid(ID_MMFR2_EL1);
	case SYS_ID_MMFR3_EL1:		return read_cpuid(ID_MMFR3_EL1);
	case SYS_ID_ISAR0_EL1:		return read_cpuid(ID_ISAR0_EL1);
	case SYS_ID_ISAR1_EL1:		return read_cpuid(ID_ISAR1_EL1);
	case SYS_ID_ISAR2_EL1:		return read_cpuid(ID_ISAR2_EL1);
	case SYS_ID_ISAR3_EL1:		return read_cpuid(ID_ISAR3_EL1);
	case SYS_ID_ISAR4_EL1:		return read_cpuid(ID_ISAR4_EL1);
	case SYS_ID_ISAR5_EL1:		return read_cpuid(ID_ISAR5_EL1);
	case SYS_MVFR0_EL1:		return read_cpuid(MVFR0_EL1);
	case SYS_MVFR1_EL1:		return read_cpuid(MVFR1_EL1);
	case SYS_MVFR2_EL1:		return read_cpuid(MVFR2_EL1);

	case SYS_ID_AA64PFR0_EL1:	return read_cpuid(ID_AA64PFR0_EL1);
	case SYS_ID_AA64PFR1_EL1:	return read_cpuid(ID_AA64PFR1_EL1);
	case SYS_ID_AA64DFR0_EL1:	return read_cpuid(ID_AA64DFR0_EL1);
	case SYS_ID_AA64DFR1_EL1:	return read_cpuid(ID_AA64DFR1_EL1);
	case SYS_ID_AA64MMFR0_EL1:	return read_cpuid(ID_AA64MMFR0_EL1);
	case SYS_ID_AA64MMFR1_EL1:	return read_cpuid(ID_AA64MMFR1_EL1);
	case SYS_ID_AA64MMFR2_EL1:	return read_cpuid(ID_AA64MMFR2_EL1);
	case SYS_ID_AA64ISAR0_EL1:	return read_cpuid(ID_AA64ISAR0_EL1);
	case SYS_ID_AA64ISAR1_EL1:	return read_cpuid(ID_AA64ISAR1_EL1);

	case SYS_CNTFRQ_EL0:		return read_cpuid(CNTFRQ_EL0);
	case SYS_CTR_EL0:		return read_cpuid(CTR_EL0);
	case SYS_DCZID_EL0:		return read_cpuid(DCZID_EL0);
	default:
		BUG();
		return 0;
	}
}

#include <linux/irqchip/arm-gic-v3.h>

static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
	int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);

	return val >= entry->min_field_value;
}

static bool
has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
{
	u64 val;

	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
	if (scope == SCOPE_SYSTEM)
		val = read_system_reg(entry->sys_reg);
	else
		val = __raw_read_system_reg(entry->sys_reg);

	return feature_matches(val, entry);
}

static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
{
	bool has_sre;

	if (!has_cpuid_feature(entry, scope))
		return false;

	has_sre = gic_enable_sre();
	if (!has_sre)
		pr_warn_once("%s present but disabled by higher exception level\n",
			     entry->desc);

	return has_sre;
}

static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
{
	u32 midr = read_cpuid_id();
	u32 rv_min, rv_max;

	/* Cavium ThunderX pass 1.x and 2.x */
	rv_min = 0;
	rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;

	return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
}

static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return is_kernel_in_hyp_mode();
}

static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
			   int __unused)
{
	phys_addr_t idmap_addr = virt_to_phys(__hyp_idmap_text_start);

	/*
	 * Activate the lower HYP offset only if:
	 * - the idmap doesn't clash with it,
	 * - the kernel is not running at EL2.
	 */
	return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
}

static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.desc = "GIC system register CPU interface",
		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_useable_gicv3_cpuif,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.field_pos = ID_AA64PFR0_GIC_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_PAN
	{
		.desc = "Privileged Access Never",
		.capability = ARM64_HAS_PAN,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
		.enable = cpu_enable_pan,
	},
#endif /* CONFIG_ARM64_PAN */
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
	{
		.desc = "LSE atomic instructions",
		.capability = ARM64_HAS_LSE_ATOMICS,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 2,
	},
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
	{
		.desc = "Software prefetching using PRFM",
		.capability = ARM64_HAS_NO_HW_PREFETCH,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_no_hw_prefetch,
	},
#ifdef CONFIG_ARM64_UAO
	{
		.desc = "User Access Override",
		.capability = ARM64_HAS_UAO,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
		.min_field_value = 1,
		.enable = cpu_enable_uao,
	},
#endif /* CONFIG_ARM64_UAO */
#ifdef CONFIG_ARM64_PAN
	{
		.capability = ARM64_ALT_PAN_NOT_UAO,
		.def_scope = SCOPE_SYSTEM,
		.matches = cpufeature_pan_not_uao,
	},
#endif /* CONFIG_ARM64_PAN */
	{
		.desc = "Virtualization Host Extensions",
		.capability = ARM64_HAS_VIRT_HOST_EXTN,
		.def_scope = SCOPE_SYSTEM,
		.matches = runs_at_el2,
	},
	{
		.desc = "32-bit EL0 Support",
		.capability = ARM64_HAS_32BIT_EL0,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_EL0_SHIFT,
		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
	},
	{
		.desc = "Reduced HYP mapping offset",
		.capability = ARM64_HYP_OFFSET_LOW,
		.def_scope = SCOPE_SYSTEM,
		.matches = hyp_offset_low,
	},
	{},
};

#define HWCAP_CAP(reg, field, s, min_value, type, cap)		\
	{							\
		.desc = #cap,					\
		.def_scope = SCOPE_SYSTEM,			\
		.matches = has_cpuid_feature,			\
		.sys_reg = reg,					\
		.field_pos = field,				\
		.sign = s,					\
		.min_field_value = min_value,			\
		.hwcap_type = type,				\
		.hwcap = cap,					\
	}
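
/*
 * Each HWCAP_CAP() entry below ties an ELF hwcap to a minimum ID register
 * field value, evaluated against the sanitised system-wide register. For
 * instance, HWCAP_PMULL is set only when ID_AA64ISAR0_EL1.AES >= 2, while
 * HWCAP_AES only requires AES >= 1.
 */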

static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
	{},
};

static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
#endif
	{},
};

static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		elf_hwcap |= cap->hwcap;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		compat_elf_hwcap |= (u32)cap->hwcap;
		break;
	case CAP_COMPAT_HWCAP2:
		compat_elf_hwcap2 |= (u32)cap->hwcap;
		break;
#endif
	default:
		WARN_ON(1);
		break;
	}
}

/* Check if we have a particular HWCAP enabled */
static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
	bool rc;

	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		rc = (elf_hwcap & cap->hwcap) != 0;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
		break;
	case CAP_COMPAT_HWCAP2:
		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
		break;
#endif
	default:
		WARN_ON(1);
		rc = false;
	}

	return rc;
}

static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
	for (; hwcaps->matches; hwcaps++)
		if (hwcaps->matches(hwcaps, hwcaps->def_scope))
			cap_set_elf_hwcap(hwcaps);
}

void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			     const char *info)
{
	for (; caps->matches; caps++) {
		if (!caps->matches(caps, caps->def_scope))
			continue;

		if (!cpus_have_cap(caps->capability) && caps->desc)
			pr_info("%s %s\n", info, caps->desc);
		cpus_set_cap(caps->capability);
	}
}

/*
 * Run through the enabled capabilities and call enable() on all active
 * CPUs
 */
void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++)
		if (caps->enable && cpus_have_cap(caps->capability))
			/*
			 * Use stop_machine() as it schedules the work allowing
			 * us to modify PSTATE, instead of on_each_cpu() which
			 * uses an IPI, giving us a PSTATE that disappears when
			 * we return.
			 */
			stop_machine(caps->enable, NULL, cpu_online_mask);
}

/*
 * Flag to indicate if we have computed the system wide
 * capabilities based on the boot time active CPUs. This
 * will be used to determine if a new booting CPU should
 * go through the verification process to make sure that it
 * supports the system capabilities, without using a hotplug
 * notifier.
 */
static bool sys_caps_initialised;

static inline void set_sys_caps_initialised(void)
{
	sys_caps_initialised = true;
}

/*
 * Check for CPU features that are used in early boot
 * based on the Boot CPU value.
 */
static void check_early_cpu_features(void)
{
	verify_cpu_run_el();
	verify_cpu_asid_bits();
}

static void
verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
{

	for (; caps->matches; caps++)
		if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: missing HWCAP: %s\n",
					smp_processor_id(), caps->desc);
			cpu_die_early();
		}
}

static void
verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++) {
		if (!cpus_have_cap(caps->capability))
			continue;
		/*
		 * If the new CPU misses an advertised feature, we cannot proceed
		 * further, park the CPU.
		 */
		if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: missing feature: %s\n",
					smp_processor_id(), caps->desc);
			cpu_die_early();
		}
		if (caps->enable)
			caps->enable(NULL);
	}
}

/*
 * Run through the enabled system capabilities and call enable() on this CPU.
 * The capabilities were decided based on the available CPUs at the boot time.
 * Any new CPU should match the system wide status of the capability. If the
 * new CPU doesn't have a capability which the system now has enabled, we
 * cannot do anything to fix it up and could cause unexpected failures. So
 * we park the CPU.
 */
static void verify_local_cpu_capabilities(void)
{
	verify_local_cpu_errata_workarounds();
	verify_local_cpu_features(arm64_features);
	verify_local_elf_hwcaps(arm64_elf_hwcaps);
	if (system_supports_32bit_el0())
		verify_local_elf_hwcaps(compat_elf_hwcaps);
}

void check_local_cpu_capabilities(void)
{
	/*
	 * All secondary CPUs should conform to the early CPU features
	 * in use by the kernel based on boot CPU.
	 */
	check_early_cpu_features();

	/*
	 * If we haven't finalised the system capabilities, this CPU gets
	 * a chance to update the errata workarounds.
	 * Otherwise, this CPU should verify that it has all the system
	 * advertised capabilities.
	 */
	if (!sys_caps_initialised)
		update_cpu_errata_workarounds();
	else
		verify_local_cpu_capabilities();
}

static void __init setup_feature_capabilities(void)
{
	update_cpu_capabilities(arm64_features, "detected feature:");
	enable_cpu_capabilities(arm64_features);
}

/*
 * Check if the current CPU has a given feature capability.
 * Should be called from non-preemptible context.
 */
bool this_cpu_has_cap(unsigned int cap)
{
	const struct arm64_cpu_capabilities *caps;

	if (WARN_ON(preemptible()))
		return false;

	for (caps = arm64_features; caps->desc; caps++)
		if (caps->capability == cap && caps->matches)
			return caps->matches(caps, SCOPE_LOCAL_CPU);

	return false;
}

void __init setup_cpu_features(void)
{
	u32 cwg;
	int cls;

	/* Set the CPU feature capabilities */
	setup_feature_capabilities();
	enable_errata_workarounds();
	setup_elf_hwcaps(arm64_elf_hwcaps);

	if (system_supports_32bit_el0())
		setup_elf_hwcaps(compat_elf_hwcaps);

	/* Advertise that we have computed the system capabilities */
	set_sys_caps_initialised();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);
}

static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
}