/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "CPU features: " fmt

#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/processor.h>
#include <asm/sysreg.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	{						\
		.sign = SIGNED,				\
		.strict = STRICT,			\
		.type = TYPE,				\
		.shift = SHIFT,				\
		.width = WIDTH,				\
		.safe_val = SAFE_VAL,			\
	}

/* Define a feature with signed values */
#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Define a feature with unsigned value */
#define U_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

#define ARM64_FTR_END					\
	{						\
		.width = 0,				\
	}

static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),	/* RAZ */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
	/* Linux doesn't care about the EL3 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
	/* Linux shouldn't care about secure memory */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
	/*
	 * Differing PARange is fine as long as all peripherals and memory are mapped
	 * within the minimum PARange of all CPUs
	 */
	U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

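/*
 * Reading the tables above: e.g. the ID_AA64MMFR1_PAN_SHIFT entry in
 * ftr_id_aa64mmfr1 describes a 4-bit field that is FTR_STRICT and
 * FTR_LOWER_SAFE, so the sanitised system-wide value is the lowest value
 * seen on any CPU, and a mismatch between CPUs is also flagged by the
 * sanity checks in check_update_ftr_reg() below.
 */
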
static struct arm64_ftr_bits ftr_ctr[] = {
	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
	U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will
	 * make use of *minLine.
	 */
	U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),	/* L1Ip */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),	/* RAZ */
	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_mmfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),	/* InnerShr */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),	/* FCSE */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* AuxReg */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 4, 0),	/* TCM */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),	/* ShareLvl */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),		/* OuterShr */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* PMSA */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* VMSA */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_mvfr2[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* FPMisc */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* SIMDMisc */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_dczid[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 5, 27, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 1, 1),		/* DZP */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* BS */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_isar5[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 20, 4, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_mmfr4[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* ac2 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* RAZ */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_pfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 16, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),	/* State3 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),		/* State2 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* State1 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* State0 */
	ARM64_FTR_END,
};

/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
 */
static struct arm64_ftr_bits ftr_generic_32bits[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_generic[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_generic32[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 32, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_aa64raz[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
	ARM64_FTR_END,
};

#define ARM64_FTR_REG(id, table)		\
	{					\
		.sys_id = id,			\
		.name = #id,			\
		.ftr_bits = &((table)[0]),	\
	}

static struct arm64_ftr_reg arm64_ftr_regs[] = {

	/* Op1 = 0, CRn = 0, CRm = 1 */
	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

	/* Op1 = 0, CRn = 0, CRm = 2 */
	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),

	/* Op1 = 0, CRn = 0, CRm = 3 */
	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),

	/* Op1 = 0, CRn = 0, CRm = 4 */
	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_aa64raz),

	/* Op1 = 0, CRn = 0, CRm = 5 */
	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
	ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_generic),

	/* Op1 = 0, CRn = 0, CRm = 6 */
	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
	ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_aa64raz),

	/* Op1 = 0, CRn = 0, CRm = 7 */
	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),

	/* Op1 = 3, CRn = 0, CRm = 0 */
	ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr),
	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

	/* Op1 = 3, CRn = 14, CRm = 0 */
	ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_generic32),
};

static int search_cmp_ftr_reg(const void *id, const void *regp)
{
	return (int)(unsigned long)id - (int)((const struct arm64_ftr_reg *)regp)->sys_id;
}

/*
 * get_arm64_ftr_reg - Lookup a feature register entry using its
 * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 * ascending order of sys_id, we use binary search to find a matching
 * entry.
 *
 * returns - Upon success, matching ftr_reg entry for id.
 *	   - NULL on failure. It is up to the caller to decide
 *	     the impact of a failure.
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
	return bsearch((const void *)(unsigned long)sys_id,
			arm64_ftr_regs,
			ARRAY_SIZE(arm64_ftr_regs),
			sizeof(arm64_ftr_regs[0]),
			search_cmp_ftr_reg);
}

static u64 arm64_ftr_set_value(struct arm64_ftr_bits *ftrp, s64 reg, s64 ftr_val)
{
	u64 mask = arm64_ftr_mask(ftrp);

	reg &= ~mask;
	reg |= (ftr_val << ftrp->shift) & mask;
	return reg;
}

static s64 arm64_ftr_safe_value(struct arm64_ftr_bits *ftrp, s64 new, s64 cur)
{
	s64 ret = 0;

	switch (ftrp->type) {
	case FTR_EXACT:
		ret = ftrp->safe_val;
		break;
	case FTR_LOWER_SAFE:
		ret = new < cur ? new : cur;
		break;
	case FTR_HIGHER_SAFE:
		ret = new > cur ? new : cur;
		break;
	default:
		BUG();
	}

	return ret;
}

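/*
 * For example, if a secondary CPU reports a smaller ID_AA64DFR0_EL1.BRPs
 * field (FTR_LOWER_SAFE) than the boot CPU, the sanitised system-wide
 * value keeps the smaller one. For an FTR_EXACT field, any mismatch makes
 * the field fall back to the table's safe_val instead.
 */
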
static int __init sort_cmp_ftr_regs(const void *a, const void *b)
{
	return ((const struct arm64_ftr_reg *)a)->sys_id -
		((const struct arm64_ftr_reg *)b)->sys_id;
}

static void __init swap_ftr_regs(void *a, void *b, int size)
{
	struct arm64_ftr_reg tmp = *(struct arm64_ftr_reg *)a;
	*(struct arm64_ftr_reg *)a = *(struct arm64_ftr_reg *)b;
	*(struct arm64_ftr_reg *)b = tmp;
}

static void __init sort_ftr_regs(void)
{
	/* Keep the array sorted so that we can do the binary search */
	sort(arm64_ftr_regs,
		ARRAY_SIZE(arm64_ftr_regs),
		sizeof(arm64_ftr_regs[0]),
		sort_cmp_ftr_regs,
		swap_ftr_regs);
}

/*
 * Initialise the CPU feature register from Boot CPU values.
 * Also initialises the strict_mask for the register.
 */
static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
	u64 val = 0;
	u64 strict_mask = ~0x0ULL;
	struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

	BUG_ON(!reg);

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		val = arm64_ftr_set_value(ftrp, val, ftr_new);
		if (!ftrp->strict)
			strict_mask &= ~arm64_ftr_mask(ftrp);
	}
	reg->sys_val = val;
	reg->strict_mask = strict_mask;
}

void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
	/* Before we start using the table, make sure it is sorted */
	sort_ftr_regs();

	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
	init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
	init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
	init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
	init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
	init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
	init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
	init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
	init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
	init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
	init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
	init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
	init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
	init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
	init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
	init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
	init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
}

static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
{
	struct arm64_ftr_bits *ftrp;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		if (ftr_cur == ftr_new)
			continue;
		/* Find a safe value */
		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
	}
}

static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	BUG_ON(!regp);
	update_cpu_ftr_reg(regp, val);
	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
		return 0;
	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
			regp->name, boot, cpu, val);
	return 1;
}

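/*
 * Note that only strict fields take part in the comparison above:
 * init_cpu_ftr_reg() clears FTR_NONSTRICT fields from strict_mask, so
 * e.g. a PARange or CTR_EL0.L1Ip mismatch still updates the sanitised
 * value but never triggers the SANITY CHECK warning.
 */
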
/*
 * Update system wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any insane variations from those of the boot CPU.
 */
void update_cpu_features(int cpu,
			 struct cpuinfo_arm64 *info,
			 struct cpuinfo_arm64 *boot)
{
	int taint = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
				      info->reg_ctr, boot->reg_ctr);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
				      info->reg_dczid, boot->reg_dczid);

	/* If different, timekeeping will be broken (especially with KVM) */
	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
				      info->reg_cntfrq, boot->reg_cntfrq);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
	/*
	 * Even in big.LITTLE, processors should be identical instruction-set
	 * wise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);

	/*
	 * EL3 is not our concern.
	 * ID_AA64PFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);

	/*
	 * If we have AArch32, we care about 32-bit features for compat. These
	 * registers should be RES0 otherwise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
				      info->reg_id_dfr0, boot->reg_id_dfr0);
	taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
				      info->reg_id_isar0, boot->reg_id_isar0);
	taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
				      info->reg_id_isar1, boot->reg_id_isar1);
	taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
				      info->reg_id_isar2, boot->reg_id_isar2);
	taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
				      info->reg_id_isar3, boot->reg_id_isar3);
	taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
				      info->reg_id_isar4, boot->reg_id_isar4);
	taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
				      info->reg_id_isar5, boot->reg_id_isar5);

	/*
	 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
	 * ACTLR formats could differ across CPUs and therefore would have to
	 * be trapped for virtualization anyway.
	 */
	taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
				      info->reg_id_mmfr0, boot->reg_id_mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
				      info->reg_id_mmfr1, boot->reg_id_mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
				      info->reg_id_mmfr2, boot->reg_id_mmfr2);
	taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
				      info->reg_id_mmfr3, boot->reg_id_mmfr3);
	taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
				      info->reg_id_pfr0, boot->reg_id_pfr0);
	taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
				      info->reg_id_pfr1, boot->reg_id_pfr1);
	taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
				      info->reg_mvfr0, boot->reg_mvfr0);
	taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
				      info->reg_mvfr1, boot->reg_mvfr1);
	taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
				      info->reg_mvfr2, boot->reg_mvfr2);

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC,
			"Unsupported CPU feature variation.\n");
}

u64 read_system_reg(u32 id)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

	/* We shouldn't get a request for an unsupported register */
	BUG_ON(!regp);
	return regp->sys_val;
}

#include <linux/irqchip/arm-gic-v3.h>

static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
	int val = cpuid_feature_extract_field(reg, entry->field_pos);

	return val >= entry->min_field_value;
}

static bool
has_cpuid_feature(const struct arm64_cpu_capabilities *entry)
{
	u64 val;

	val = read_system_reg(entry->sys_reg);
	return feature_matches(val, entry);
}

static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
{
	bool has_sre;

	if (!has_cpuid_feature(entry))
		return false;

	has_sre = gic_enable_sre();
	if (!has_sre)
		pr_warn_once("%s present but disabled by higher exception level\n",
			     entry->desc);

	return has_sre;
}

static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.desc = "GIC system register CPU interface",
		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
		.matches = has_useable_gicv3_cpuif,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.field_pos = ID_AA64PFR0_GIC_SHIFT,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_PAN
	{
		.desc = "Privileged Access Never",
		.capability = ARM64_HAS_PAN,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
		.min_field_value = 1,
		.enable = cpu_enable_pan,
	},
#endif /* CONFIG_ARM64_PAN */
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
	{
		.desc = "LSE atomic instructions",
		.capability = ARM64_HAS_LSE_ATOMICS,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
		.min_field_value = 2,
	},
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
	{},
};

#define HWCAP_CAP(reg, field, min_value, type, cap)	\
	{						\
		.desc = #cap,				\
		.matches = has_cpuid_feature,		\
		.sys_reg = reg,				\
		.field_pos = field,			\
		.min_field_value = min_value,		\
		.hwcap_type = type,			\
		.hwcap = cap,				\
	}

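/*
 * Each HWCAP_CAP() entry ties an ELF hwcap to a sanitised ID register
 * field. For example, the first entry below sets HWCAP_PMULL when the
 * system-wide ID_AA64ISAR0_EL1.AES field is at least 2 (AES + PMULL
 * implemented); a value of 1 only grants HWCAP_AES.
 */
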
static const struct arm64_cpu_capabilities arm64_hwcaps[] = {
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 2, CAP_HWCAP, HWCAP_PMULL),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 1, CAP_HWCAP, HWCAP_AES),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, 1, CAP_HWCAP, HWCAP_SHA1),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 1, CAP_HWCAP, HWCAP_SHA2),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, 1, CAP_HWCAP, HWCAP_CRC32),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, 2, CAP_HWCAP, HWCAP_ATOMICS),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 0, CAP_HWCAP, HWCAP_FP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 0, CAP_HWCAP, HWCAP_ASIMD),
#ifdef CONFIG_COMPAT
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
#endif
	{},
};

static void __init cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
{
	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		elf_hwcap |= cap->hwcap;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		compat_elf_hwcap |= (u32)cap->hwcap;
		break;
	case CAP_COMPAT_HWCAP2:
		compat_elf_hwcap2 |= (u32)cap->hwcap;
		break;
#endif
	default:
		WARN_ON(1);
		break;
	}
}

/* Check if we have a particular HWCAP enabled */
static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *cap)
{
	bool rc;

	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		rc = (elf_hwcap & cap->hwcap) != 0;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
		break;
	case CAP_COMPAT_HWCAP2:
		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
		break;
#endif
	default:
		WARN_ON(1);
		rc = false;
	}

	return rc;
}

static void __init setup_cpu_hwcaps(void)
{
	int i;
	const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps;

	for (i = 0; hwcaps[i].desc; i++)
		if (hwcaps[i].matches(&hwcaps[i]))
			cap_set_hwcap(&hwcaps[i]);
}

void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			     const char *info)
{
	int i;

	for (i = 0; caps[i].desc; i++) {
		if (!caps[i].matches(&caps[i]))
			continue;

		if (!cpus_have_cap(caps[i].capability))
			pr_info("%s %s\n", info, caps[i].desc);
		cpus_set_cap(caps[i].capability);
	}
}

/*
 * Run through the enabled capabilities and call the enable() method for
 * each of them on all active CPUs.
 */
static void __init
enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
	int i;

	for (i = 0; caps[i].desc; i++)
		if (caps[i].enable && cpus_have_cap(caps[i].capability))
			on_each_cpu(caps[i].enable, NULL, true);
}

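/*
 * Note: the enable() methods above run via on_each_cpu(), i.e. with
 * interrupts disabled locally and from IPI context on the other CPUs,
 * so they must not sleep.
 */
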
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Flag to indicate if we have computed the system wide
 * capabilities based on the boot time active CPUs. This
 * will be used to determine if a new booting CPU should
 * go through the verification process to make sure that it
 * supports the system capabilities, without using a hotplug
 * notifier.
 */
static bool sys_caps_initialised;

static inline void set_sys_caps_initialised(void)
{
	sys_caps_initialised = true;
}

/*
 * __raw_read_system_reg() - Used by a STARTING cpu before cpuinfo is populated.
 */
static u64 __raw_read_system_reg(u32 sys_id)
{
	switch (sys_id) {
	case SYS_ID_PFR0_EL1:		return (u64)read_cpuid(ID_PFR0_EL1);
	case SYS_ID_PFR1_EL1:		return (u64)read_cpuid(ID_PFR1_EL1);
	case SYS_ID_DFR0_EL1:		return (u64)read_cpuid(ID_DFR0_EL1);
	case SYS_ID_MMFR0_EL1:		return (u64)read_cpuid(ID_MMFR0_EL1);
	case SYS_ID_MMFR1_EL1:		return (u64)read_cpuid(ID_MMFR1_EL1);
	case SYS_ID_MMFR2_EL1:		return (u64)read_cpuid(ID_MMFR2_EL1);
	case SYS_ID_MMFR3_EL1:		return (u64)read_cpuid(ID_MMFR3_EL1);
	case SYS_ID_ISAR0_EL1:		return (u64)read_cpuid(ID_ISAR0_EL1);
	case SYS_ID_ISAR1_EL1:		return (u64)read_cpuid(ID_ISAR1_EL1);
	case SYS_ID_ISAR2_EL1:		return (u64)read_cpuid(ID_ISAR2_EL1);
	case SYS_ID_ISAR3_EL1:		return (u64)read_cpuid(ID_ISAR3_EL1);
	case SYS_ID_ISAR4_EL1:		return (u64)read_cpuid(ID_ISAR4_EL1);
	case SYS_ID_ISAR5_EL1:		return (u64)read_cpuid(ID_ISAR5_EL1);
	case SYS_MVFR0_EL1:		return (u64)read_cpuid(MVFR0_EL1);
	case SYS_MVFR1_EL1:		return (u64)read_cpuid(MVFR1_EL1);
	case SYS_MVFR2_EL1:		return (u64)read_cpuid(MVFR2_EL1);

	case SYS_ID_AA64PFR0_EL1:	return (u64)read_cpuid(ID_AA64PFR0_EL1);
	case SYS_ID_AA64PFR1_EL1:	return (u64)read_cpuid(ID_AA64PFR1_EL1);
	case SYS_ID_AA64DFR0_EL1:	return (u64)read_cpuid(ID_AA64DFR0_EL1);
	case SYS_ID_AA64DFR1_EL1:	return (u64)read_cpuid(ID_AA64DFR1_EL1);
	case SYS_ID_AA64MMFR0_EL1:	return (u64)read_cpuid(ID_AA64MMFR0_EL1);
	case SYS_ID_AA64MMFR1_EL1:	return (u64)read_cpuid(ID_AA64MMFR1_EL1);
	case SYS_ID_AA64ISAR0_EL1:	return (u64)read_cpuid(ID_AA64ISAR0_EL1);
	case SYS_ID_AA64ISAR1_EL1:	return (u64)read_cpuid(ID_AA64ISAR1_EL1);

	case SYS_CNTFRQ_EL0:		return (u64)read_cpuid(CNTFRQ_EL0);
	case SYS_CTR_EL0:		return (u64)read_cpuid(CTR_EL0);
	case SYS_DCZID_EL0:		return (u64)read_cpuid(DCZID_EL0);
	default:
		BUG();
		return 0;
	}
}

/*
 * Park the CPU which doesn't have the capability as advertised
 * by the system.
 */
static void fail_incapable_cpu(char *cap_type,
			       const struct arm64_cpu_capabilities *cap)
{
	int cpu = smp_processor_id();

	pr_crit("CPU%d: missing %s : %s\n", cpu, cap_type, cap->desc);
	/* Mark this CPU absent */
	set_cpu_present(cpu, 0);

	/* Check if we can park ourselves */
	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
		cpu_ops[cpu]->cpu_die(cpu);
	asm(
	"1:	wfe\n"
	"	wfi\n"
	"	b	1b");
}

/*
 * Run through the enabled system capabilities and call enable() for each
 * of them on this CPU.
 * The capabilities were decided based on the available CPUs at the boot time.
 * Any new CPU should match the system wide status of the capability. If the
 * new CPU doesn't have a capability which the system now has enabled, we
 * cannot do anything to fix it up and could cause unexpected failures. So
 * we park the CPU.
 */
void verify_local_cpu_capabilities(void)
{
	int i;
	const struct arm64_cpu_capabilities *caps;

	/*
	 * If we haven't computed the system capabilities, there is nothing
	 * to verify.
	 */
	if (!sys_caps_initialised)
		return;

	caps = arm64_features;
	for (i = 0; caps[i].desc; i++) {
		if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg)
			continue;
		/*
		 * If the new CPU misses an advertised feature, we cannot
		 * proceed further; park the CPU.
		 */
		if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
			fail_incapable_cpu("arm64_features", &caps[i]);
		if (caps[i].enable)
			caps[i].enable(NULL);
	}

	for (i = 0, caps = arm64_hwcaps; caps[i].desc; i++) {
		if (!cpus_have_hwcap(&caps[i]))
			continue;
		if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
			fail_incapable_cpu("arm64_hwcaps", &caps[i]);
	}
}

#else	/* !CONFIG_HOTPLUG_CPU */

static inline void set_sys_caps_initialised(void)
{
}

#endif	/* CONFIG_HOTPLUG_CPU */

static void __init setup_feature_capabilities(void)
{
	update_cpu_capabilities(arm64_features, "detected feature:");
	enable_cpu_capabilities(arm64_features);
}

void __init setup_cpu_features(void)
{
	u32 cwg;
	int cls;

	/* Set the CPU feature capabilities */
	setup_feature_capabilities();
	setup_cpu_hwcaps();

	/* Advertise that we have computed the system capabilities */
	set_sys_caps_initialised();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);
}