// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	/* Match on implementer, architecture and the top nibble of the part number */
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}
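
/*
 * cpu_enable_trap_ctr_access() below is shared by the mismatched CTR_EL0
 * capability and the erratum 1542419 entry: clearing SCTLR_EL1.UCT makes
 * EL0 reads of CTR_EL0 trap to EL1 so that the kernel can emulate them.
 */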
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

static DEFINE_RAW_SPINLOCK(reg_user_mask_modification);
static void __maybe_unused
cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *__unused)
{
	struct arm64_ftr_reg *regp;

	regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
	if (!regp)
		return;

	raw_spin_lock(&reg_user_mask_modification);
	if (regp->user_mask & ID_AA64ISAR1_EL1_BF16_MASK)
		regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
	raw_spin_unlock(&reg_user_mask_modification);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev) \
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
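
/*
 * Example (illustrative): ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4)
 * expands to a local-CPU erratum entry matching Cortex-A53 variant 0,
 * revisions r0p0 through r0p4.
 */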

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	/* The erratum only applies with SMT; a non-zero Aff0 means threads are enabled */
	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}

static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	u32 midr = read_cpuid_id();
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && has_dic;
}
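
/*
 * The arm64_cpu_capabilities lists below are consumed via
 * cpucap_multi_entry_cap_matches(): the capability matches if any one
 * entry in the list matches the local CPU.
 */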
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
	{
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
	{
		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_23154
static const struct midr_range cavium_erratum_23154_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
#endif
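
/*
 * Erratum 843419 is fixed in Cortex-A53 r0p4 parts that set bit 8 of
 * REVIDR_EL1; MIDR_FIXED() below excludes those parts from the workaround.
 */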
#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif /* CONFIG_ARM64_ERRATUM_1742098 */

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
static const struct midr_range erratum_spec_unpriv_load_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3117295
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2966298
	/* Cortex-A520 r0p0 to r0p1 */
	MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_3194386
static const struct midr_range erratum_spec_ssbs_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
	{}
};
#endif

#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
static const struct midr_range erratum_ac03_cpu_38_list[] = {
	MIDR_ALL_VERSIONS(MIDR_AMPERE1),
	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
	{},
};
#endif
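
/*
 * Master table of erratum workarounds. It is walked by the cpufeature
 * code (see cpufeature.c) alongside the regular capabilities as each
 * CPU is brought up.
 */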
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		.desc = "Cavium errata 23154 and 38545",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
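	/*
	 * Not gated by a CONFIG_ option: a mismatched CTR_EL0 must always
	 * be detected so that userspace observes consistent cache
	 * geometry across all CPUs.
	 */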
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
		/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		/* NVIDIA Carmel */
		.desc = "NVIDIA Carmel CNP erratum",
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
	{
		/*
		 * The erratum work around is handled within the TRBE
		 * driver and can be applied per-cpu. So, we can allow
		 * a late CPU to come online with this erratum.
		 */
		.desc = "ARM erratum 2119858 or 2139208",
		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223",
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
	{
		.desc = "ARM erratum 2253138 or 2224489",
		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2645198
	{
		.desc = "ARM erratum 2645198",
		.capability = ARM64_WORKAROUND_2645198,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
		.capability = ARM64_WORKAROUND_2077057,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142",
		.capability = ARM64_WORKAROUND_2064142,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
	{
		.desc = "ARM erratum 2457168",
		.capability = ARM64_WORKAROUND_2457168,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

		/* Cortex-A510 r0p0-r1p1 */
		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
	{
		.desc = "ARM erratum 2038923",
		.capability = ARM64_WORKAROUND_2038923,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
	{
		.desc = "ARM erratum 1902691",
		.capability = ARM64_WORKAROUND_1902691,

		/* Cortex-A510 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
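	/*
	 * Erratum 2658417: BFMMLA or BFCVT may produce an incorrect result
	 * on the affected parts; the workaround below hides BF16 from the
	 * emulated EL0 view of the ID registers. r1p1 parts with bit 25
	 * of REVIDR_EL1 set have the erratum fixed.
	 */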
#ifdef CONFIG_ARM64_ERRATUM_2658417
	{
		.desc = "ARM erratum 2658417",
		.capability = ARM64_WORKAROUND_2658417,
		/* Cortex-A510 r0p0 - r1p1 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
		.cpu_enable = cpu_clear_bf16_from_user_emulation,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_3194386
	{
		.desc = "SSBS not fully self-synchronizing",
		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	{
		.desc = "ARM errata 2966298, 3117295",
		.capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
		/* Cortex-A510 (all revisions), Cortex-A520 r0p0 - r0p1 */
		ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
	{
		.desc = "AmpereOne erratum AC03_CPU_38",
		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
		ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
	},
#endif
	{
	}
};