// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
	 *
	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}
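/*
 * Illustrative sketch (not part of the upstream file): a worked instance of
 * the check above. A CPU whose raw CTR_EL0 lacks IDC but whose effective
 * CTR_EL0 (with IDC folded in) matches the system value is accepted, since
 * only one of the two comparisons needs to fail for the CPU to be rejected.
 * The values below are hypothetical; only the final return expression
 * mirrors has_mismatched_cache_type().
 */
static bool __maybe_unused ctr_mismatch_example(void)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw = sys & ~BIT(CTR_EL0_IDC_SHIFT);	/* hypothetical raw value */
	u64 ctr_real = sys;				/* effective value matches */

	/* false: the effective CTR_EL0 matches, so this CPU may boot */
	return (ctr_real != sys) && (ctr_raw != sys);
}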
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

static DEFINE_RAW_SPINLOCK(reg_user_mask_modification);
static void __maybe_unused
cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *__unused)
{
	struct arm64_ftr_reg *regp;

	regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
	if (!regp)
		return;

	raw_spin_lock(&reg_user_mask_modification);
	if (regp->user_mask & ID_AA64ISAR1_EL1_BF16_MASK)
		regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
	raw_spin_unlock(&reg_user_mask_modification);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_RANGE_LIST(midr_list)

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}
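/*
 * Illustrative sketch (not part of the upstream file): what the initializer
 * macros above expand to. An entry written with ERRATA_MIDR_REV_RANGE() is
 * just designated initializers for .type, .matches and .midr_range, so the
 * two entries below are equivalent. The capability value is borrowed from an
 * existing workaround purely for the example.
 */
static const __maybe_unused struct arm64_cpu_capabilities example_macro_expansion[] = {
	{
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
	{
		/* Hand-expanded form of the entry above */
		.capability = ARM64_WORKAROUND_845719,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_affected_midr_range,
		.midr_range = MIDR_RANGE(MIDR_CORTEX_A53, 0, 0, 0, 4),
	},
	{},
};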
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	u32 midr = read_cpuid_id();
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && has_dic;
}

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
	{
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
	{
		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_23154
static const struct midr_range cavium_erratum_23154_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif
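/*
 * Illustrative sketch (not part of the upstream file): how the rNpM names in
 * the comments above map onto MIDR fields. "r3p1" is Variant 3, Revision 1,
 * so MIDR_RANGE(..., 0, 0, 3, 1) covers r0p0 to r3p1 inclusive. Assumes the
 * MIDR_VARIANT()/MIDR_REVISION() accessors from <asm/cputype.h>.
 */
static bool __maybe_unused example_midr_le_r3p1(u32 midr)
{
	return MIDR_VARIANT(midr) < 3 ||
	       (MIDR_VARIANT(midr) == 3 && MIDR_REVISION(midr) <= 1);
}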
#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif /* CONFIG_ARM64_ERRATUM_1742098 */
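/*
 * Illustrative sketch (not part of the upstream file): the midr_range tables
 * above are {}-terminated, and is_midr_in_range_list() stops at the first
 * entry with a zero .model. A minimal equivalent of that scan:
 */
static bool __maybe_unused
example_midr_list_scan(u32 midr, const struct midr_range *list)
{
	for (; list->model; list++)
		if (is_midr_in_range(midr, list))
			return true;

	return false;
}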
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
static const struct midr_range erratum_spec_unpriv_load_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3117295
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2966298
	/* Cortex-A520 r0p0 to r0p1 */
	MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_3194386
static const struct midr_range erratum_spec_ssbs_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
	{}
};
#endif
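/*
 * Illustrative sketch (not part of the upstream file): several entries in
 * the table below use cpucap_multi_entry_cap_matches() with a .match_list
 * (e.g. errata 843419 and Falkor/Kryo 1003). The capability fires if any
 * sub-entry's .matches() hook accepts the current CPU; a minimal equivalent
 * of that dispatch would be:
 */
static bool __maybe_unused
example_multi_entry_matches(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}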
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		.desc = "Cavium errata 23154 and 38545",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM errata 1286807, 2441009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
		/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		/* NVIDIA Carmel */
		.desc = "NVIDIA Carmel CNP erratum",
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
	{
		/*
		 * The erratum work around is handled within the TRBE
		 * driver and can be applied per-cpu. So, we can allow
		 * a late CPU to come online with this erratum.
		 */
		.desc = "ARM erratum 2119858 or 2139208",
		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223",
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
	{
		.desc = "ARM erratum 2253138 or 2224489",
		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2645198
	{
		.desc = "ARM erratum 2645198",
		.capability = ARM64_WORKAROUND_2645198,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
		.capability = ARM64_WORKAROUND_2077057,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142",
		.capability = ARM64_WORKAROUND_2064142,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
	{
		.desc = "ARM erratum 2457168",
		.capability = ARM64_WORKAROUND_2457168,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

		/* Cortex-A510 r0p0-r1p1 */
		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
	{
		.desc = "ARM erratum 2038923",
		.capability = ARM64_WORKAROUND_2038923,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
	{
		.desc = "ARM erratum 1902691",
		.capability = ARM64_WORKAROUND_1902691,

		/* Cortex-A510 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2658417
	{
		.desc = "ARM erratum 2658417",
		.capability = ARM64_WORKAROUND_2658417,
		/* Cortex-A510 r0p0 - r1p1 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
		.cpu_enable = cpu_clear_bf16_from_user_emulation,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_3194386
	{
		.desc = "ARM errata 3194386, 3312417",
		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	{
		.desc = "ARM errata 2966298, 3117295",
		.capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
		/* Cortex-A520 r0p0 - r0p1 */
		ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
	{
		.desc = "AmpereOne erratum AC03_CPU_38",
		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
	},
#endif
	{
	}
};
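/*
 * Illustrative sketch (not part of the upstream file): how code elsewhere in
 * the kernel typically consumes an entry from the table above once the
 * capabilities have been finalised. cpus_have_final_cap() is the real API;
 * the branch body is a hypothetical placeholder.
 */
static void __maybe_unused example_consume_errata_cap(void)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_REPEAT_TLBI)) {
		/* e.g. repeat the TLB invalidation sequence (placeholder) */
	}
}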