1 /* 2 * Contains CPU specific errata definitions 3 * 4 * Copyright (C) 2014 ARM Ltd. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 */ 18 19 #include <linux/arm-smccc.h> 20 #include <linux/psci.h> 21 #include <linux/types.h> 22 #include <asm/cpu.h> 23 #include <asm/cputype.h> 24 #include <asm/cpufeature.h> 25 26 static bool __maybe_unused 27 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) 28 { 29 const struct arm64_midr_revidr *fix; 30 u32 midr = read_cpuid_id(), revidr; 31 32 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); 33 if (!is_midr_in_range(midr, &entry->midr_range)) 34 return false; 35 36 midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK; 37 revidr = read_cpuid(REVIDR_EL1); 38 for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++) 39 if (midr == fix->midr_rv && (revidr & fix->revidr_mask)) 40 return false; 41 42 return true; 43 } 44 45 static bool __maybe_unused 46 is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry, 47 int scope) 48 { 49 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); 50 return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list); 51 } 52 53 static bool __maybe_unused 54 is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) 55 { 56 u32 model; 57 58 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); 59 60 model = read_cpuid_id(); 61 model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) | 62 MIDR_ARCHITECTURE_MASK; 63 64 return model == entry->midr_range.model; 65 } 66 67 static bool 68 has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry, 69 int scope) 70 { 71 u64 mask = arm64_ftr_reg_ctrel0.strict_mask; 72 u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask; 73 u64 ctr_raw, ctr_real; 74 75 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); 76 77 /* 78 * We want to make sure that all the CPUs in the system expose 79 * a consistent CTR_EL0 to make sure that applications behaves 80 * correctly with migration. 81 * 82 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 : 83 * 84 * 1) It is safe if the system doesn't support IDC, as CPU anyway 85 * reports IDC = 0, consistent with the rest. 86 * 87 * 2) If the system has IDC, it is still safe as we trap CTR_EL0 88 * access on this CPU via the ARM64_HAS_CACHE_IDC capability. 89 * 90 * So, we need to make sure either the raw CTR_EL0 or the effective 91 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot. 92 */ 93 ctr_raw = read_cpuid_cachetype() & mask; 94 ctr_real = read_cpuid_effective_cachetype() & mask; 95 96 return (ctr_real != sys) && (ctr_raw != sys); 97 } 98 99 static void 100 cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused) 101 { 102 u64 mask = arm64_ftr_reg_ctrel0.strict_mask; 103 104 /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */ 105 if ((read_cpuid_cachetype() & mask) != 106 (arm64_ftr_reg_ctrel0.sys_val & mask)) 107 sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); 108 } 109 110 atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1); 111 112 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR 113 #include <asm/mmu_context.h> 114 #include <asm/cacheflush.h> 115 116 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); 117 118 #ifdef CONFIG_KVM_INDIRECT_VECTORS 119 extern char __smccc_workaround_1_smc_start[]; 120 extern char __smccc_workaround_1_smc_end[]; 121 122 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, 123 const char *hyp_vecs_end) 124 { 125 void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K); 126 int i; 127 128 for (i = 0; i < SZ_2K; i += 0x80) 129 memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start); 130 131 __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); 132 } 133 134 static void __install_bp_hardening_cb(bp_hardening_cb_t fn, 135 const char *hyp_vecs_start, 136 const char *hyp_vecs_end) 137 { 138 static DEFINE_RAW_SPINLOCK(bp_lock); 139 int cpu, slot = -1; 140 141 /* 142 * enable_smccc_arch_workaround_1() passes NULL for the hyp_vecs 143 * start/end if we're a guest. Skip the hyp-vectors work. 144 */ 145 if (!hyp_vecs_start) { 146 __this_cpu_write(bp_hardening_data.fn, fn); 147 return; 148 } 149 150 raw_spin_lock(&bp_lock); 151 for_each_possible_cpu(cpu) { 152 if (per_cpu(bp_hardening_data.fn, cpu) == fn) { 153 slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu); 154 break; 155 } 156 } 157 158 if (slot == -1) { 159 slot = atomic_inc_return(&arm64_el2_vector_last_slot); 160 BUG_ON(slot >= BP_HARDEN_EL2_SLOTS); 161 __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end); 162 } 163 164 __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); 165 __this_cpu_write(bp_hardening_data.fn, fn); 166 raw_spin_unlock(&bp_lock); 167 } 168 #else 169 #define __smccc_workaround_1_smc_start NULL 170 #define __smccc_workaround_1_smc_end NULL 171 172 static void __install_bp_hardening_cb(bp_hardening_cb_t fn, 173 const char *hyp_vecs_start, 174 const char *hyp_vecs_end) 175 { 176 __this_cpu_write(bp_hardening_data.fn, fn); 177 } 178 #endif /* CONFIG_KVM_INDIRECT_VECTORS */ 179 180 static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, 181 bp_hardening_cb_t fn, 182 const char *hyp_vecs_start, 183 const char *hyp_vecs_end) 184 { 185 u64 pfr0; 186 187 if (!entry->matches(entry, SCOPE_LOCAL_CPU)) 188 return; 189 190 pfr0 = read_cpuid(ID_AA64PFR0_EL1); 191 if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT)) 192 return; 193 194 __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end); 195 } 196 197 #include <uapi/linux/psci.h> 198 #include <linux/arm-smccc.h> 199 #include <linux/psci.h> 200 201 static void call_smc_arch_workaround_1(void) 202 { 203 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); 204 } 205 206 static void call_hvc_arch_workaround_1(void) 207 { 208 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); 209 } 210 211 static void qcom_link_stack_sanitization(void) 212 { 213 u64 tmp; 214 215 asm volatile("mov %0, x30 \n" 216 ".rept 16 \n" 217 "bl . + 4 \n" 218 ".endr \n" 219 "mov x30, %0 \n" 220 : "=&r" (tmp)); 221 } 222 223 static void 224 enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) 225 { 226 bp_hardening_cb_t cb; 227 void *smccc_start, *smccc_end; 228 struct arm_smccc_res res; 229 u32 midr = read_cpuid_id(); 230 231 if (!entry->matches(entry, SCOPE_LOCAL_CPU)) 232 return; 233 234 if (psci_ops.smccc_version == SMCCC_VERSION_1_0) 235 return; 236 237 switch (psci_ops.conduit) { 238 case PSCI_CONDUIT_HVC: 239 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, 240 ARM_SMCCC_ARCH_WORKAROUND_1, &res); 241 if ((int)res.a0 < 0) 242 return; 243 cb = call_hvc_arch_workaround_1; 244 /* This is a guest, no need to patch KVM vectors */ 245 smccc_start = NULL; 246 smccc_end = NULL; 247 break; 248 249 case PSCI_CONDUIT_SMC: 250 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, 251 ARM_SMCCC_ARCH_WORKAROUND_1, &res); 252 if ((int)res.a0 < 0) 253 return; 254 cb = call_smc_arch_workaround_1; 255 smccc_start = __smccc_workaround_1_smc_start; 256 smccc_end = __smccc_workaround_1_smc_end; 257 break; 258 259 default: 260 return; 261 } 262 263 if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || 264 ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) 265 cb = qcom_link_stack_sanitization; 266 267 install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); 268 269 return; 270 } 271 #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ 272 273 #ifdef CONFIG_ARM64_SSBD 274 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); 275 276 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL; 277 278 static const struct ssbd_options { 279 const char *str; 280 int state; 281 } ssbd_options[] = { 282 { "force-on", ARM64_SSBD_FORCE_ENABLE, }, 283 { "force-off", ARM64_SSBD_FORCE_DISABLE, }, 284 { "kernel", ARM64_SSBD_KERNEL, }, 285 }; 286 287 static int __init ssbd_cfg(char *buf) 288 { 289 int i; 290 291 if (!buf || !buf[0]) 292 return -EINVAL; 293 294 for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) { 295 int len = strlen(ssbd_options[i].str); 296 297 if (strncmp(buf, ssbd_options[i].str, len)) 298 continue; 299 300 ssbd_state = ssbd_options[i].state; 301 return 0; 302 } 303 304 return -EINVAL; 305 } 306 early_param("ssbd", ssbd_cfg); 307 308 void __init arm64_update_smccc_conduit(struct alt_instr *alt, 309 __le32 *origptr, __le32 *updptr, 310 int nr_inst) 311 { 312 u32 insn; 313 314 BUG_ON(nr_inst != 1); 315 316 switch (psci_ops.conduit) { 317 case PSCI_CONDUIT_HVC: 318 insn = aarch64_insn_get_hvc_value(); 319 break; 320 case PSCI_CONDUIT_SMC: 321 insn = aarch64_insn_get_smc_value(); 322 break; 323 default: 324 return; 325 } 326 327 *updptr = cpu_to_le32(insn); 328 } 329 330 void __init arm64_enable_wa2_handling(struct alt_instr *alt, 331 __le32 *origptr, __le32 *updptr, 332 int nr_inst) 333 { 334 BUG_ON(nr_inst != 1); 335 /* 336 * Only allow mitigation on EL1 entry/exit and guest 337 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to 338 * be flipped. 339 */ 340 if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL) 341 *updptr = cpu_to_le32(aarch64_insn_gen_nop()); 342 } 343 344 void arm64_set_ssbd_mitigation(bool state) 345 { 346 if (this_cpu_has_cap(ARM64_SSBS)) { 347 if (state) 348 asm volatile(SET_PSTATE_SSBS(0)); 349 else 350 asm volatile(SET_PSTATE_SSBS(1)); 351 return; 352 } 353 354 switch (psci_ops.conduit) { 355 case PSCI_CONDUIT_HVC: 356 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL); 357 break; 358 359 case PSCI_CONDUIT_SMC: 360 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL); 361 break; 362 363 default: 364 WARN_ON_ONCE(1); 365 break; 366 } 367 } 368 369 static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, 370 int scope) 371 { 372 struct arm_smccc_res res; 373 bool required = true; 374 s32 val; 375 376 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); 377 378 if (this_cpu_has_cap(ARM64_SSBS)) { 379 required = false; 380 goto out_printmsg; 381 } 382 383 if (psci_ops.smccc_version == SMCCC_VERSION_1_0) { 384 ssbd_state = ARM64_SSBD_UNKNOWN; 385 return false; 386 } 387 388 switch (psci_ops.conduit) { 389 case PSCI_CONDUIT_HVC: 390 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, 391 ARM_SMCCC_ARCH_WORKAROUND_2, &res); 392 break; 393 394 case PSCI_CONDUIT_SMC: 395 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, 396 ARM_SMCCC_ARCH_WORKAROUND_2, &res); 397 break; 398 399 default: 400 ssbd_state = ARM64_SSBD_UNKNOWN; 401 return false; 402 } 403 404 val = (s32)res.a0; 405 406 switch (val) { 407 case SMCCC_RET_NOT_SUPPORTED: 408 ssbd_state = ARM64_SSBD_UNKNOWN; 409 return false; 410 411 case SMCCC_RET_NOT_REQUIRED: 412 pr_info_once("%s mitigation not required\n", entry->desc); 413 ssbd_state = ARM64_SSBD_MITIGATED; 414 return false; 415 416 case SMCCC_RET_SUCCESS: 417 required = true; 418 break; 419 420 case 1: /* Mitigation not required on this CPU */ 421 required = false; 422 break; 423 424 default: 425 WARN_ON(1); 426 return false; 427 } 428 429 switch (ssbd_state) { 430 case ARM64_SSBD_FORCE_DISABLE: 431 arm64_set_ssbd_mitigation(false); 432 required = false; 433 break; 434 435 case ARM64_SSBD_KERNEL: 436 if (required) { 437 __this_cpu_write(arm64_ssbd_callback_required, 1); 438 arm64_set_ssbd_mitigation(true); 439 } 440 break; 441 442 case ARM64_SSBD_FORCE_ENABLE: 443 arm64_set_ssbd_mitigation(true); 444 required = true; 445 break; 446 447 default: 448 WARN_ON(1); 449 break; 450 } 451 452 out_printmsg: 453 switch (ssbd_state) { 454 case ARM64_SSBD_FORCE_DISABLE: 455 pr_info_once("%s disabled from command-line\n", entry->desc); 456 break; 457 458 case ARM64_SSBD_FORCE_ENABLE: 459 pr_info_once("%s forced from command-line\n", entry->desc); 460 break; 461 } 462 463 return required; 464 } 465 #endif /* CONFIG_ARM64_SSBD */ 466 467 static void __maybe_unused 468 cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) 469 { 470 sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0); 471 } 472 473 #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ 474 .matches = is_affected_midr_range, \ 475 .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max) 476 477 #define CAP_MIDR_ALL_VERSIONS(model) \ 478 .matches = is_affected_midr_range, \ 479 .midr_range = MIDR_ALL_VERSIONS(model) 480 481 #define MIDR_FIXED(rev, revidr_mask) \ 482 .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}} 483 484 #define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ 485 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ 486 CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) 487 488 #define CAP_MIDR_RANGE_LIST(list) \ 489 .matches = is_affected_midr_range_list, \ 490 .midr_range_list = list 491 492 /* Errata affecting a range of revisions of given model variant */ 493 #define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max) \ 494 ERRATA_MIDR_RANGE(m, var, r_min, var, r_max) 495 496 /* Errata affecting a single variant/revision of a model */ 497 #define ERRATA_MIDR_REV(model, var, rev) \ 498 ERRATA_MIDR_RANGE(model, var, rev, var, rev) 499 500 /* Errata affecting all variants/revisions of a given a model */ 501 #define ERRATA_MIDR_ALL_VERSIONS(model) \ 502 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ 503 CAP_MIDR_ALL_VERSIONS(model) 504 505 /* Errata affecting a list of midr ranges, with same work around */ 506 #define ERRATA_MIDR_RANGE_LIST(midr_list) \ 507 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ 508 CAP_MIDR_RANGE_LIST(midr_list) 509 510 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR 511 512 /* 513 * List of CPUs where we need to issue a psci call to 514 * harden the branch predictor. 515 */ 516 static const struct midr_range arm64_bp_harden_smccc_cpus[] = { 517 MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), 518 MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), 519 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), 520 MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), 521 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), 522 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), 523 MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), 524 MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), 525 MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER), 526 {}, 527 }; 528 529 #endif 530 531 #ifdef CONFIG_HARDEN_EL2_VECTORS 532 533 static const struct midr_range arm64_harden_el2_vectors[] = { 534 MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), 535 MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), 536 {}, 537 }; 538 539 #endif 540 541 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI 542 543 static const struct midr_range arm64_repeat_tlbi_cpus[] = { 544 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009 545 MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0), 546 #endif 547 #ifdef CONFIG_ARM64_ERRATUM_1286807 548 MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0), 549 #endif 550 {}, 551 }; 552 553 #endif 554 555 #ifdef CONFIG_CAVIUM_ERRATUM_27456 556 const struct midr_range cavium_erratum_27456_cpus[] = { 557 /* Cavium ThunderX, T88 pass 1.x - 2.1 */ 558 MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1), 559 /* Cavium ThunderX, T81 pass 1.0 */ 560 MIDR_REV(MIDR_THUNDERX_81XX, 0, 0), 561 {}, 562 }; 563 #endif 564 565 #ifdef CONFIG_CAVIUM_ERRATUM_30115 566 static const struct midr_range cavium_erratum_30115_cpus[] = { 567 /* Cavium ThunderX, T88 pass 1.x - 2.2 */ 568 MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2), 569 /* Cavium ThunderX, T81 pass 1.0 - 1.2 */ 570 MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2), 571 /* Cavium ThunderX, T83 pass 1.0 */ 572 MIDR_REV(MIDR_THUNDERX_83XX, 0, 0), 573 {}, 574 }; 575 #endif 576 577 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 578 static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = { 579 { 580 ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0), 581 }, 582 { 583 .midr_range.model = MIDR_QCOM_KRYO, 584 .matches = is_kryo_midr, 585 }, 586 {}, 587 }; 588 #endif 589 590 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE 591 static const struct midr_range workaround_clean_cache[] = { 592 #if defined(CONFIG_ARM64_ERRATUM_826319) || \ 593 defined(CONFIG_ARM64_ERRATUM_827319) || \ 594 defined(CONFIG_ARM64_ERRATUM_824069) 595 /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */ 596 MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2), 597 #endif 598 #ifdef CONFIG_ARM64_ERRATUM_819472 599 /* Cortex-A53 r0p[01] : ARM errata 819472 */ 600 MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1), 601 #endif 602 {}, 603 }; 604 #endif 605 606 const struct arm64_cpu_capabilities arm64_errata[] = { 607 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE 608 { 609 .desc = "ARM errata 826319, 827319, 824069, 819472", 610 .capability = ARM64_WORKAROUND_CLEAN_CACHE, 611 ERRATA_MIDR_RANGE_LIST(workaround_clean_cache), 612 .cpu_enable = cpu_enable_cache_maint_trap, 613 }, 614 #endif 615 #ifdef CONFIG_ARM64_ERRATUM_832075 616 { 617 /* Cortex-A57 r0p0 - r1p2 */ 618 .desc = "ARM erratum 832075", 619 .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, 620 ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, 621 0, 0, 622 1, 2), 623 }, 624 #endif 625 #ifdef CONFIG_ARM64_ERRATUM_834220 626 { 627 /* Cortex-A57 r0p0 - r1p2 */ 628 .desc = "ARM erratum 834220", 629 .capability = ARM64_WORKAROUND_834220, 630 ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, 631 0, 0, 632 1, 2), 633 }, 634 #endif 635 #ifdef CONFIG_ARM64_ERRATUM_843419 636 { 637 /* Cortex-A53 r0p[01234] */ 638 .desc = "ARM erratum 843419", 639 .capability = ARM64_WORKAROUND_843419, 640 ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4), 641 MIDR_FIXED(0x4, BIT(8)), 642 }, 643 #endif 644 #ifdef CONFIG_ARM64_ERRATUM_845719 645 { 646 /* Cortex-A53 r0p[01234] */ 647 .desc = "ARM erratum 845719", 648 .capability = ARM64_WORKAROUND_845719, 649 ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4), 650 }, 651 #endif 652 #ifdef CONFIG_CAVIUM_ERRATUM_23154 653 { 654 /* Cavium ThunderX, pass 1.x */ 655 .desc = "Cavium erratum 23154", 656 .capability = ARM64_WORKAROUND_CAVIUM_23154, 657 ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1), 658 }, 659 #endif 660 #ifdef CONFIG_CAVIUM_ERRATUM_27456 661 { 662 .desc = "Cavium erratum 27456", 663 .capability = ARM64_WORKAROUND_CAVIUM_27456, 664 ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus), 665 }, 666 #endif 667 #ifdef CONFIG_CAVIUM_ERRATUM_30115 668 { 669 .desc = "Cavium erratum 30115", 670 .capability = ARM64_WORKAROUND_CAVIUM_30115, 671 ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus), 672 }, 673 #endif 674 { 675 .desc = "Mismatched cache type (CTR_EL0)", 676 .capability = ARM64_MISMATCHED_CACHE_TYPE, 677 .matches = has_mismatched_cache_type, 678 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, 679 .cpu_enable = cpu_enable_trap_ctr_access, 680 }, 681 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 682 { 683 .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003", 684 .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, 685 .matches = cpucap_multi_entry_cap_matches, 686 .match_list = qcom_erratum_1003_list, 687 }, 688 #endif 689 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI 690 { 691 .desc = "Qualcomm erratum 1009, ARM erratum 1286807", 692 .capability = ARM64_WORKAROUND_REPEAT_TLBI, 693 ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus), 694 }, 695 #endif 696 #ifdef CONFIG_ARM64_ERRATUM_858921 697 { 698 /* Cortex-A73 all versions */ 699 .desc = "ARM erratum 858921", 700 .capability = ARM64_WORKAROUND_858921, 701 ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), 702 }, 703 #endif 704 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR 705 { 706 .capability = ARM64_HARDEN_BRANCH_PREDICTOR, 707 .cpu_enable = enable_smccc_arch_workaround_1, 708 ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus), 709 }, 710 #endif 711 #ifdef CONFIG_HARDEN_EL2_VECTORS 712 { 713 .desc = "EL2 vector hardening", 714 .capability = ARM64_HARDEN_EL2_VECTORS, 715 ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors), 716 }, 717 #endif 718 #ifdef CONFIG_ARM64_SSBD 719 { 720 .desc = "Speculative Store Bypass Disable", 721 .capability = ARM64_SSBD, 722 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, 723 .matches = has_ssbd_mitigation, 724 }, 725 #endif 726 #ifdef CONFIG_ARM64_ERRATUM_1188873 727 { 728 /* Cortex-A76 r0p0 to r2p0 */ 729 .desc = "ARM erratum 1188873", 730 .capability = ARM64_WORKAROUND_1188873, 731 ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0), 732 }, 733 #endif 734 #ifdef CONFIG_ARM64_ERRATUM_1165522 735 { 736 /* Cortex-A76 r0p0 to r2p0 */ 737 .desc = "ARM erratum 1165522", 738 .capability = ARM64_WORKAROUND_1165522, 739 ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0), 740 }, 741 #endif 742 { 743 } 744 }; 745