/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
				       entry->midr_range_min,
				       entry->midr_range_max);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_model;
}

static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}

static int cpu_enable_trap_ctr_access(void *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
	return 0;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
extern char __qcom_hyp_sanitize_link_stack_start[];
extern char __qcom_hyp_sanitize_link_stack_end[];

static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	/* Replicate the hardening sequence into each 0x80-byte vector entry of this 2K slot */
	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static int last_slot = -1;
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);

	/* Reuse the vectors slot of any CPU that already installed this callback */
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	/* Otherwise claim the next free slot and copy the sequence into it */
	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __psci_hyp_bp_inval_start		NULL
#define __psci_hyp_bp_inval_end			NULL
#define __qcom_hyp_sanitize_link_stack_start	NULL
#define __qcom_hyp_sanitize_link_stack_end	NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	/* CPUs advertising ID_AA64PFR0_EL1.CSV2 are not affected and need no callback */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <linux/psci.h>

static int enable_psci_bp_hardening(void *data)
{
	const struct arm64_cpu_capabilities *entry = data;

	if (psci_ops.get_version)
		install_bp_hardening_cb(entry,
					(bp_hardening_cb_t)psci_ops.get_version,
					__psci_hyp_bp_inval_start,
					__psci_hyp_bp_inval_end);

	return 0;
}

static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	/* Overwrite the return-address (link) stack with 16 dummy calls; x30 is preserved in tmp */
	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static int qcom_enable_link_stack_sanitization(void *data)
{
	const struct arm64_cpu_capabilities *entry = data;

	install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
				__qcom_hyp_sanitize_link_stack_start,
				__qcom_hyp_sanitize_link_stack_end);

	return 0;
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

#define MIDR_RANGE(model, min, max) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = min, \
	.midr_range_max = max

#define MIDR_ALL_VERSIONS(model) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = 0, \
	.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)

const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
		/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
		/* Cortex-A53 r0p[01] */
		.desc = "ARM erratum 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		MIDR_RANGE(MIDR_CORTEX_A57,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 2)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		MIDR_RANGE(MIDR_CORTEX_A57,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 2)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 1)),
	},
	{
		/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 2),
	},
	{
		/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
	},
	{
		/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_line_size,
		.def_scope = SCOPE_LOCAL_CPU,
		.enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(0, 0)),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.def_scope = SCOPE_LOCAL_CPU,
		.midr_model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(0, 0)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
		.enable = qcom_enable_link_stack_sanitization,
	},
	{
		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
		.enable = enable_psci_bp_hardening,
	},
#endif
	{
	}
};

/*
 * The CPU errata workarounds are detected and applied at boot time
 * and the related information is freed soon after. If a newly onlined CPU
 * requires a workaround that was not detected at boot, fail that CPU.
 */
void verify_local_cpu_errata_workarounds(void)
{
	const struct arm64_cpu_capabilities *caps = arm64_errata;

	for (; caps->matches; caps++) {
		if (cpus_have_cap(caps->capability)) {
			if (caps->enable)
				caps->enable((void *)caps);
		} else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: Requires work around for %s, not detected at boot time\n",
				smp_processor_id(),
				caps->desc ? : "an erratum");
			cpu_die_early();
		}
	}
}

void update_cpu_errata_workarounds(void)
{
	update_cpu_capabilities(arm64_errata, "enabling workaround for");
}

void __init enable_errata_workarounds(void)
{
	enable_cpu_capabilities(arm64_errata);
}