// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Heiko Stuebner <heiko@sntech.de>
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/errata_list.h>
#include <asm/hwprobe.h>
#include <asm/patch.h>
#include <asm/vendorid_list.h>

static bool errata_probe_pbmt(unsigned int stage,
			      unsigned long arch_id, unsigned long impid)
{
	if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PBMT))
		return false;

	if (arch_id != 0 || impid != 0)
		return false;

	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT ||
	    stage == RISCV_ALTERNATIVES_MODULE)
		return true;

	return false;
}

static bool errata_probe_cmo(unsigned int stage,
			     unsigned long arch_id, unsigned long impid)
{
	if (!IS_ENABLED(CONFIG_ERRATA_THEAD_CMO))
		return false;

	if (arch_id != 0 || impid != 0)
		return false;

	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
		return false;

	if (stage == RISCV_ALTERNATIVES_BOOT) {
		riscv_cbom_block_size = L1_CACHE_BYTES;
		riscv_noncoherent_supported();
	}

	return true;
}

static bool errata_probe_pmu(unsigned int stage,
			     unsigned long arch_id, unsigned long impid)
{
	if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PMU))
		return false;

	/* target-c9xx cores report arch_id and impid as 0 */
	if (arch_id != 0 || impid != 0)
		return false;

	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
		return false;

	return true;
}

static u32 thead_errata_probe(unsigned int stage,
			      unsigned long archid, unsigned long impid)
{
	u32 cpu_req_errata = 0;

	if (errata_probe_pbmt(stage, archid, impid))
		cpu_req_errata |= BIT(ERRATA_THEAD_PBMT);

	if (errata_probe_cmo(stage, archid, impid))
		cpu_req_errata |= BIT(ERRATA_THEAD_CMO);

	if (errata_probe_pmu(stage, archid, impid))
		cpu_req_errata |= BIT(ERRATA_THEAD_PMU);

	return cpu_req_errata;
}

void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
			     unsigned long archid, unsigned long impid,
			     unsigned int stage)
{
	struct alt_entry *alt;
	u32 cpu_req_errata = thead_errata_probe(stage, archid, impid);
	u32 tmp;
	void *oldptr, *altptr;

	for (alt = begin; alt < end; alt++) {
		if (alt->vendor_id != THEAD_VENDOR_ID)
			continue;
		if (alt->patch_id >= ERRATA_THEAD_NUMBER)
			continue;

		tmp = (1U << alt->patch_id);
		if (cpu_req_errata & tmp) {
			oldptr = ALT_OLD_PTR(alt);
			altptr = ALT_ALT_PTR(alt);

			/* On vm-alternatives, the mmu isn't running yet */
			if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) {
				memcpy(oldptr, altptr, alt->alt_len);
			} else {
				mutex_lock(&text_mutex);
				patch_text_nosync(oldptr, altptr, alt->alt_len);
				mutex_unlock(&text_mutex);
			}
		}
	}

	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
		local_flush_icache_all();
}

void thead_feature_probe_func(unsigned int cpu,
			      unsigned long archid,
			      unsigned long impid)
{
	if ((archid == 0) && (impid == 0))
		per_cpu(misaligned_access_speed, cpu) = RISCV_HWPROBE_MISALIGNED_FAST;
}