// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the ARM64 and x86 versions
 *
 * Copyright (C) 2021 Sifive.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/module.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/vendorid_list.h>
#include <asm/sbi.h>
#include <asm/csr.h>
#include <asm/insn.h>
#include <asm/patch.h>

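/*
 * Identity of the boot CPU's vendor, architecture and implementation,
 * plus the vendor-specific errata patch function (if any) selected
 * from the vendor ID.
 */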
struct cpu_manufacturer_info_t {
	unsigned long vendor_id;
	unsigned long arch_id;
	unsigned long imp_id;
	void (*patch_func)(struct alt_entry *begin, struct alt_entry *end,
			   unsigned long archid, unsigned long impid,
			   unsigned int stage);
};

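/*
 * Read the machine vendor/architecture/implementation IDs, either
 * directly from the CSRs when running in M-mode or via the SBI
 * firmware otherwise, and select the matching vendor errata patch
 * function. Vendors without errata support leave patch_func NULL.
 */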
static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
{
#ifdef CONFIG_RISCV_M_MODE
	cpu_mfr_info->vendor_id = csr_read(CSR_MVENDORID);
	cpu_mfr_info->arch_id = csr_read(CSR_MARCHID);
	cpu_mfr_info->imp_id = csr_read(CSR_MIMPID);
#else
	cpu_mfr_info->vendor_id = sbi_get_mvendorid();
	cpu_mfr_info->arch_id = sbi_get_marchid();
	cpu_mfr_info->imp_id = sbi_get_mimpid();
#endif

	switch (cpu_mfr_info->vendor_id) {
#ifdef CONFIG_ERRATA_ANDES
	case ANDESTECH_VENDOR_ID:
		cpu_mfr_info->patch_func = andes_errata_patch_func;
		break;
#endif
#ifdef CONFIG_ERRATA_SIFIVE
	case SIFIVE_VENDOR_ID:
		cpu_mfr_info->patch_func = sifive_errata_patch_func;
		break;
#endif
#ifdef CONFIG_ERRATA_THEAD
	case THEAD_VENDOR_ID:
		cpu_mfr_info->patch_func = thead_errata_patch_func;
		break;
#endif
	default:
		cpu_mfr_info->patch_func = NULL;
	}
}

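/*
 * With the C extension, instructions are only guaranteed to be aligned
 * to 16-bit "parcels", so assemble the 32-bit instruction from two
 * little-endian halfword loads instead of one (possibly misaligned)
 * word load.
 */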
static u32 riscv_instruction_at(void *p)
{
	u16 *parcel = p;

	return (u32)parcel[0] | (u32)parcel[1] << 16;
}

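/*
 * An auipc+jalr pair encodes a 32-bit PC-relative call: auipc carries
 * the upper 20 bits of the offset (U-type immediate) and jalr the
 * lower 12 bits (I-type immediate), so that
 *
 *	target = pc + (utype_imm << 12) + sext(itype_imm)
 *
 * The instructions were assembled for the alternative section but now
 * run patch_offset bytes away from it, so subtract that distance from
 * the combined immediate to preserve the original target address.
 */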
static void riscv_alternative_fix_auipc_jalr(void *ptr, u32 auipc_insn,
					     u32 jalr_insn, int patch_offset)
{
	u32 call[2] = { auipc_insn, jalr_insn };
	s32 imm;

	/* get and adjust new target address */
	imm = riscv_insn_extract_utype_itype_imm(auipc_insn, jalr_insn);
	imm -= patch_offset;

	/* update instructions */
	riscv_insn_insert_utype_itype_imm(&call[0], &call[1], imm);

	/* patch the call site with the adjusted instructions */
	patch_text_nosync(ptr, call, sizeof(u32) * 2);
}

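/*
 * A jal instruction encodes a 21-bit PC-relative offset (J-type
 * immediate, +/-1MB range). Adjust it by the distance the code was
 * moved, just like the auipc+jalr case above.
 */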
static void riscv_alternative_fix_jal(void *ptr, u32 jal_insn, int patch_offset)
{
	s32 imm;

	/* get and adjust new target address */
	imm = riscv_insn_extract_jtype_imm(jal_insn);
	imm -= patch_offset;

	/* update instruction */
	riscv_insn_insert_jtype_imm(&jal_insn, imm);

	/* patch the jump site with the adjusted instruction */
	patch_text_nosync(ptr, &jal_insn, sizeof(u32));
}

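/*
 * Walk a block of patched instructions and fix up the PC-relative
 * offsets it contains: auipc+jalr call pairs (identified by rd == ra)
 * and jal jumps whose target lies outside the block. The code was
 * assembled at a different address than the one it now runs from, so
 * external targets would otherwise be off by patch_offset bytes.
 */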
void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len,
				   int patch_offset)
{
	int num_insn = len / sizeof(u32);
	int i;

	for (i = 0; i < num_insn; i++) {
		u32 insn = riscv_instruction_at(alt_ptr + i * sizeof(u32));

		/*
		 * This may be the start of an auipc + jalr pair, so
		 * check that at least one more instruction remains
		 * before reading the second half.
		 */
		if (riscv_insn_is_auipc(insn) && i < num_insn - 1) {
			u32 insn2 = riscv_instruction_at(alt_ptr + (i + 1) * sizeof(u32));

			if (!riscv_insn_is_jalr(insn2))
				continue;

			/* if the instruction pair is a call, it will use the ra register */
			if (RV_EXTRACT_RD_REG(insn) != 1)
				continue;

			riscv_alternative_fix_auipc_jalr(alt_ptr + i * sizeof(u32),
							 insn, insn2, patch_offset);
			i++;
		}

		if (riscv_insn_is_jal(insn)) {
			s32 imm = riscv_insn_extract_jtype_imm(insn);

			/* Don't modify jumps inside the alternative block */
			if ((alt_ptr + i * sizeof(u32) + imm) >= alt_ptr &&
			    (alt_ptr + i * sizeof(u32) + imm) < (alt_ptr + len))
				continue;

			riscv_alternative_fix_jal(alt_ptr + i * sizeof(u32),
						  insn, patch_offset);
		}
	}
}

/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 */
static void __init_or_module _apply_alternatives(struct alt_entry *begin,
						 struct alt_entry *end,
						 unsigned int stage)
{
	struct cpu_manufacturer_info_t cpu_mfr_info;

	riscv_fill_cpu_mfr_info(&cpu_mfr_info);

	riscv_cpufeature_patch_func(begin, end, stage);

	if (!cpu_mfr_info.patch_func)
		return;

	cpu_mfr_info.patch_func(begin, end,
				cpu_mfr_info.arch_id,
				cpu_mfr_info.imp_id,
				stage);
}

#ifdef CONFIG_MMU
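/*
 * The vDSO image carries its own .alternative section. Find it via
 * the ELF section headers of the image embedded in the kernel and
 * patch it along with the kernel at boot time.
 */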
static void __init apply_vdso_alternatives(void)
{
	const Elf_Ehdr *hdr;
	const Elf_Shdr *shdr;
	const Elf_Shdr *alt;
	struct alt_entry *begin, *end;

	hdr = (Elf_Ehdr *)vdso_start;
	shdr = (void *)hdr + hdr->e_shoff;
	alt = find_section(hdr, shdr, ".alternative");
	if (!alt)
		return;

	begin = (void *)hdr + alt->sh_offset;
	end = (void *)hdr + alt->sh_offset + alt->sh_size;

	_apply_alternatives(begin, end, RISCV_ALTERNATIVES_BOOT);
}
#else
static void __init apply_vdso_alternatives(void) { }
#endif

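/*
 * Patch the kernel image, and the vDSO with it, once the boot CPU's
 * features have been detected.
 */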
void __init apply_boot_alternatives(void)
{
	/* If called on a non-boot CPU, things could go wrong */
	WARN_ON(smp_processor_id() != 0);

	_apply_alternatives((struct alt_entry *)__alt_start,
			    (struct alt_entry *)__alt_end,
			    RISCV_ALTERNATIVES_BOOT);

	apply_vdso_alternatives();
}

/*
 * apply_early_boot_alternatives() is called from setup_vm() with the MMU off.
 *
 * The following requirements must be honoured for it to work correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm(),
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for alternative.o in kernel/Makefile.
 */
void __init apply_early_boot_alternatives(void)
{
#ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
	_apply_alternatives((struct alt_entry *)__alt_start,
			    (struct alt_entry *)__alt_end,
			    RISCV_ALTERNATIVES_EARLY_BOOT);
#endif
}

#ifdef CONFIG_MODULES
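/*
 * Patch the .alternative section of a freshly loaded module; start and
 * length describe that section inside the module image.
 */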
void apply_module_alternatives(void *start, size_t length)
{
	_apply_alternatives((struct alt_entry *)start,
			    (struct alt_entry *)(start + length),
			    RISCV_ALTERNATIVES_MODULE);
}
#endif