xref: /openbmc/linux/arch/x86/kernel/callthunks.c (revision bbaceb18)
// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) "callthunks: " fmt

#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/moduleloader.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpu.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/kexec.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/sections.h>
#include <asm/switch_to.h>
#include <asm/sync_core.h>
#include <asm/text-patching.h>
#include <asm/xen/hypercall.h>

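/*
 * Debug knob for the prdbg() macro below. Enabled at boot via the
 * "debug-callthunks" command line parameter registered with __setup().
 */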
static int __initdata_or_module debug_callthunks;

#define prdbg(fmt, args...)					\
do {								\
	if (debug_callthunks)					\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
} while (0)

static int __init debug_thunks(char *str)
{
	debug_callthunks = 1;
	return 1;
}
__setup("debug-callthunks", debug_thunks);

extern s32 __call_sites[], __call_sites_end[];

struct thunk_desc {
	void		*template;
	unsigned int	template_size;
};

struct core_text {
	unsigned long	base;
	unsigned long	end;
	const char	*name;
};

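/* Set once builtin patching has completed; gates module patching below */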
static bool thunks_initialized __ro_after_init;

static const struct core_text builtin_coretext = {
	.base = (unsigned long)_text,
	.end  = (unsigned long)_etext,
	.name = "builtin",
};

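/*
 * Template for the call depth accounting thunk. The INCREMENT_CALL_DEPTH
 * sequence is assembled once into .rodata and later copied into the
 * padding area in front of each call destination by patch_dest().
 */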
asm (
	".pushsection .rodata				\n"
	".global skl_call_thunk_template		\n"
	"skl_call_thunk_template:			\n"
		__stringify(INCREMENT_CALL_DEPTH)"	\n"
	".global skl_call_thunk_tail			\n"
	"skl_call_thunk_tail:				\n"
	".popsection					\n"
);

extern u8 skl_call_thunk_template[];
extern u8 skl_call_thunk_tail[];

#define SKL_TMPL_SIZE \
	((unsigned int)(skl_call_thunk_tail - skl_call_thunk_template))

extern void error_entry(void);
extern void xen_error_entry(void);
extern void paranoid_entry(void);

static inline bool within_coretext(const struct core_text *ct, void *addr)
{
	unsigned long p = (unsigned long)addr;

	return ct->base <= p && p < ct->end;
}

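/*
 * Check whether @addr lies within the core text of a loaded module.
 * __module_address() must be called with preemption disabled.
 */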
static inline bool within_module_coretext(void *addr)
{
	bool ret = false;

#ifdef CONFIG_MODULES
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)addr);
	if (mod && within_module_core((unsigned long)addr, mod))
		ret = true;
	preempt_enable();
#endif
	return ret;
}

static bool is_coretext(const struct core_text *ct, void *addr)
{
	if (ct && within_coretext(ct, addr))
		return true;
	if (within_coretext(&builtin_coretext, addr))
		return true;
	return within_module_coretext(addr);
}

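/*
 * Call destinations which must not get an accounting thunk: low level
 * entry code, code which manages the call depth or the RSB directly,
 * and text ranges (kexec control code, the Xen hypercall page) which
 * are not regular functions with a patchable padding area.
 */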
static __init_or_module bool skip_addr(void *dest)
{
	if (dest == error_entry)
		return true;
	if (dest == paranoid_entry)
		return true;
	if (dest == xen_error_entry)
		return true;
	/* Does FILL_RSB... */
	if (dest == __switch_to_asm)
		return true;
	/* Accounts directly */
	if (dest == ret_from_fork)
		return true;
#ifdef CONFIG_HOTPLUG_CPU
	if (dest == start_cpu0)
		return true;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	if (dest == __fentry__)
		return true;
#endif
#ifdef CONFIG_KEXEC_CORE
	if (dest >= (void *)relocate_kernel &&
	    dest < (void *)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
		return true;
#endif
#ifdef CONFIG_XEN
	if (dest >= (void *)hypercall_page &&
	    dest < (void *)hypercall_page + PAGE_SIZE)
		return true;
#endif
	return false;
}

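/*
 * Decode the instruction at @addr and return the destination of a direct
 * CALL. Returns NULL if it is not a direct call or the destination is to
 * be skipped, and an ERR_PTR() when decoding fails.
 */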
static __init_or_module void *call_get_dest(void *addr)
{
	struct insn insn;
	void *dest;
	int ret;

	ret = insn_decode_kernel(&insn, addr);
	if (ret)
		return ERR_PTR(ret);

	/* Patched out call? */
	if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
		return NULL;

	dest = addr + insn.length + insn.immediate.value;
	if (skip_addr(dest))
		return NULL;
	return dest;
}

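/* Reference pattern for the NOP padding expected in front of a function */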
static const u8 nops[] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};

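/*
 * Copy the thunk template into the padding area in front of @dest and
 * return its start address, i.e. the new call target. @direct indicates
 * that @dest lies within the text currently being patched and is not
 * live yet, so a plain memcpy() suffices; otherwise the pad is written
 * with text_poke_copy_locked(). Returns NULL when the padding area does
 * not consist of NOPs.
 */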
static __init_or_module void *patch_dest(void *dest, bool direct)
{
	unsigned int tsize = SKL_TMPL_SIZE;
	u8 *pad = dest - tsize;

	/* Already patched? */
	if (!bcmp(pad, skl_call_thunk_template, tsize))
		return pad;

	/* Ensure there are nops */
	if (bcmp(pad, nops, tsize)) {
		pr_warn_once("Invalid padding area for %pS\n", dest);
		return NULL;
	}

	if (direct)
		memcpy(pad, skl_call_thunk_template, tsize);
	else
		text_poke_copy_locked(pad, skl_call_thunk_template, tsize, true);
	return pad;
}

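/*
 * Rewrite a direct call inside @ct so that it targets the accounting
 * thunk placed in front of its original destination.
 */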
static __init_or_module void patch_call(void *addr, const struct core_text *ct)
{
	void *pad, *dest;
	u8 bytes[8];

	if (!within_coretext(ct, addr))
		return;

	dest = call_get_dest(addr);
	if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
		return;

	if (!is_coretext(ct, dest))
		return;

	pad = patch_dest(dest, within_coretext(ct, dest));
	if (!pad)
		return;

	prdbg("Patch call at: %pS %px to %pS %px -> %px\n", addr, addr,
		dest, dest, pad);
	__text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
	text_poke_early(addr, bytes, CALL_INSN_SIZE);
}

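/* The __call_sites section stores each call location as a relative offset */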
static __init_or_module void
patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
{
	s32 *s;

	for (s = start; s < end; s++)
		patch_call((void *)s + *s, ct);
}

static __init_or_module void
patch_paravirt_call_sites(struct paravirt_patch_site *start,
			  struct paravirt_patch_site *end,
			  const struct core_text *ct)
{
	struct paravirt_patch_site *p;

	for (p = start; p < end; p++)
		patch_call(p->instr, ct);
}

static __init_or_module void
callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
{
	prdbg("Patching call sites %s\n", ct->name);
	patch_call_sites(cs->call_start, cs->call_end, ct);
	patch_paravirt_call_sites(cs->pv_start, cs->pv_end, ct);
	prdbg("Patching call sites done %s\n", ct->name);
}

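/*
 * Patch all direct call sites in the built-in kernel text. Invoked once
 * during boot when X86_FEATURE_CALL_DEPTH is enabled.
 */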
void __init callthunks_patch_builtin_calls(void)
{
	struct callthunk_sites cs = {
		.call_start	= __call_sites,
		.call_end	= __call_sites_end,
		.pv_start	= __parainstructions,
		.pv_end		= __parainstructions_end
	};

	if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
		return;

	pr_info("Setting up call depth tracking\n");
	mutex_lock(&text_mutex);
	callthunks_setup(&cs, &builtin_coretext);
	thunks_initialized = true;
	mutex_unlock(&text_mutex);
}

#ifdef CONFIG_MODULES
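/* Patch the call sites of a module when it is loaded */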
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
					    struct module *mod)
{
	struct core_text ct = {
		.base = (unsigned long)mod->core_layout.base,
		.end  = (unsigned long)mod->core_layout.base + mod->core_layout.size,
		.name = mod->name,
	};

	if (!thunks_initialized)
		return;

	mutex_lock(&text_mutex);
	callthunks_setup(cs, &ct);
	mutex_unlock(&text_mutex);
}
#endif /* CONFIG_MODULES */