xref: /openbmc/linux/arch/x86/kernel/callthunks.c (revision eaf44c81)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 #define pr_fmt(fmt) "callthunks: " fmt
4 
5 #include <linux/kallsyms.h>
6 #include <linux/memory.h>
7 #include <linux/moduleloader.h>
8 
9 #include <asm/alternative.h>
10 #include <asm/cpu.h>
11 #include <asm/ftrace.h>
12 #include <asm/insn.h>
13 #include <asm/kexec.h>
14 #include <asm/nospec-branch.h>
15 #include <asm/paravirt.h>
16 #include <asm/sections.h>
17 #include <asm/switch_to.h>
18 #include <asm/sync_core.h>
19 #include <asm/text-patching.h>
20 #include <asm/xen/hypercall.h>
21 
/* Set to 1 by the "debug-callthunks" command line option. */
static int __initdata_or_module debug_callthunks;

/*
 * Debug printk wrapper: emits a KERN_DEBUG message only when the
 * "debug-callthunks" boot option was given.
 */
#define prdbg(fmt, args...)					\
do {								\
	if (debug_callthunks)					\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
} while(0)
29 
/* Boot parameter handler: enable prdbg() output. Returns 1 = handled. */
static int __init debug_thunks(char *str)
{
	debug_callthunks = 1;
	return 1;
}
__setup("debug-callthunks", debug_thunks);
36 
37 extern s32 __call_sites[], __call_sites_end[];
38 
/* Thunk code template which gets copied into the padding area. */
struct thunk_desc {
	void		*template;	/* Template code to copy */
	unsigned int	template_size;	/* Size of the template in bytes */
};
43 
/* A patchable [base, end) text range, with a name for debug output. */
struct core_text {
	unsigned long	base;
	unsigned long	end;
	const char	*name;
};
49 
50 static bool thunks_initialized __ro_after_init;
51 
/* The built-in kernel's own text range: _text .. _etext. */
static const struct core_text builtin_coretext = {
	.base = (unsigned long)_text,
	.end  = (unsigned long)_etext,
	.name = "builtin",
};
57 
58 static struct thunk_desc callthunk_desc __ro_after_init;
59 
60 extern void error_entry(void);
61 extern void xen_error_entry(void);
62 extern void paranoid_entry(void);
63 
64 static inline bool within_coretext(const struct core_text *ct, void *addr)
65 {
66 	unsigned long p = (unsigned long)addr;
67 
68 	return ct->base <= p && p < ct->end;
69 }
70 
/*
 * True when @addr lies within the core text of a loaded module;
 * always false when module support is compiled out.
 *
 * Preemption is disabled so the module found by __module_address()
 * cannot go away while the range check runs.
 */
static inline bool within_module_coretext(void *addr)
{
	bool ret = false;

#ifdef CONFIG_MODULES
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)addr);
	if (mod && within_module_core((unsigned long)addr, mod))
		ret = true;
	preempt_enable();
#endif
	return ret;
}
86 
87 static bool is_coretext(const struct core_text *ct, void *addr)
88 {
89 	if (ct && within_coretext(ct, addr))
90 		return true;
91 	if (within_coretext(&builtin_coretext, addr))
92 		return true;
93 	return within_module_coretext(addr);
94 }
95 
/*
 * Call destinations which must not be redirected through an accounting
 * thunk: entry/switch code that deals with the return stack itself (see
 * the inline comments), and raw text ranges (kexec relocation code, Xen
 * hypercall page) which have no thunk padding in front of them.
 */
static __init_or_module bool skip_addr(void *dest)
{
	if (dest == error_entry)
		return true;
	if (dest == paranoid_entry)
		return true;
	if (dest == xen_error_entry)
		return true;
	/* Does FILL_RSB... */
	if (dest == __switch_to_asm)
		return true;
	/* Accounts directly */
	if (dest == ret_from_fork)
		return true;
#ifdef CONFIG_HOTPLUG_CPU
	if (dest == start_cpu0)
		return true;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	if (dest == __fentry__)
		return true;
#endif
#ifdef CONFIG_KEXEC_CORE
	/* Anywhere inside the kexec control code range */
	if (dest >= (void *)relocate_kernel &&
	    dest < (void*)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
		return true;
#endif
#ifdef CONFIG_XEN
	/* Anywhere inside the Xen hypercall page */
	if (dest >= (void *)hypercall_page &&
	    dest < (void*)hypercall_page + PAGE_SIZE)
		return true;
#endif
	return false;
}
130 
131 static __init_or_module void *call_get_dest(void *addr)
132 {
133 	struct insn insn;
134 	void *dest;
135 	int ret;
136 
137 	ret = insn_decode_kernel(&insn, addr);
138 	if (ret)
139 		return ERR_PTR(ret);
140 
141 	/* Patched out call? */
142 	if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
143 		return NULL;
144 
145 	dest = addr + insn.length + insn.immediate.value;
146 	if (skip_addr(dest))
147 		return NULL;
148 	return dest;
149 }
150 
/*
 * 32 single byte NOPs (0x90): the pattern an untouched padding area in
 * front of a call destination is compared against before patching.
 */
static const u8 nops[] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};
157 
/*
 * Install the thunk template into the padding area in front of @dest.
 *
 * Returns the start of the patched padding area (the new call target),
 * or NULL when the padding does not contain the expected NOPs.
 *
 * @direct: callers pass true when @dest lies inside the text range
 * currently being patched, in which case a plain memcpy() is used;
 * otherwise the template is written via text_poke_copy_locked().
 */
static __init_or_module void *patch_dest(void *dest, bool direct)
{
	unsigned int tsize = callthunk_desc.template_size;
	u8 *pad = dest - tsize;

	/* Already patched? */
	if (!bcmp(pad, callthunk_desc.template, tsize))
		return pad;

	/* Ensure there are nops */
	if (bcmp(pad, nops, tsize)) {
		pr_warn_once("Invalid padding area for %pS\n", dest);
		return NULL;
	}

	if (direct)
		memcpy(pad, callthunk_desc.template, tsize);
	else
		text_poke_copy_locked(pad, callthunk_desc.template, tsize, true);
	return pad;
}
179 
/*
 * Redirect a single call site: decode the CALL at @addr, install the
 * thunk template in the destination's padding area and rewrite the CALL
 * to target the thunk instead of the function itself.
 */
static __init_or_module void patch_call(void *addr, const struct core_text *ct)
{
	void *pad, *dest;
	u8 bytes[8];

	/* Only touch call sites inside the range being patched */
	if (!within_coretext(ct, addr))
		return;

	dest = call_get_dest(addr);
	if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
		return;

	/* The destination must be patchable text as well */
	if (!is_coretext(ct, dest))
		return;

	pad = patch_dest(dest, within_coretext(ct, dest));
	if (!pad)
		return;

	prdbg("Patch call at: %pS %px to %pS %px -> %px \n", addr, addr,
		dest, dest, pad);
	/* Retarget the CALL at the thunk in the padding area */
	__text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
	text_poke_early(addr, bytes, CALL_INSN_SIZE);
}
204 
205 static __init_or_module void
206 patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
207 {
208 	s32 *s;
209 
210 	for (s = start; s < end; s++)
211 		patch_call((void *)s + *s, ct);
212 }
213 
214 static __init_or_module void
215 patch_paravirt_call_sites(struct paravirt_patch_site *start,
216 			  struct paravirt_patch_site *end,
217 			  const struct core_text *ct)
218 {
219 	struct paravirt_patch_site *p;
220 
221 	for (p = start; p < end; p++)
222 		patch_call(p->instr, ct);
223 }
224 
225 static __init_or_module void
226 callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
227 {
228 	prdbg("Patching call sites %s\n", ct->name);
229 	patch_call_sites(cs->call_start, cs->call_end, ct);
230 	patch_paravirt_call_sites(cs->pv_start, cs->pv_end, ct);
231 	prdbg("Patching call sites done%s\n", ct->name);
232 }
233 
/*
 * Patch all call sites of the built-in kernel text. Runs once at boot
 * and, on success, marks the thunks initialized so that subsequently
 * loaded modules get patched as well.
 */
void __init callthunks_patch_builtin_calls(void)
{
	struct callthunk_sites cs = {
		.call_start	= __call_sites,
		.call_end	= __call_sites_end,
		.pv_start	= __parainstructions,
		.pv_end		= __parainstructions_end
	};

	/* Nothing to do unless call depth tracking is enabled */
	if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
		return;

	pr_info("Setting up call depth tracking\n");
	mutex_lock(&text_mutex);
	callthunks_setup(&cs, &builtin_coretext);
	thunks_initialized = true;
	mutex_unlock(&text_mutex);
}
252 
#ifdef CONFIG_MODULES
/*
 * Patch the call sites of a freshly loaded module, restricted to the
 * module's own core text range. No-op until the built-in kernel text
 * has been patched (thunks_initialized).
 */
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
					    struct module *mod)
{
	struct core_text ct = {
		.base = (unsigned long)mod->core_layout.base,
		.end  = (unsigned long)mod->core_layout.base + mod->core_layout.size,
		.name = mod->name,
	};

	if (!thunks_initialized)
		return;

	mutex_lock(&text_mutex);
	callthunks_setup(cs, &ct);
	mutex_unlock(&text_mutex);
}
#endif /* CONFIG_MODULES */
271