xref: /openbmc/linux/arch/x86/kernel/kprobes/opt.c (revision dd502a81077a5f3b3e19fa9a1accffdcab5ad5bc)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Kernel Probes Jump Optimization (Optprobes)
4  *
5  * Copyright (C) IBM Corporation, 2002, 2004
6  * Copyright (C) Hitachi Ltd., 2012
7  */
8 #include <linux/kprobes.h>
9 #include <linux/perf_event.h>
10 #include <linux/ptrace.h>
11 #include <linux/string.h>
12 #include <linux/slab.h>
13 #include <linux/hardirq.h>
14 #include <linux/preempt.h>
15 #include <linux/extable.h>
16 #include <linux/kdebug.h>
17 #include <linux/kallsyms.h>
18 #include <linux/ftrace.h>
19 #include <linux/frame.h>
20 #include <linux/pgtable.h>
21 #include <linux/static_call.h>
22 
23 #include <asm/text-patching.h>
24 #include <asm/cacheflush.h>
25 #include <asm/desc.h>
26 #include <linux/uaccess.h>
27 #include <asm/alternative.h>
28 #include <asm/insn.h>
29 #include <asm/debugreg.h>
30 #include <asm/set_memory.h>
31 #include <asm/sections.h>
32 #include <asm/nospec-branch.h>
33 
34 #include "common.h"
35 
36 unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
37 {
38 	struct optimized_kprobe *op;
39 	struct kprobe *kp;
40 	long offs;
41 	int i;
42 
43 	for (i = 0; i < JMP32_INSN_SIZE; i++) {
44 		kp = get_kprobe((void *)addr - i);
45 		/* This function only handles jump-optimized kprobes */
46 		if (kp && kprobe_optimized(kp)) {
47 			op = container_of(kp, struct optimized_kprobe, kp);
48 			/* If op->list is not empty, op is still being optimized */
49 			if (list_empty(&op->list))
50 				goto found;
51 		}
52 	}
53 
54 	return addr;
55 found:
56 	/*
57 	 * If the kprobe is optimized, the original bytes may have been
58 	 * overwritten by the jump destination address. In that case, recover
59 	 * the original bytes from the op->optinsn.copied_insn buffer.
60 	 */
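	/*
	 * Layout of an optimized probe site (illustrative):
	 *
	 *   kp->addr: e9 <rel32>    -- JMP32_INSN_SIZE (5) bytes in total
	 *
	 * kp->opcode holds the original first byte, and
	 * op->optinsn.copied_insn holds the DISP32_SIZE (4) original bytes
	 * that the relative displacement overwrote.
	 */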
61 	if (copy_from_kernel_nofault(buf, (void *)addr,
62 		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
63 		return 0UL;
64 
65 	if (addr == (unsigned long)kp->addr) {
66 		buf[0] = kp->opcode;
67 		memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
68 	} else {
69 		offs = addr - (unsigned long)kp->addr - 1;
70 		memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
71 	}
72 
73 	return (unsigned long)buf;
74 }
75 
76 static void synthesize_clac(kprobe_opcode_t *addr)
77 {
78 	/*
79 	 * Can't be static_cpu_has() due to how objtool treats this feature bit.
80 	 * This isn't a fast path anyway.
81 	 */
82 	if (!boot_cpu_has(X86_FEATURE_SMAP))
83 		return;
84 
85 	/* Replace the NOP3 with CLAC */
86 	addr[0] = 0x0f;
87 	addr[1] = 0x01;
88 	addr[2] = 0xca;
89 }
90 
91 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
92 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
93 {
94 #ifdef CONFIG_X86_64
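	/* 48 bf <imm64>: movabs $val, %rdi */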
95 	*addr++ = 0x48;
96 	*addr++ = 0xbf;
97 #else
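	/* b8 <imm32>: mov $val, %eax */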
98 	*addr++ = 0xb8;
99 #endif
100 	*(unsigned long *)addr = val;
101 }
102 
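/*
 * Trampoline template, copied and patched for each optimized probe. It
 * builds a struct pt_regs on the stack, loads the optimized_kprobe pointer
 * into the first-argument register (the immediate patched in at
 * optprobe_template_val), calls optimized_callback() (the call patched in at
 * optprobe_template_call), restores the registers, and then falls through to
 * the copied original instructions placed after optprobe_template_end.
 */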
103 asm (
104 			".pushsection .rodata\n"
105 			"optprobe_template_func:\n"
106 			".global optprobe_template_entry\n"
107 			"optprobe_template_entry:\n"
108 #ifdef CONFIG_X86_64
109 			/* We don't bother saving the ss register */
110 			"	pushq %rsp\n"
111 			"	pushfq\n"
112 			".global optprobe_template_clac\n"
113 			"optprobe_template_clac:\n"
114 			ASM_NOP3
115 			SAVE_REGS_STRING
116 			"	movq %rsp, %rsi\n"
117 			".global optprobe_template_val\n"
118 			"optprobe_template_val:\n"
119 			ASM_NOP5
120 			ASM_NOP5
121 			".global optprobe_template_call\n"
122 			"optprobe_template_call:\n"
123 			ASM_NOP5
124 			/* Move flags to rsp */
125 			"	movq 18*8(%rsp), %rdx\n"
126 			"	movq %rdx, 19*8(%rsp)\n"
127 			RESTORE_REGS_STRING
128 			/* Skip flags entry */
129 			"	addq $8, %rsp\n"
130 			"	popfq\n"
131 #else /* CONFIG_X86_32 */
132 			"	pushl %esp\n"
133 			"	pushfl\n"
134 			".global optprobe_template_clac\n"
135 			"optprobe_template_clac:\n"
136 			ASM_NOP3
137 			SAVE_REGS_STRING
138 			"	movl %esp, %edx\n"
139 			".global optprobe_template_val\n"
140 			"optprobe_template_val:\n"
141 			ASM_NOP5
142 			".global optprobe_template_call\n"
143 			"optprobe_template_call:\n"
144 			ASM_NOP5
145 			/* Move flags into esp */
146 			"	movl 14*4(%esp), %edx\n"
147 			"	movl %edx, 15*4(%esp)\n"
148 			RESTORE_REGS_STRING
149 			/* Skip flags entry */
150 			"	addl $4, %esp\n"
151 			"	popfl\n"
152 #endif
153 			".global optprobe_template_end\n"
154 			"optprobe_template_end:\n"
155 			".popsection\n");
156 
157 void optprobe_template_func(void);
158 STACK_FRAME_NON_STANDARD(optprobe_template_func);
159 
160 #define TMPL_CLAC_IDX \
161 	((long)optprobe_template_clac - (long)optprobe_template_entry)
162 #define TMPL_MOVE_IDX \
163 	((long)optprobe_template_val - (long)optprobe_template_entry)
164 #define TMPL_CALL_IDX \
165 	((long)optprobe_template_call - (long)optprobe_template_entry)
166 #define TMPL_END_IDX \
167 	((long)optprobe_template_end - (long)optprobe_template_entry)
168 
169 /* Optimized kprobe callback function: called from optinsn */
170 static void
171 optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
172 {
173 	/* This is possible if op is under delayed unoptimization */
174 	if (kprobe_disabled(&op->kp))
175 		return;
176 
177 	preempt_disable();
178 	if (kprobe_running()) {
179 		kprobes_inc_nmissed_count(&op->kp);
180 	} else {
181 		struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
182 		/* Save skipped registers */
183 		regs->cs = __KERNEL_CS;
184 #ifdef CONFIG_X86_32
185 		regs->cs |= get_kernel_rpl();
186 		regs->gs = 0;
187 #endif
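		/*
		 * Emulate an INT3 hit: handlers expect regs->ip to point
		 * just past the breakpoint byte at kp->addr.
		 */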
188 		regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
189 		regs->orig_ax = ~0UL;
190 
191 		__this_cpu_write(current_kprobe, &op->kp);
192 		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
193 		opt_pre_handler(&op->kp, regs);
194 		__this_cpu_write(current_kprobe, NULL);
195 	}
196 	preempt_enable();
197 }
198 NOKPROBE_SYMBOL(optimized_callback);
199 
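/*
 * Copy whole instructions from @src until at least JMP32_INSN_SIZE bytes are
 * covered, so the 5-byte jump written at the probe site never splits an
 * instruction. @real is the address at which the copy will eventually run;
 * __copy_instruction() uses it to fix up RIP-relative operands.
 */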
200 static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
201 {
202 	struct insn insn;
203 	int len = 0, ret;
204 
205 	while (len < JMP32_INSN_SIZE) {
206 		ret = __copy_instruction(dest + len, src + len, real + len, &insn);
207 		if (!ret || !can_boost(&insn, src + len))
208 			return -EINVAL;
209 		len += ret;
210 	}
211 	/* Check whether the address range is reserved */
212 	if (ftrace_text_reserved(src, src + len - 1) ||
213 	    alternatives_text_reserved(src, src + len - 1) ||
214 	    jump_label_text_reserved(src, src + len - 1) ||
215 	    static_call_text_reserved(src, src + len - 1))
216 		return -EBUSY;
217 
218 	return len;
219 }
220 
221 /* Check whether insn is an indirect jump */
222 static int __insn_is_indirect_jump(struct insn *insn)
223 {
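	/* 0xff with ModRM.reg 4 or 5 is an indirect (near/far) jump; 0xea is a direct far jump. */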
224 	return ((insn->opcode.bytes[0] == 0xff &&
225 		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
226 		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
227 }
228 
229 /* Check whether insn jumps into the specified address range */
230 static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
231 {
232 	unsigned long target = 0;
233 
234 	switch (insn->opcode.bytes[0]) {
235 	case 0xe0:	/* loopne */
236 	case 0xe1:	/* loope */
237 	case 0xe2:	/* loop */
238 	case 0xe3:	/* jcxz */
239 	case 0xe9:	/* near relative jump */
240 	case 0xeb:	/* short relative jump */
241 		break;
242 	case 0x0f:
243 		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
244 			break;
245 		return 0;
246 	default:
247 		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
248 			break;
249 		return 0;
250 	}
251 	target = (unsigned long)insn->next_byte + insn->immediate.value;
252 
253 	return (start <= target && target <= start + len);
254 }
255 
256 static int insn_is_indirect_jump(struct insn *insn)
257 {
258 	int ret = __insn_is_indirect_jump(insn);
259 
260 #ifdef CONFIG_RETPOLINE
261 	/*
262 	 * A jump to x86_indirect_thunk_* is treated as an indirect jump.
263 	 * Note that even with CONFIG_RETPOLINE=y, a kernel compiled with an
264 	 * older gcc may still use indirect jumps, so this check is added on
265 	 * top of the indirect-jump check rather than replacing it.
266 	 */
267 	if (!ret)
268 		ret = insn_jump_into_range(insn,
269 				(unsigned long)__indirect_thunk_start,
270 				(unsigned long)__indirect_thunk_end -
271 				(unsigned long)__indirect_thunk_start);
272 #endif
273 	return ret;
274 }
275 
276 /* Decode the whole function to ensure no instruction jumps into the target */
277 static int can_optimize(unsigned long paddr)
278 {
279 	unsigned long addr, size = 0, offset = 0;
280 	struct insn insn;
281 	kprobe_opcode_t buf[MAX_INSN_SIZE];
282 
283 	/* Look up the symbol containing addr */
284 	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
285 		return 0;
286 
287 	/*
288 	 * Do not optimize in the entry code due to the unstable
289 	 * stack handling and register setup.
290 	 */
291 	if (((paddr >= (unsigned long)__entry_text_start) &&
292 	     (paddr <  (unsigned long)__entry_text_end)))
293 		return 0;
294 
295 	/* Check that there is enough space for a relative jump. */
296 	if (size - offset < JMP32_INSN_SIZE)
297 		return 0;
298 
299 	/* Decode instructions */
300 	addr = paddr - offset;
301 	while (addr < paddr - offset + size) { /* Decode until function end */
302 		unsigned long recovered_insn;
303 		if (search_exception_tables(addr))
304 			/*
305 			 * Since some fixup code may jump into this function,
306 			 * we can't optimize a kprobe in this function.
307 			 */
308 			return 0;
309 		recovered_insn = recover_probed_instruction(buf, addr);
310 		if (!recovered_insn)
311 			return 0;
312 		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
313 		insn_get_length(&insn);
314 		/* Another subsystem has put a breakpoint here */
315 		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
316 			return 0;
317 		/* Recover address */
318 		insn.kaddr = (void *)addr;
319 		insn.next_byte = (void *)(addr + insn.length);
320 		/* Check that the instruction doesn't jump into the target */
321 		if (insn_is_indirect_jump(&insn) ||
322 		    insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
323 					 DISP32_SIZE))
324 			return 0;
325 		addr += insn.length;
326 	}
327 
328 	return 1;
329 }
330 
331 /* Check whether the optimized_kprobe can actually be optimized. */
332 int arch_check_optimized_kprobe(struct optimized_kprobe *op)
333 {
334 	int i;
335 	struct kprobe *p;
336 
337 	for (i = 1; i < op->optinsn.size; i++) {
338 		p = get_kprobe(op->kp.addr + i);
339 		if (p && !kprobe_disabled(p))
340 			return -EEXIST;
341 	}
342 
343 	return 0;
344 }
345 
346 /* Check whether addr is within the optimized instructions. */
347 int arch_within_optimized_kprobe(struct optimized_kprobe *op,
348 				 unsigned long addr)
349 {
350 	return ((unsigned long)op->kp.addr <= addr &&
351 		(unsigned long)op->kp.addr + op->optinsn.size > addr);
352 }
353 
354 /* Free optimized instruction slot */
355 static
356 void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
357 {
358 	u8 *slot = op->optinsn.insn;
359 	if (slot) {
360 		int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;
361 
362 		/* Record the perf event before freeing the slot */
363 		if (dirty)
364 			perf_event_text_poke(slot, slot, len, NULL, 0);
365 
366 		free_optinsn_slot(slot, dirty);
367 		op->optinsn.insn = NULL;
368 		op->optinsn.size = 0;
369 	}
370 }
371 
372 void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
373 {
374 	__arch_remove_optimized_kprobe(op, 1);
375 }
376 
377 /*
378  * Copy the target instructions that will be replaced by the jump.
379  * The target instructions MUST be relocatable (this is checked inside).
380  * This is called when a new aggr(opt)probe is allocated or reused.
381  */
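/*
 * Rough layout of the resulting out-of-line buffer:
 *
 *   [ template (TMPL_END_IDX bytes)                         ]
 *   [ copied original instructions (op->optinsn.size bytes) ]
 *   [ JMP32 back to op->kp.addr + op->optinsn.size          ]
 */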
382 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
383 				  struct kprobe *__unused)
384 {
385 	u8 *buf = NULL, *slot;
386 	int ret, len;
387 	long rel;
388 
389 	if (!can_optimize((unsigned long)op->kp.addr))
390 		return -EILSEQ;
391 
392 	buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
393 	if (!buf)
394 		return -ENOMEM;
395 
396 	op->optinsn.insn = slot = get_optinsn_slot();
397 	if (!slot) {
398 		ret = -ENOMEM;
399 		goto out;
400 	}
401 
402 	/*
403 	 * Verify that the address gap is within the +/-2GB range, because
404 	 * this uses a relative jump.
405 	 */
406 	rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
407 	if (abs(rel) > 0x7fffffff) {
408 		ret = -ERANGE;
409 		goto err;
410 	}
411 
412 	/* Copy arch-dep-instance from template */
413 	memcpy(buf, optprobe_template_entry, TMPL_END_IDX);
414 
415 	/* Copy instructions into the out-of-line buffer */
416 	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
417 					  slot + TMPL_END_IDX);
418 	if (ret < 0)
419 		goto err;
420 	op->optinsn.size = ret;
421 	len = TMPL_END_IDX + op->optinsn.size;
422 
423 	synthesize_clac(buf + TMPL_CLAC_IDX);
424 
425 	/* Set probe information */
426 	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
427 
428 	/* Set probe function call */
429 	synthesize_relcall(buf + TMPL_CALL_IDX,
430 			   slot + TMPL_CALL_IDX, optimized_callback);
431 
432 	/* Set the returning jmp instruction at the tail of the out-of-line buffer */
433 	synthesize_reljump(buf + len, slot + len,
434 			   (u8 *)op->kp.addr + op->optinsn.size);
435 	len += JMP32_INSN_SIZE;
436 
437 	/*
438 	 * Note: len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is also
439 	 * used in __arch_remove_optimized_kprobe().
440 	 */
441 
442 	/* We have to use text_poke() for the instruction buffer because it is RO */
443 	perf_event_text_poke(slot, NULL, 0, buf, len);
444 	text_poke(slot, buf, len);
445 
446 	ret = 0;
447 out:
448 	kfree(buf);
449 	return ret;
450 
451 err:
452 	__arch_remove_optimized_kprobe(op, 0);
453 	goto out;
454 }
455 
456 /*
457  * Replace breakpoints (INT3) with relative jumps (JMP.d32).
458  * The caller must hold kprobe_mutex and text_mutex.
459  *
460  * The caller will have installed a regular kprobe and after that issued
461  * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
462  * the 4 bytes after the INT3 are unused and can now be overwritten.
463  */
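/*
 * Probe-site bytes, before and after (illustrative):
 *
 *   before: cc <4 original bytes>   INT3 + tail of the original instruction(s)
 *   after:  e9 <rel32>              JMP.d32 to op->optinsn.insn
 *
 * The four bytes after the INT3 are saved in op->optinsn.copied_insn below
 * so that arch_unoptimize_kprobe() can restore them later.
 */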
464 void arch_optimize_kprobes(struct list_head *oplist)
465 {
466 	struct optimized_kprobe *op, *tmp;
467 	u8 insn_buff[JMP32_INSN_SIZE];
468 
469 	list_for_each_entry_safe(op, tmp, oplist, list) {
470 		s32 rel = (s32)((long)op->optinsn.insn -
471 			((long)op->kp.addr + JMP32_INSN_SIZE));
472 
473 		WARN_ON(kprobe_disabled(&op->kp));
474 
475 		/* Back up the instructions which will be replaced by the jump address */
476 		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
477 		       DISP32_SIZE);
478 
479 		insn_buff[0] = JMP32_INSN_OPCODE;
480 		*(s32 *)(&insn_buff[1]) = rel;
481 
482 		text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);
483 
484 		list_del_init(&op->list);
485 	}
486 }
487 
488 /*
489  * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
490  *
491  * After that, we can restore the 4 bytes after the INT3 to undo what
492  * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
493  * unused once the INT3 lands.
494  */
495 void arch_unoptimize_kprobe(struct optimized_kprobe *op)
496 {
497 	u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
498 	u8 old[JMP32_INSN_SIZE];
499 	u8 *addr = op->kp.addr;
500 
501 	memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
502 	memcpy(new + INT3_INSN_SIZE,
503 	       op->optinsn.copied_insn,
504 	       JMP32_INSN_SIZE - INT3_INSN_SIZE);
505 
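	/*
	 * Poke the INT3 byte first and sync: from then on any CPU executing
	 * this address traps into the regular kprobe path rather than
	 * running the tail of the old JMP, so the remaining four bytes can
	 * be restored safely afterwards.
	 */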
506 	text_poke(addr, new, INT3_INSN_SIZE);
507 	text_poke_sync();
508 	text_poke(addr + INT3_INSN_SIZE,
509 		  new + INT3_INSN_SIZE,
510 		  JMP32_INSN_SIZE - INT3_INSN_SIZE);
511 	text_poke_sync();
512 
513 	perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
514 }
515 
516 /*
517  * Recover original instructions and breakpoints from relative jumps.
518  * The caller must hold kprobe_mutex.
519  */
520 extern void arch_unoptimize_kprobes(struct list_head *oplist,
521 				    struct list_head *done_list)
522 {
523 	struct optimized_kprobe *op, *tmp;
524 
525 	list_for_each_entry_safe(op, tmp, oplist, list) {
526 		arch_unoptimize_kprobe(op);
527 		list_move(&op->list, done_list);
528 	}
529 }
530 
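/*
 * If an optimized probe is hit through its INT3 (e.g. while the jump is
 * being installed or removed), resume execution at the copied instructions
 * in the out-of-line buffer instead of single-stepping.
 */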
531 int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
532 {
533 	struct optimized_kprobe *op;
534 
535 	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
536 		/* This kprobe is really able to run the optimized path. */
537 		op = container_of(p, struct optimized_kprobe, kp);
538 		/* Detour through copied instructions */
539 		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
540 		if (!reenter)
541 			reset_current_kprobe();
542 		return 1;
543 	}
544 	return 0;
545 }
546 NOKPROBE_SYMBOL(setup_detour_execution);
547