xref: /openbmc/linux/arch/x86/kernel/kprobes/core.c (revision 4949009e)
/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>

#include "common.h"

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
	 * Groups, and some special opcodes cannot be boosted.
	 * This is non-const and volatile to keep gcc from statically
	 * optimizing it out, as variable_test_bit makes gcc think only
	 * *(unsigned long*) is used.
	 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W
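
/*
 * Each W() row packs 16 flag bits into the half of a u32 selected by
 * (row % 32), so the array as a whole is one 256-bit bitmap indexed by
 * the second opcode byte.  For example, every bit in row 0x90 (the
 * setcc instructions 0x0f 0x90..0x9f) is set, so
 *
 *	test_bit(0x93, (unsigned long *)twobyte_is_boostable)
 *
 * is non-zero: "setae" can be boosted.
 */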

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only current task, but
			      doesn't switch kernel stack.*/
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static nokprobe_inline void
__synthesize_relative_insn(void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)from;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
void synthesize_reljump(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/* Insert a call instruction at address 'from', which calls address 'to'. */
void synthesize_relcall(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);
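
/*
 * Both helpers emit the 5-byte rel32 form.  For example, a jump
 * synthesized at 'from' targeting 'to' is laid out as
 *
 *	from:	e9 xx xx xx xx		; jmp rel32
 *
 * where the little-endian displacement xx..xx is to - (from + 5),
 * i.e. relative to the end of the instruction; a call uses the same
 * layout with opcode e8.
 */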

/*
 * Skip the prefixes of the instruction.
 */
static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
{
	insn_attr_t attr;

	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	while (inat_is_legacy_prefix(attr)) {
		insn++;
		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	}
#ifdef CONFIG_X86_64
	if (inat_is_rex_prefix(attr))
		insn++;
#endif
	return insn;
}
NOKPROBE_SYMBOL(skip_prefixes);
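
/*
 * For example, given the byte sequence "f0 48 0f b1 0a" (lock cmpxchg
 * %rcx,(%rdx) on x86-64), skip_prefixes() steps over the legacy lock
 * prefix 0xf0 and the REX.W byte 0x48 and returns a pointer to the
 * opcode byte 0x0f.
 */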

/*
 * Returns non-zero if the opcode is boostable.
 * RIP-relative instructions are adjusted at copy time in 64-bit mode.
 */
int can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

	if (search_exception_tables((unsigned long)opcodes))
		return 0;	/* Page fault may occur on this address. */

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry; /* REX prefix is boostable */
#endif
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
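
/*
 * For example, a single-byte "push %rax" (0x50) falls through to the
 * default case above and is boostable; "int3" (0xcc) lands in the
 * 0xc0 case and is rejected; and a REX prefix such as 0x48 retries
 * the check on the opcode byte that follows it.
 */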

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;

	kp = get_kprobe((void *)addr);
	/* There is no probe, return original address */
	if (!kp)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped
	 * at a different place, so __copy_instruction() tweaks its
	 * displacement.  In that case, we can't recover the original
	 * instruction from kp->ainsn.insn.
	 *
	 * On the other hand, kp->opcode has a copy of the first byte of
	 * the probed instruction, which was overwritten by int3.  Since
	 * the instruction at kp->addr is not modified by kprobes except
	 * for that first byte, we can recover the original instruction
	 * from it and kp->opcode.
	 */
	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	buf[0] = kp->opcode;
	return (unsigned long)buf;
}

/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must hold kprobe_mutex, or disable preemption, to
 * prevent the kprobes it references from being released.
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}
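
/*
 * For example, on a plain kprobe the byte at addr is now 0xcc (int3),
 * and the recovered copy in buf starts with kp->opcode followed by
 * the unmodified tail bytes, so a decoder sees the original
 * instruction.  An optimized probe instead begins with 0xe9 (jmp
 * rel32); __recover_optprobed_insn() rebuilds the covered bytes from
 * its saved copy of the original instructions.
 */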

/* Check if paddr is at an instruction boundary */
static int can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint with
		 * the original instruction in our buffer.
		 * Also, jump optimization changes the breakpoint into a
		 * relative jump.  Since relative jumps also occur in
		 * normal code, we simply keep decoding when no kprobe is
		 * registered at this address.
		 */
		__addr = recover_probed_instruction(buf, addr);
		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
		insn_get_length(&insn);

		/*
		 * Another debugging subsystem might insert this breakpoint.
		 * In that case, we can't recover it.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		addr += insn.length;
	}

	return (addr == paddr);
}
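
/*
 * For example, if a function starts with a 4-byte instruction,
 * probing symbol+3 fails: decoding from symbol+0 advances 4 bytes,
 * overshooting paddr, so the loop exits with addr > paddr and
 * can_probe() returns 0.  Probing symbol+4 in the same function
 * succeeds.
 */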

/*
 * Returns non-zero if opcode modifies the interrupt flag.
 */
static int is_IF_modifier(kprobe_opcode_t *insn)
{
	/* Skip prefixes */
	insn = skip_prefixes(insn);

	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	return 0;
}

/*
 * Copy an instruction and adjust the displacement if the instruction
 * uses the %rip-relative addressing mode (64-bit x86 only).
 * Returns the length of the copied instruction, or 0 if the original
 * instruction could not be recovered.
 */
int __copy_instruction(u8 *dest, u8 *src)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn =
		recover_probed_instruction(buf, (unsigned long)src);

	kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
	insn_get_length(&insn);
	/* Another subsystem has inserted a breakpoint; we failed to recover */
	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;
	memcpy(dest, insn.kaddr, insn.length);

#ifdef CONFIG_X86_64
	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		kernel_insn_init(&insn, dest, insn.length);
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
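		/*
		 * For example (with illustrative addresses): copying from
		 * src = 0xffffffff81000000 to an insn slot at
		 * dest = 0xffffffffa0000000 with an original displacement
		 * of 0x100 requires
		 *
		 *	newdisp = src + 0x100 - dest = 0x100 - 0x1f000000
		 *
		 * so that dest + insn-length + newdisp resolves to the
		 * same target as src + insn-length + 0x100.
		 */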
		newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(&insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn.length;
}

static int arch_copy_kprobe(struct kprobe *p)
{
	int ret;

	/* Copy the instruction, recovering the original if another probe has modified it. */
	ret = __copy_instruction(p->ainsn.insn, p->addr);
	if (!ret)
		return -EINVAL;

	/*
	 * __copy_instruction can modify the displacement of the instruction,
	 * but that doesn't affect the boostable check.
	 */
	if (can_boost(p->ainsn.insn))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	/* Check whether the instruction modifies the Interrupt Flag or not */
	p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);

	/* Also, the displacement change doesn't affect the first byte */
	p->opcode = p->ainsn.insn[0];

	return 0;
}

int arch_prepare_kprobe(struct kprobe *p)
{
	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	return arch_copy_kprobe(p);
}

void arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
		p->ainsn.insn = NULL;
	}
}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (p->ainsn.if_modifier)
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

static nokprobe_inline void clear_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

static nokprobe_inline void restore_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
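
/*
 * At this point the top of the probed function's stack holds the
 * return address pushed by the caller's "call".  For example:
 *
 *	before				after
 *	[return addr] <- sp		[&kretprobe_trampoline] <- sp
 *
 * so the probed function's "ret" lands in the trampoline below, which
 * hands control to trampoline_handler().
 */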

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);

/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in the .kprobes.text section.
		 * Raise a BUG, or we'll continue in an endless reentering loop
		 * and eventually overflow the stack.
		 */
		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
		       p->addr);
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode_vm(regs))
		return 0;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing. We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} else if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it.  Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address.  In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		preempt_enable_no_resched();
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			if (!skip_singlestep(p, regs, kcb))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);

/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's
 * handler.
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile (
			".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			SAVE_REGS_STRING
			"	movq %rsp, %rdi\n"
			"	call trampoline_handler\n"
			/* Replace saved sp with true return address. */
			"	movq %rax, 152(%rsp)\n"
			RESTORE_REGS_STRING
			"	popfq\n"
#else
			"	pushf\n"
			SAVE_REGS_STRING
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* Move flags to cs */
			"	movl 56(%esp), %edx\n"
			"	movl %edx, 52(%esp)\n"
			/* Replace saved flags with true return address. */
			"	movl %eax, 56(%esp)\n"
			RESTORE_REGS_STRING
			"	popf\n"
#endif
			"	ret\n");
}
NOKPROBE_SYMBOL(kretprobe_trampoline_holder);
NOKPROBE_SYMBOL(kretprobe_trampoline);
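
/*
 * On x86-64 the "pushq %rsp; pushfq; SAVE_REGS_STRING" sequence lays
 * out a struct pt_regs on the stack; the pushed %rsp lands in the
 * pt_regs->sp slot at offset 152, which is why the handler's return
 * value (the real return address) is written back with
 * "movq %rax, 152(%rsp)" and then consumed by the final "ret" after
 * the registers are restored.
 */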

/*
 * Called from kretprobe_trampoline
 */
__visible __used void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
NOKPROBE_SYMBOL(trampoline_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip prefixes */
	insn = skip_prefixes(insn);

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct. And this is boostable
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * This instruction can be executed directly if it
			 * jumps back to the correct address.
			 */
			synthesize_reljump((void *)regs->ip,
				(void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
NOKPROBE_SYMBOL(resume_execution);
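
/*
 * Boosting in practice: once a boostable instruction has been
 * single-stepped here, synthesize_reljump() has planted a "jmp rel32"
 * right after the copy in the insn slot.  On later hits,
 * setup_singlestep() can point regs->ip straight at the copy; the
 * instruction executes and the reljump returns to the original
 * instruction stream without raising a debug trap at all.
 */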

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_debug_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_debug_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen on single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_flags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
		/*
		 * We increment the nmissed count for accounting; the
		 * npre/npostfault counts could also be used to account
		 * for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault.  This could happen if
		 * the handler tries to access user space, e.g. via
		 * copy_from_user() or get_user().  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
			     void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	if (val == DIE_GPF) {
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
	}
	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~X86_EFLAGS_IF;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);

	/*
	 * jprobes use jprobe_return(), which skips the normal return
	 * path of the function, and this causes the function graph
	 * tracer's accounting to get messed up.
	 *
	 * Pause function graph tracing while performing the jprobe function.
	 */
	pause_graph_tracing();
	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

void jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile (
#ifdef CONFIG_X86_64
			"       xchg   %%rbx,%%rsp	\n"
#else
			"       xchgl   %%ebx,%%esp	\n"
#endif
			"       int3			\n"
			"       .globl jprobe_return_end\n"
			"       jprobe_return_end:	\n"
			"       nop			\n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}
NOKPROBE_SYMBOL(jprobe_return);
NOKPROBE_SYMBOL(jprobe_return_end);

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	void *saved_sp = kcb->jprobe_saved_sp;

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_regs(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_regs(regs);
			BUG();
		}
		/* It's OK to start function graph tracing again */
		unpause_graph_tracing();
		*regs = kcb->jprobe_saved_regs;
		memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(longjmp_break_handler);
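
/*
 * A minimal jprobe usage sketch (the probed function "my_func" and
 * the handler below are illustrative, not part of this file).  The
 * handler must mirror the probed function's signature and must end
 * by calling jprobe_return():
 *
 *	static int my_func_entry(int arg)
 *	{
 *		pr_info("my_func(%d) called\n", arg);
 *		jprobe_return();	(never returns normally)
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry		= my_func_entry,
 *		.kp.symbol_name	= "my_func",
 *	};
 *
 * register_jprobe(&my_jprobe) then arranges for setjmp_pre_handler()
 * above to redirect execution into my_func_entry(), and
 * longjmp_break_handler() restores the saved context afterwards.
 */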

bool arch_within_kprobe_blacklist(unsigned long addr)
{
	return (addr >= (unsigned long)__kprobes_text_start &&
		addr < (unsigned long)__kprobes_text_end) ||
	       (addr >= (unsigned long)__entry_text_start &&
		addr < (unsigned long)__entry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}