xref: /openbmc/linux/arch/x86/kernel/kprobes/core.c (revision 4da722ca)
/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>

#include "common.h"

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

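/*
 * stack_addr() yields the probed context's stack pointer as a pointer into
 * the kernel stack, so *stack_addr(regs) is the word on top of the stack
 * (e.g. the return address right after a 'call').
 */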
#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
	 * Groups, and some special opcodes cannot be boosted.
	 * This is non-const and volatile to keep gcc from statically
	 * optimizing it out, as variable_test_bit makes gcc think only
	 * *(unsigned long*) is used.
	 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W
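
/*
 * Example: rdtsc (0x0f 0x31) is boostable, so
 * test_bit(0x31, (unsigned long *)twobyte_is_boostable) returns 1, while a
 * conditional jump such as jne (0x0f 0x85) sits in the all-zero 0x80 row
 * and is not.
 */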

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only the current task,
			      but doesn't switch the kernel stack.*/
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static nokprobe_inline void
__synthesize_relative_insn(void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)from;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}
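
/*
 * Example: a reljump written at 0xffffffff81000000 that targets
 * 0xffffffff81000020 gets raddr = 0x20 - 5 = 0x1b, because the
 * displacement is relative to the end of the 5-byte instruction.
 */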

/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
void synthesize_reljump(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/* Insert a call instruction at address 'from', which calls address 'to'.*/
void synthesize_relcall(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);

/*
 * Skip the prefixes of the instruction.
 */
static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
{
	insn_attr_t attr;

	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	while (inat_is_legacy_prefix(attr)) {
		insn++;
		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	}
#ifdef CONFIG_X86_64
	if (inat_is_rex_prefix(attr))
		insn++;
#endif
	return insn;
}
NOKPROBE_SYMBOL(skip_prefixes);
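
/*
 * Example: for "lock cmpxchg %rbx,(%rcx)" (f0 48 0f b1 19), skip_prefixes()
 * steps over f0 (lock, a legacy prefix) and 48 (REX.W) and returns a
 * pointer to the 0f opcode byte.
 */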

/*
 * Returns non-zero if INSN is boostable, i.e. it can be executed directly
 * from its copy, with a reljump appended, instead of being single-stepped.
 * RIP-relative instructions are adjusted at copy time in 64-bit mode.
 */
int can_boost(struct insn *insn, void *addr)
{
	kprobe_opcode_t opcode;

	if (search_exception_tables((unsigned long)addr))
		return 0;	/* Page fault may occur on this address. */

	/* 2nd-byte opcode */
	if (insn->opcode.nbytes == 2)
		return test_bit(insn->opcode.bytes[1],
				(unsigned long *)twobyte_is_boostable);

	if (insn->opcode.nbytes != 1)
		return 0;

	/* Can't boost Address-size override prefix */
	if (unlikely(inat_is_address_size_prefix(insn->attr)))
		return 0;

	opcode = insn->opcode.bytes[0];

	switch (opcode & 0xf0) {
	case 0x60:
		/* can't boost "bound" */
		return (opcode != 0x62);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0x90:
		return opcode != 0x9a;	/* can't boost call far */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	unsigned long faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr);
	/*
	 * Addresses inside the ftrace location are refused by
	 * arch_check_ftrace_location(). Something went terribly wrong
	 * if such an address is checked here.
	 */
	if (WARN_ON(faddr && faddr != addr))
		return 0UL;
	/*
	 * Use the current code if it has not been modified by a kprobe
	 * and cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn holds the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped at a
	 * different location, so __copy_instruction() tweaks its
	 * displacement. In that case, we can't recover the instruction
	 * from kp->ainsn.insn.
	 *
	 * On the other hand, in case of a normal kprobe, kp->opcode holds a
	 * copy of the first byte of the probed instruction, which was
	 * overwritten by int3. Since the instruction at kp->addr is not
	 * modified by kprobes except for that first byte, we can recover
	 * the original instruction from it and kp->opcode.
	 *
	 * In case of kprobes using ftrace, we do not have a copy of
	 * the original instruction. In fact, the ftrace location might
	 * be modified at any time and could even be in an inconsistent
	 * state. Fortunately, we know that the original code is the ideal
	 * 5-byte long NOP.
	 */
	if (probe_kernel_read(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (faddr)
		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}

/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must hold kprobe_mutex, or disable preemption, to keep the
 * kprobes it references from being released.
 * Returns zero if the instruction cannot be recovered (or access failed).
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}

/* Check if paddr is at an instruction boundary */
static int can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint by the
		 * original instruction in our buffer.
		 * Also, jump optimization will change the breakpoint to a
		 * relative jump. Since relative jumps also occur in normal,
		 * unprobed code, we simply pass through if no kprobe is
		 * registered here.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return 0;
		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
		insn_get_length(&insn);

		/*
		 * Another debugging subsystem might insert this breakpoint.
		 * In that case, we can't recover it.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		addr += insn.length;
	}

	return (addr == paddr);
}

/*
 * Returns non-zero if opcode modifies the interrupt flag.
 */
static int is_IF_modifier(kprobe_opcode_t *insn)
{
	/* Skip prefixes */
	insn = skip_prefixes(insn);

	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	return 0;
}

/*
 * Copy an instruction, recovering it first if it has been modified by
 * kprobes, and adjust the displacement if the instruction uses the
 * %rip-relative addressing mode.
 * This returns the length of the copied instruction, or 0 on error.
 */
int __copy_instruction(u8 *dest, u8 *src, struct insn *insn)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn =
		recover_probed_instruction(buf, (unsigned long)src);

	if (!recovered_insn || !insn)
		return 0;

	/* This can access kernel text if given address is not recovered */
	if (probe_kernel_read(dest, (void *)recovered_insn, MAX_INSN_SIZE))
		return 0;

	kernel_insn_init(insn, dest, MAX_INSN_SIZE);
	insn_get_length(insn);

	/* Another subsystem has put a breakpoint there; we failed to recover it */
	if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;

#ifdef CONFIG_X86_64
	/* Only x86_64 has RIP relative instructions */
	if (insn_rip_relative(insn)) {
		s64 newdisp;
		u8 *disp;
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn->displacement.value
			  - (u8 *) dest;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
				src, dest, insn->displacement.value);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn->length;
}

/* Prepare a reljump right after the copied instruction, to boost it */
static void prepare_boost(struct kprobe *p, struct insn *insn)
{
	if (can_boost(insn, p->addr) &&
	    MAX_INSN_SIZE - insn->length >= RELATIVEJUMP_SIZE) {
		/*
		 * This instruction can be executed directly if it
		 * jumps back to the correct address.
		 */
		synthesize_reljump(p->ainsn.insn + insn->length,
				   p->addr + insn->length);
		p->ainsn.boostable = true;
	} else {
		p->ainsn.boostable = false;
	}
}
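
/*
 * Resulting layout of a boosted slot:
 *
 *   p->ainsn.insn: [copied insn][e9 <rel32 back to p->addr + insn->length>]
 *
 * so the copied instruction can run straight through, without an
 * int3/single-step round trip, and jump back into the original
 * instruction stream.
 */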

/* Make the page RW and non-executable again before releasing it */
void free_insn_page(void *page)
{
	set_memory_nx((unsigned long)page & PAGE_MASK, 1);
	set_memory_rw((unsigned long)page & PAGE_MASK, 1);
	module_memfree(page);
}

static int arch_copy_kprobe(struct kprobe *p)
{
	struct insn insn;
	int len;

	set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);

	/* Copy the instruction, recovering it if another probe has modified it. */
	len = __copy_instruction(p->ainsn.insn, p->addr, &insn);
	if (!len)
		return -EINVAL;

	/*
	 * __copy_instruction can modify the displacement of the instruction,
	 * but that doesn't affect the boostability check.
	 */
	prepare_boost(p, &insn);

	set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);

	/* Check whether the instruction modifies Interrupt Flag or not */
	p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);

	/* Also, displacement change doesn't affect the first byte */
	p->opcode = p->ainsn.insn[0];

	return 0;
}

int arch_prepare_kprobe(struct kprobe *p)
{
	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	return arch_copy_kprobe(p);
}

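/*
 * Arming swaps the first byte of the probed instruction with int3 (0xcc).
 * A one-byte store is atomic, so other CPUs observe either the original
 * opcode or the breakpoint, never a torn instruction.
 */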
void arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
		p->ainsn.insn = NULL;
	}
}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (p->ainsn.if_modifier)
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

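/*
 * BTF (block step) in DEBUGCTLMSR makes TF trap on branches instead of on
 * every instruction. Clear it while single-stepping the copied instruction
 * so the #DB trap fires after exactly one instruction, then restore it.
 */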
static nokprobe_inline void clear_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

static nokprobe_inline void restore_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

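/*
 * At function entry the word on top of the stack is the return address the
 * 'call' pushed; save it in the kretprobe instance and overwrite it so the
 * function "returns" into kretprobe_trampoline instead.
 */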
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe neither resets nor sets
		 * current_kprobe, because boosting doesn't use
		 * single-stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);

/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in the .kprobes.text section.
		 * Raise a BUG, or we'll continue in an endless reentering loop
		 * and eventually hit a stack overflow.
		 */
		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
		       p->addr);
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing. We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} else if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it.  Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address.  In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		preempt_enable_no_resched();
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			if (!skip_singlestep(p, regs, kcb))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);

/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
asm(
	".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
#ifdef CONFIG_X86_64
	/* We don't bother saving the ss register */
	"	pushq %rsp\n"
	"	pushfq\n"
	SAVE_REGS_STRING
	"	movq %rsp, %rdi\n"
	"	call trampoline_handler\n"
	/* Replace saved sp with true return address. */
	"	movq %rax, 152(%rsp)\n"
	RESTORE_REGS_STRING
	"	popfq\n"
#else
	"	pushf\n"
	SAVE_REGS_STRING
	"	movl %esp, %eax\n"
	"	call trampoline_handler\n"
	/* Move flags to cs */
	"	movl 56(%esp), %edx\n"
	"	movl %edx, 52(%esp)\n"
	/* Replace saved flags with true return address. */
	"	movl %eax, 56(%esp)\n"
	RESTORE_REGS_STRING
	"	popf\n"
#endif
	"	ret\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n"
);
NOKPROBE_SYMBOL(kretprobe_trampoline);
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
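
/*
 * Note on the magic offset above: on x86_64, 152 is offsetof(struct pt_regs,
 * sp), the slot filled by the initial "pushq %rsp". Writing %rax (the real
 * return address computed by trampoline_handler()) there leaves it on top
 * of the stack after RESTORE_REGS_STRING/popfq, so the final "ret" returns
 * to the retprobed function's caller.
 */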

/*
 * Called from kretprobe_trampoline
 */
__visible __used void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry(ri, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
NOKPROBE_SYMBOL(trampoline_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
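
/*
 * Example for case 2): a copied 'call' executed at copy_ip pushed
 * copy_ip + 5 as its return address; rewriting the top of stack to
 * orig_ip + (*tos - copy_ip) turns that into orig_ip + 5.
 */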
static void resume_execution(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip prefixes */
	insn = skip_prefixes(insn);

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = true;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- fix return addr, like the indirect case below */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct. And this is boostable
			 */
			p->ainsn.boostable = true;
			goto no_change;
		}
	default:
		break;
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
NOKPROBE_SYMBOL(resume_execution);

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_debug_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set; in that case, continue the remaining processing
	 * of do_debug, as if this were not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_debug_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen on single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		/*
		 * Trap flag (TF) has been set here because this fault
		 * happened where the single stepping will be done.
		 * So clear it by resetting the current kprobe:
		 */
		regs->flags &= ~X86_EFLAGS_TF;

		/*
		 * If the TF flag was set before the kprobe hit,
		 * don't touch it:
		 */
		regs->flags |= kcb->kprobe_old_flags;

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
		/*
		 * We increment the nmissed count for accounting; the
		 * npre/npostfault counts could also be used to account
		 * for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault; this could happen if
		 * the handler tries to access user space via
		 * copy_from_user(), get_user(), etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs, trapnr))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
			     void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	if (val == DIE_GPF) {
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
	}
	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * by tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we
	 * copy a raw stack chunk with redzones:
	 */
	__memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
	regs->flags &= ~X86_EFLAGS_IF;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);

	/*
	 * jprobes use jprobe_return(), which skips the normal return
	 * path of the function and causes the function graph tracer's
	 * accounting to get messed up.
	 *
	 * Pause function graph tracing while performing the jprobe function.
	 */
	pause_graph_tracing();
	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

void jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/* Unpoison stack redzones in the frames we are going to jump over. */
	kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);

	asm volatile (
#ifdef CONFIG_X86_64
			"       xchg   %%rbx,%%rsp	\n"
#else
			"       xchgl   %%ebx,%%esp	\n"
#endif
			"       int3			\n"
			"       .globl jprobe_return_end\n"
			"       jprobe_return_end:	\n"
			"       nop			\n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}
NOKPROBE_SYMBOL(jprobe_return);
NOKPROBE_SYMBOL(jprobe_return_end);

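/*
 * The int3 in jprobe_return() gets here via the break_handler path: the
 * xchg above has already put jprobe_saved_sp back into %rsp, so we can
 * check the stack pointer against the value saved by setjmp_pre_handler()
 * before restoring the saved registers and stack.
 */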
int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	void *saved_sp = kcb->jprobe_saved_sp;

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_regs(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_regs(regs);
			BUG();
		}
		/* It's OK to start function graph tracing again */
		unpause_graph_tracing();
		*regs = kcb->jprobe_saved_regs;
		__memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(longjmp_break_handler);

bool arch_within_kprobe_blacklist(unsigned long addr)
{
	return  (addr >= (unsigned long)__kprobes_text_start &&
		 addr < (unsigned long)__kprobes_text_end) ||
		(addr >= (unsigned long)__entry_text_start &&
		 addr < (unsigned long)__entry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}