xref: /openbmc/linux/arch/x86/kernel/kprobes/core.c (revision 62e59c4e)
/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality.
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>

#include "common.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
	 * Groups, and some special opcodes cannot be boosted.
	 * This is non-const and volatile to keep gcc from statically
	 * optimizing it out, as variable_test_bit makes gcc think only
	 * *(unsigned long*) is used.
	 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W

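/*
 * Illustration, not part of the build: each W() row packs sixteen 0/1
 * flags into half of a 32-bit word, so the array above reads as a flat
 * 256-bit bitmap indexed by the second opcode byte. A minimal sketch,
 * assuming the 0x0f 0x47 (cmova) entry set in row 0x40 above:
 */
#if 0
static void twobyte_bitmap_example(void)
{
	/* Bit 0x47 of the flat bitmap is set, so cmova is boostable. */
	if (test_bit(0x47, (unsigned long *)twobyte_is_boostable))
		pr_info("0x0f 0x47 (cmova) is boostable\n");
}
#endif
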
struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function only switches the current task,
			      but does not switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static nokprobe_inline void
__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)dest;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

/*
 * Insert a jump instruction at address 'dest', which jumps to address 'to'.
 * The displacement is encoded as if the instruction executed at 'from'.
 */
void synthesize_reljump(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, RELATIVEJUMP_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/*
 * Insert a call instruction at address 'dest', which calls address 'to'.
 * The displacement is encoded as if the instruction executed at 'from'.
 */
void synthesize_relcall(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, RELATIVECALL_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);

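/*
 * Worked example, illustrative only: a rel32 JMP/CALL is five bytes, one
 * opcode byte plus a signed 32-bit displacement measured from the end of
 * the instruction, hence the "+ 5" above. A sketch, assuming
 * RELATIVEJUMP_OPCODE is 0xe9 (jmp rel32):
 */
#if 0
static void reljump_example(void)
{
	u8 buf[5];
	void *from = (void *)0xffffffff81000000UL; /* where it will execute */
	void *to   = (void *)0xffffffff81000010UL; /* jump target */

	synthesize_reljump(buf, from, to);
	/* buf[0] == 0xe9 and the displacement is 0x10 - 5 == 0xb */
	BUG_ON(*(s32 *)(buf + 1) != 0xb);
}
#endif
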
/*
 * Skip the prefixes of the instruction.
 */
static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
{
	insn_attr_t attr;

	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	while (inat_is_legacy_prefix(attr)) {
		insn++;
		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	}
#ifdef CONFIG_X86_64
	if (inat_is_rex_prefix(attr))
		insn++;
#endif
	return insn;
}
NOKPROBE_SYMBOL(skip_prefixes);

/*
 * Returns non-zero if INSN is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
int can_boost(struct insn *insn, void *addr)
{
	kprobe_opcode_t opcode;

	if (search_exception_tables((unsigned long)addr))
		return 0;	/* Page fault may occur on this address. */

	/* 2nd-byte opcode */
	if (insn->opcode.nbytes == 2)
		return test_bit(insn->opcode.bytes[1],
				(unsigned long *)twobyte_is_boostable);

	if (insn->opcode.nbytes != 1)
		return 0;

	/* Can't boost an address-size override prefix */
	if (unlikely(inat_is_address_size_prefix(insn->attr)))
		return 0;

	opcode = insn->opcode.bytes[0];

	switch (opcode & 0xf0) {
	case 0x60:
		/* can't boost "bound" */
		return (opcode != 0x62);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0x90:
		return opcode != 0x9a;	/* can't boost call far */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	unsigned long faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr);
	/*
	 * Addresses inside the ftrace location are refused by
	 * arch_check_ftrace_location(). Something went terribly wrong
	 * if such an address is checked here.
	 */
	if (WARN_ON(faddr && faddr != addr))
		return 0UL;
	/*
	 * Use the current code if it is not modified by a kprobe
	 * and cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped at a
	 * different place, so __copy_instruction() tweaks the displacement of
	 * that instruction. In that case, we can't recover the instruction
	 * from kp->ainsn.insn.
	 *
	 * On the other hand, in the case of a normal kprobe, kp->opcode has a
	 * copy of the first byte of the probed instruction, which was
	 * overwritten by int3. Since the instruction at kp->addr is not
	 * modified by kprobes except for its first byte, we can recover the
	 * original instruction from it and kp->opcode.
	 *
	 * In the case of kprobes using ftrace, we do not have a copy of
	 * the original instruction. In fact, the ftrace location might
	 * be modified at any time and could even be in an inconsistent state.
	 * Fortunately, we know that the original code is the ideal 5-byte
	 * long NOP.
	 */
	if (probe_kernel_read(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (faddr)
		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}

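/*
 * Worked example, illustrative only: suppose "push %rbp" (0x55) at addr
 * carries a normal kprobe, so kernel text now starts with int3 (0xcc)
 * and kp->opcode holds 0x55. probe_kernel_read() fills buf with
 * { 0xcc, ... } and "buf[0] = kp->opcode" turns that back into
 * { 0x55, ... }, the original byte stream. For an ftrace location the
 * first five bytes are simply replaced by the ideal NOP5 instead.
 */
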
/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must hold kprobe_mutex, or disable preemption, to prevent
 * the kprobes it references from being released.
 * Returns zero if the instruction cannot be recovered (or access failed).
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}

/* Check if paddr is at an instruction boundary */
static int can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint with the
		 * original instruction in our buffer.
		 * Also, jump optimization will change the breakpoint to a
		 * relative jump. Since a relative jump is itself a normal
		 * instruction, we just proceed if there is no kprobe there.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return 0;
		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
		insn_get_length(&insn);

		/*
		 * Another debugging subsystem might have inserted this
		 * breakpoint. In that case, we can't recover it.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		addr += insn.length;
	}

	return (addr == paddr);
}

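/*
 * Worked example, illustrative only: if a function starts with a 4-byte
 * instruction followed by a 2-byte one, the decode walk above visits
 * offsets 0, 4 and 6. Probing offset 4 succeeds (the walk stops exactly
 * on paddr), while probing offset 5 fails: the walk steps 0 -> 4 -> 6,
 * overshoots paddr, and (addr == paddr) evaluates to 0.
 */
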
/*
 * Returns non-zero if opcode modifies the interrupt flag.
 */
static int is_IF_modifier(kprobe_opcode_t *insn)
{
	/* Skip prefixes */
	insn = skip_prefixes(insn);

	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	return 0;
}

/*
 * Copy an instruction, recovering it if it has been modified by kprobes,
 * and adjust the displacement if the instruction uses the %rip-relative
 * addressing mode. Note that since @real will be the final place of the
 * copied instruction, the displacement must be adjusted relative to @real,
 * not @dest.
 * This returns the length of the copied instruction, or 0 on error.
 */
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn =
		recover_probed_instruction(buf, (unsigned long)src);

	if (!recovered_insn || !insn)
		return 0;

	/* This can access kernel text if the given address is not recovered */
	if (probe_kernel_read(dest, (void *)recovered_insn, MAX_INSN_SIZE))
		return 0;

	kernel_insn_init(insn, dest, MAX_INSN_SIZE);
	insn_get_length(insn);

	/* Another subsystem put a breakpoint here; we failed to recover it */
	if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;

	/* We should not single-step on exception-masking instructions */
	if (insn_masking_exception(insn))
		return 0;

#ifdef CONFIG_X86_64
	/* Only x86_64 has RIP-relative instructions */
	if (insn_rip_relative(insn)) {
		s64 newdisp;
		u8 *disp;
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn->displacement.value
			  - (u8 *) real;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn->length;
}

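/*
 * Worked example, illustrative only: take "mov 0x1000(%rip),%rax" at
 * src == 0xffffffff81000000, copied to an insn slot whose final address
 * is real == 0xffffffffa0002000. The target must stay fixed, so the new
 * displacement is
 *
 *   newdisp = src + disp - real
 *           = 0xffffffff81000000 + 0x1000 - 0xffffffffa0002000
 *           = -0x1f001000
 *
 * which still fits in s32, so the copy is patched and accepted. A delta
 * larger than +/-2GB would fail the (s64)(s32) round-trip check above.
 */
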
/* Prepare a reljump right after the instruction to boost it */
static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
			  struct insn *insn)
{
	int len = insn->length;

	if (can_boost(insn, p->addr) &&
	    MAX_INSN_SIZE - len >= RELATIVEJUMP_SIZE) {
		/*
		 * This instruction can be executed directly if it
		 * jumps back to the correct address.
		 */
		synthesize_reljump(buf + len, p->ainsn.insn + len,
				   p->addr + insn->length);
		len += RELATIVEJUMP_SIZE;
		p->ainsn.boostable = true;
	} else {
		p->ainsn.boostable = false;
	}

	return len;
}

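/*
 * Resulting insn-slot layout for a boosted probe (illustrative), with a
 * copied instruction of "len" bytes:
 *
 *   p->ainsn.insn: [ copied insn, len bytes ][ jmp rel32 -> p->addr + len ]
 *
 * so the slot executes straight through and jumps back to the instruction
 * after the probe point, with no single-step trap needed.
 */
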
/* Make the page read-only when allocating it */
void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	if (page)
		set_memory_ro((unsigned long)page & PAGE_MASK, 1);

	return page;
}

/* Make the page non-executable and writable again before releasing it */
void free_insn_page(void *page)
{
	set_memory_nx((unsigned long)page & PAGE_MASK, 1);
	set_memory_rw((unsigned long)page & PAGE_MASK, 1);
	module_memfree(page);
}

static int arch_copy_kprobe(struct kprobe *p)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int len;

	/* Copy the instruction, recovering it if an optprobe modified it. */
	len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
	if (!len)
		return -EINVAL;

	/*
	 * __copy_instruction can modify the displacement of the instruction,
	 * but that doesn't affect the boostability check.
	 */
	len = prepare_boost(buf, p, &insn);

	/* Check whether the instruction modifies the interrupt flag */
	p->ainsn.if_modifier = is_IF_modifier(buf);

	/* Also, a displacement change doesn't affect the first byte */
	p->opcode = buf[0];

	/* OK, write back the instruction(s) into the ROX insn buffer */
	text_poke(p->ainsn.insn, buf, len);

	return 0;
}

int arch_prepare_kprobe(struct kprobe *p)
{
	int ret;

	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	ret = arch_copy_kprobe(p);
	if (ret) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}

	return ret;
}

void arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
		p->ainsn.insn = NULL;
	}
}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (p->ainsn.if_modifier)
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

static nokprobe_inline void clear_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

static nokprobe_inline void restore_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

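/*
 * Note, added for clarity: with DEBUGCTLMSR_BTF set, the TF flag traps
 * only on branches rather than after every instruction, so block-stepping
 * must be cleared to get a #DB right after the single-stepped copy, and
 * restored afterwards for tasks that were using it.
 */
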
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;
	ri->fp = sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

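/*
 * Stack view at function entry (illustrative): right after the "call"
 * into the probed function, *sara is the caller's return address.
 *
 *   before:  [ return addr to caller  ] <- sara
 *   after:   [ &kretprobe_trampoline  ] <- sara
 *
 * The real return address is stashed in ri->ret_addr, so the eventual
 * "ret" lands in the trampoline instead of the caller.
 */
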
static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);

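/*
 * Summary, added for clarity: a boostable instruction with no
 * post_handler executes directly from the insn slot (copied insn plus a
 * reljump back), costing only the int3 trap. Anything else sets TF and
 * single-steps the copy, taking an extra #DB trap that
 * kprobe_debug_handler()/resume_execution() must then fix up.
 */
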
/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/*
		 * A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in the .kprobes.text section.
		 * Raise a BUG or we'll continue in an endless reentering loop
		 * and eventually a stack overflow.
		 */
		pr_err("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire duration of kprobe
	 * processing. Since int3 and the debug trap disable irqs and we
	 * clear IF while single-stepping, this must not be preemptible.
	 */

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, the user
			 * handler set up the registers to exit to another
			 * instruction, so we must skip the single stepping.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
			return 1;
		}
	} else if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it.  Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address.  In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		return 1;
	} /* else: not a kprobe fault; let the kernel handle it */

	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);

/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
asm(
	".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
#ifdef CONFIG_X86_64
	/* We don't bother saving the ss register */
	"	pushq %rsp\n"
	"	pushfq\n"
	SAVE_REGS_STRING
	"	movq %rsp, %rdi\n"
	"	call trampoline_handler\n"
	/* Replace saved sp with true return address. */
	"	movq %rax, 152(%rsp)\n"
	RESTORE_REGS_STRING
	"	popfq\n"
#else
	"	pushf\n"
	SAVE_REGS_STRING
	"	movl %esp, %eax\n"
	"	call trampoline_handler\n"
	/* Move flags to cs */
	"	movl 56(%esp), %edx\n"
	"	movl %edx, 52(%esp)\n"
	/* Replace saved flags with true return address. */
	"	movl %eax, 56(%esp)\n"
	RESTORE_REGS_STRING
	"	popf\n"
#endif
	"	ret\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n"
);
NOKPROBE_SYMBOL(kretprobe_trampoline);
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);

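/*
 * Control flow of a kretprobe hit (illustrative):
 *
 *   caller --call--> probed function --ret--> kretprobe_trampoline
 *     1. push flags/regs, pass &regs to trampoline_handler()
 *     2. the handler returns the real return address in %rax (%eax)
 *     3. it is written over the saved sp slot (flags slot on x86-32)
 *     4. regs are restored and the final "ret" lands back in the caller
 */
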
static struct kprobe kretprobe_kprobe = {
	.addr = (void *)kretprobe_trampoline,
};

/*
 * Called from kretprobe_trampoline
 */
static __used void *trampoline_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;
	void *frame_pointer;
	bool skipped = false;

	preempt_disable();

	/*
	 * Set a dummy kprobe to avoid kretprobe recursion.
	 * Since a kretprobe never runs in a kprobe handler, no kprobe can
	 * be running at this point.
	 */
	kcb = get_kprobe_ctlblk();
	__this_cpu_write(current_kprobe, &kretprobe_kprobe);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
	/* On x86-64, we use pt_regs->sp for the return address holder. */
	frame_pointer = &regs->sp;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
	/* On x86-32, we use pt_regs->flags for the return address holder. */
	frame_pointer = &regs->flags;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry(ri, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;
		/*
		 * Return probes must be pushed onto this hash list in the
		 * correct order (same as the return order) so that they can
		 * be popped correctly. However, if we find an entry pushed
		 * in the wrong order, we have found a function that should
		 * not have been probed, because the out-of-order entry was
		 * pushed while processing another kretprobe itself.
		 */
		if (ri->fp != frame_pointer) {
			if (!skipped)
				pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
			skipped = true;
			continue;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (skipped)
			pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
				ri->rp->kp.addr);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;
		if (ri->fp != frame_pointer)
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, &kretprobe_kprobe);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	__this_cpu_write(current_kprobe, NULL);
	preempt_enable();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
NOKPROBE_SYMBOL(trampoline_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip prefixes */
	insn = skip_prefixes(insn);

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = true;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct. And this is boostable
			 */
			p->ainsn.boostable = true;
			goto no_change;
		}
	default:
		break;
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
NOKPROBE_SYMBOL(resume_execution);

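/*
 * Worked example, illustrative only: a 3-byte instruction at orig_ip
 * 0xffffffff81000000 is single-stepped from its copy at copy_ip
 * 0xffffffffa0002000. The #DB trap leaves regs->ip == copy_ip + 3, and
 * "regs->ip += orig_ip - copy_ip" rewrites it to orig_ip + 3, the
 * instruction after the probe point. The same delta fixes a relative
 * call's pushed return address via "*tos = orig_ip + (*tos - copy_ip)".
 */
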
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_debug_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_debug_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen on single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the ip back to the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		/*
		 * The trap flag (TF) has been set here because this fault
		 * happened where the single stepping will be done.
		 * So clear it by resetting the current kprobe:
		 */
		regs->flags &= ~X86_EFLAGS_TF;

		/*
		 * If the TF flag was set before the kprobe hit,
		 * don't touch it:
		 */
		regs->flags |= kcb->kprobe_old_flags;

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
		/*
		 * We increment the nmissed count for accounting; the
		 * npre/npostfault counts could also account for these
		 * specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because an instruction in the pre/post
		 * handler caused the page fault. This could happen if
		 * the handler tries to access user space, e.g. via
		 * copy_from_user() or get_user(). Let the user-specified
		 * handler try to fix it up first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

int __init arch_populate_kprobe_blacklist(void)
{
	int ret;

	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					 (unsigned long)__irqentry_text_end);
	if (ret)
		return ret;

	return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					 (unsigned long)__entry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}