/* arch/x86/kernel/kprobes/core.c (revision e2c75e76) */
/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>

#include "common.h"

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
	 * Groups, and some special opcodes cannot be boosted.
	 * This is non-const and volatile to keep gcc from statically
	 * optimizing it out, as variable_test_bit makes gcc think only
	 * *(unsigned long*) is used.
	 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      ----------------------------------------------          */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W
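
/*
 * How the table above is consumed (an illustrative sketch, not part of the
 * original file): can_boost() treats each W() row as 16 bits of a packed
 * bitmap and indexes it with the second opcode byte. For example, 0x31
 * (rdtsc) has a 1 in row 0x30, column 1, so it is boostable:
 *
 *	kprobe_opcode_t second_byte = 0x31;	// hypothetical opcode byte
 *	int boostable = test_bit(second_byte,
 *				 (unsigned long *)twobyte_is_boostable);
 */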

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function only switches the current task;
			      it doesn't switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static nokprobe_inline void
__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)dest;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

/* Write a jump instruction at 'dest' that, as if executed at 'from', jumps to 'to'. */
void synthesize_reljump(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, RELATIVEJUMP_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/* Write a call instruction at 'dest' that, as if executed at 'from', calls 'to'. */
void synthesize_relcall(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, RELATIVECALL_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);
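
/*
 * Worked example (illustrative, using made-up addresses): a RELATIVEJUMP
 * (0xe9) is 5 bytes -- the opcode plus a signed 32-bit displacement relative
 * to the end of the instruction. To jump from 0xffffffff81000100 to
 * 0xffffffff81000200, the encoded displacement is:
 *
 *	raddr = to - (from + 5)
 *	      = 0xffffffff81000200 - 0xffffffff81000105 = 0xfb
 *
 * which is why __synthesize_relative_insn() subtracts (from + 5) above.
 */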

/*
 * Skip the prefixes of the instruction.
 */
static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
{
	insn_attr_t attr;

	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	while (inat_is_legacy_prefix(attr)) {
		insn++;
		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	}
#ifdef CONFIG_X86_64
	if (inat_is_rex_prefix(attr))
		insn++;
#endif
	return insn;
}
NOKPROBE_SYMBOL(skip_prefixes);

/*
 * Returns non-zero if INSN is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
int can_boost(struct insn *insn, void *addr)
{
	kprobe_opcode_t opcode;

	if (search_exception_tables((unsigned long)addr))
		return 0;	/* Page fault may occur on this address. */

	/* 2nd-byte opcode */
	if (insn->opcode.nbytes == 2)
		return test_bit(insn->opcode.bytes[1],
				(unsigned long *)twobyte_is_boostable);

	if (insn->opcode.nbytes != 1)
		return 0;

	/* Can't boost an address-size override prefix */
	if (unlikely(inat_is_address_size_prefix(insn->attr)))
		return 0;

	opcode = insn->opcode.bytes[0];

	switch (opcode & 0xf0) {
	case 0x60:
		/* can't boost "bound" */
		return (opcode != 0x62);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0x90:
		return opcode != 0x9a;	/* can't boost call far */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
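
/*
 * Usage sketch (illustrative only): callers decode the copied instruction
 * first and then ask whether a reljump may be appended after it, e.g.:
 *
 *	struct insn insn;
 *	kernel_insn_init(&insn, buf, MAX_INSN_SIZE);	// buf: recovered copy
 *	insn_get_length(&insn);
 *	if (can_boost(&insn, probed_addr))		// probed_addr: original location
 *		... append a reljump, see prepare_boost() below ...
 */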

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	unsigned long faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr);
	/*
	 * Addresses inside the ftrace location are refused by
	 * arch_check_ftrace_location(). Something went terribly wrong
	 * if such an address is checked here.
	 */
	if (WARN_ON(faddr && faddr != addr))
		return 0UL;
	/*
	 * Use the current code if it is not modified by Kprobes
	 * and it cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped
	 * at a different place, so __copy_instruction() tweaks the
	 * displacement of that instruction. In that case, we can't
	 * recover the instruction from kp->ainsn.insn.
	 *
	 * On the other hand, in the case of a normal Kprobe, kp->opcode
	 * has a copy of the first byte of the probed instruction, which
	 * is overwritten by int3. Since the instruction at kp->addr is
	 * not modified by kprobes except for that first byte, we can
	 * recover the original instruction from it and kp->opcode.
	 *
	 * In case of Kprobes using ftrace, we do not have a copy of
	 * the original instruction. In fact, the ftrace location might
	 * be modified at any time and could even be in an inconsistent
	 * state. Fortunately, we know that the original code is the
	 * ideal 5-byte-long NOP.
	 */
	if (probe_kernel_read(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (faddr)
		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}

/*
 * Recover the probed instruction at addr for further analysis.
 * Callers must either hold kprobe_mutex or disable preemption,
 * to prevent the kprobes they reference from being released.
 * Returns zero if the instruction cannot be recovered (or the access failed).
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}
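
/*
 * Decoding sketch (illustrative): a recovered buffer is fed straight into
 * the in-kernel x86 decoder, exactly as can_probe() does below:
 *
 *	kprobe_opcode_t buf[MAX_INSN_SIZE];
 *	unsigned long recovered = recover_probed_instruction(buf, addr);
 *	if (recovered) {
 *		struct insn insn;
 *		kernel_insn_init(&insn, (void *)recovered, MAX_INSN_SIZE);
 *		insn_get_length(&insn);	// insn.length is now valid
 *	}
 */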

/* Check if paddr is at an instruction boundary */
static int can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint with the
		 * original instruction in our buffer.
		 * Also, jump optimization will change the breakpoint to a
		 * relative jump. Since relative jumps also occur in normal
		 * code, we simply continue if there is no kprobe there.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return 0;
		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
		insn_get_length(&insn);

		/*
		 * Another debugging subsystem might have inserted this
		 * breakpoint. In that case, we can't recover it.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		addr += insn.length;
	}

	return (addr == paddr);
}

/*
 * Returns non-zero if opcode modifies the interrupt flag.
 */
static int is_IF_modifier(kprobe_opcode_t *insn)
{
	/* Skip prefixes */
	insn = skip_prefixes(insn);

	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	return 0;
}

/*
 * Copy an instruction, recovering it if it has been modified by kprobes,
 * and adjust the displacement if the instruction uses the %rip-relative
 * addressing mode. Note that since @real will be the final place of the
 * copied instruction, the displacement must be adjusted relative to @real,
 * not @dest. This returns the length of the copied instruction, or 0 on
 * error.
 */
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn =
		recover_probed_instruction(buf, (unsigned long)src);

	if (!recovered_insn || !insn)
		return 0;

	/* This can access kernel text if the given address is not recovered */
	if (probe_kernel_read(dest, (void *)recovered_insn, MAX_INSN_SIZE))
		return 0;

	kernel_insn_init(insn, dest, MAX_INSN_SIZE);
	insn_get_length(insn);

	/* Another subsystem has put a breakpoint here; we failed to recover it */
	if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;

#ifdef CONFIG_X86_64
	/* Only x86_64 has RIP-relative instructions */
	if (insn_rip_relative(insn)) {
		s64 newdisp;
		u8 *disp;
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn->displacement.value
			  - (u8 *) real;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
				src, real, insn->displacement.value);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn->length;
}
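
/*
 * Worked example of the displacement fixup (made-up addresses): a probed
 * "mov 0x1000(%rip), %rax" at src = 0xffffffff81000000 resolves to
 * target = src + insn_len + 0x1000. When the copy will run from
 * real = 0xffffffffa0000000, the new displacement must satisfy
 * real + insn_len + newdisp == src + insn_len + olddisp, i.e.
 *
 *	newdisp = src + olddisp - real
 *	        = 0xffffffff81000000 + 0x1000 - 0xffffffffa0000000
 *	        = -0x1efff000	(still fits in s32, so the copy is usable)
 */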

/* Prepare a reljump right after the copied instruction, to boost it */
static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
			  struct insn *insn)
{
	int len = insn->length;

	if (can_boost(insn, p->addr) &&
	    MAX_INSN_SIZE - len >= RELATIVEJUMP_SIZE) {
		/*
		 * This instruction can be executed directly if it
		 * jumps back to the correct address.
		 */
		synthesize_reljump(buf + len, p->ainsn.insn + len,
				   p->addr + insn->length);
		len += RELATIVEJUMP_SIZE;
		p->ainsn.boostable = true;
	} else {
		p->ainsn.boostable = false;
	}

	return len;
}
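
/*
 * Resulting single-step buffer layout for a boosted probe (a sketch):
 *
 *	p->ainsn.insn:  [ copied instruction ][ e9 rel32 ]
 *	                  ^ executed out of     ^ jumps back to
 *	                    line                  p->addr + insn->length
 *
 * A boosted hit never needs the debug trap to resume: the appended reljump
 * returns control straight to the instruction following the probe point.
 */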

/* Make the page read-only when allocating it */
void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	if (page)
		set_memory_ro((unsigned long)page & PAGE_MASK, 1);

	return page;
}

/* Restore the page to RW (and non-executable) mode before releasing it */
void free_insn_page(void *page)
{
	set_memory_nx((unsigned long)page & PAGE_MASK, 1);
	set_memory_rw((unsigned long)page & PAGE_MASK, 1);
	module_memfree(page);
}

static int arch_copy_kprobe(struct kprobe *p)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int len;

	/* Copy the instruction, recovering it if another optprobe has modified it. */
	len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
	if (!len)
		return -EINVAL;

	/*
	 * __copy_instruction can modify the displacement of the instruction,
	 * but that doesn't affect the boostable check.
	 */
	len = prepare_boost(buf, p, &insn);

	/* Check whether the instruction modifies the Interrupt Flag or not */
	p->ainsn.if_modifier = is_IF_modifier(buf);

	/* Also, a displacement change doesn't affect the first byte */
	p->opcode = buf[0];

	/* OK, write back the instruction(s) into the ROX insn buffer */
	text_poke(p->ainsn.insn, buf, len);

	return 0;
}

int arch_prepare_kprobe(struct kprobe *p)
{
	int ret;

	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on a special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	ret = arch_copy_kprobe(p);
	if (ret) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}

	return ret;
}
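
/*
 * Usage sketch (illustrative only; the handler and symbol names are made
 * up): the generic kprobes API drives the arch hooks above --
 * register_kprobe() ends up calling arch_prepare_kprobe() and then
 * arch_arm_kprobe():
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		pr_info("hit %p, ip=%lx\n", kp->addr, regs->ip);
 *		return 0;	// 0: continue with single-stepping
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name = "do_sys_open",	// hypothetical probe target
 *		.pre_handler = my_pre,
 *	};
 *
 *	register_kprobe(&my_kp);	// later: unregister_kprobe(&my_kp)
 */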

void arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
		p->ainsn.insn = NULL;
	}
}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (p->ainsn.if_modifier)
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

static nokprobe_inline void clear_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

static nokprobe_inline void restore_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
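
/*
 * Usage sketch (illustrative only; names are made up): a kretprobe hooks
 * function returns via the trampoline installed above. register_kretprobe()
 * is the generic entry point:
 *
 *	static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		pr_info("returned %lx\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_krp = {
 *		.kp.symbol_name = "do_sys_open",	// hypothetical target
 *		.handler = my_ret,
 *		.maxactive = 16,	// concurrent instances to track
 *	};
 *
 *	register_kretprobe(&my_krp);
 */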

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable && !p->post_handler) {
		/* Boost up -- we can execute the copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);

/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in the .kprobes.text section.
		 * Raise a BUG, or we'll continue in an endless reentering loop
		 * and eventually overflow the stack.
		 */
		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
		       p->addr);
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing. We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} else if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it.  Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address.  In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		preempt_enable_no_resched();
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			if (!skip_singlestep(p, regs, kcb))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);

/*
 * When a retprobed function returns, this code saves the registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
asm(
	".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
#ifdef CONFIG_X86_64
	/* We don't bother saving the ss register */
	"	pushq %rsp\n"
	"	pushfq\n"
	SAVE_REGS_STRING
	"	movq %rsp, %rdi\n"
	"	call trampoline_handler\n"
	/* Replace saved sp with true return address. */
	"	movq %rax, 152(%rsp)\n"
	RESTORE_REGS_STRING
	"	popfq\n"
#else
	"	pushf\n"
	SAVE_REGS_STRING
	"	movl %esp, %eax\n"
	"	call trampoline_handler\n"
	/* Move flags to cs */
	"	movl 56(%esp), %edx\n"
	"	movl %edx, 52(%esp)\n"
	/* Replace saved flags with true return address. */
	"	movl %eax, 56(%esp)\n"
	RESTORE_REGS_STRING
	"	popf\n"
#endif
	"	ret\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n"
);
NOKPROBE_SYMBOL(kretprobe_trampoline);
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
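
/*
 * Why 152(%rsp) above (a sketch of the x86-64 layout): after "pushq %rsp",
 * "pushfq" and SAVE_REGS_STRING, the stack mirrors a struct pt_regs, with
 * the pushed %rsp landing in the pt_regs->sp slot -- the 20th field, i.e.
 * offset 19 * 8 = 152. Storing trampoline_handler()'s return value (%rax,
 * the real return address) there makes the final "ret", executed after
 * RESTORE_REGS_STRING and "popfq" unwind the rest, jump back to the
 * original caller.
 */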

/*
 * Called from kretprobe_trampoline
 */
__visible __used void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, or because more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry(ri, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
NOKPROBE_SYMBOL(trampoline_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip prefixes */
	insn = skip_prefixes(insn);

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = true;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- fix return addr as for the indirect case */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct. And this is boostable
			 */
			p->ainsn.boostable = true;
			goto no_change;
		}
	default:
		break;
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
NOKPROBE_SYMBOL(resume_execution);

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_debug_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set; in that case, continue the remaining processing
	 * of do_debug as if this were not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_debug_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen on single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault. We reset the current
		 * kprobe, point ip back at the probe address, and allow
		 * the page fault handler to continue as for a normal
		 * page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		/*
		 * The Trap flag (TF) has been set here because this fault
		 * happened where the single stepping will be done.
		 * So clear it by resetting the current kprobe:
		 */
		regs->flags &= ~X86_EFLAGS_TF;

		/*
		 * If the TF flag was set before the kprobe hit,
		 * don't touch it:
		 */
		regs->flags |= kcb->kprobe_old_flags;

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could likewise account
		 * for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because an instruction in the pre/post
		 * handler caused the page fault. This could happen if
		 * the handler tries to access user space, e.g. via
		 * copy_from_user() or get_user(). Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs, trapnr))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
			     void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	if (val == DIE_GPF) {
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
	}
	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
	 * a raw stack chunk with redzones:
	 */
	__memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
	regs->ip = (unsigned long)(jp->entry);

	/*
	 * jprobes use jprobe_return(), which skips the normal return
	 * path of the function, and this causes the accounting of the
	 * function graph tracer to get messed up.
	 *
	 * Pause function graph tracing while performing the jprobe function.
	 */
	pause_graph_tracing();
	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

void jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/* Unpoison stack redzones in the frames we are going to jump over. */
	kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);

	asm volatile (
#ifdef CONFIG_X86_64
			"       xchg   %%rbx,%%rsp	\n"
#else
			"       xchgl   %%ebx,%%esp	\n"
#endif
			"       int3			\n"
			"       .globl jprobe_return_end\n"
			"       jprobe_return_end:	\n"
			"       nop			\n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}
NOKPROBE_SYMBOL(jprobe_return);
NOKPROBE_SYMBOL(jprobe_return_end);

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	void *saved_sp = kcb->jprobe_saved_sp;

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_regs(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_regs(regs);
			BUG();
		}
		/* It's OK to start function graph tracing again */
		unpause_graph_tracing();
		*regs = kcb->jprobe_saved_regs;
		__memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(longjmp_break_handler);
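
/*
 * Usage sketch for the jprobe path above (illustrative only; names are made
 * up, and the entry function must mirror the probed function's signature):
 *
 *	static long my_entry(unsigned long arg0, unsigned long arg1)
 *	{
 *		pr_info("args: %lx %lx\n", arg0, arg1);
 *		jprobe_return();	// mandatory: never return normally
 *		return 0;		// unreachable
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = (kprobe_opcode_t *) my_entry,
 *		.kp.symbol_name = "do_sys_open",	// hypothetical target
 *	};
 *
 *	register_jprobe(&my_jp);	// later: unregister_jprobe(&my_jp)
 */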

bool arch_within_kprobe_blacklist(unsigned long addr)
{
	bool is_in_entry_trampoline_section = false;

#ifdef CONFIG_X86_64
	is_in_entry_trampoline_section =
		(addr >= (unsigned long)__entry_trampoline_start &&
		 addr < (unsigned long)__entry_trampoline_end);
#endif
	return  (addr >= (unsigned long)__kprobes_text_start &&
		 addr < (unsigned long)__kprobes_text_end) ||
		(addr >= (unsigned long)__entry_text_start &&
		 addr < (unsigned long)__entry_text_end) ||
		is_in_entry_trampoline_section;
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}