xref: /openbmc/linux/arch/x86/kernel/kprobes/ftrace.c (revision 12eb4683)
/*
 * Dynamic Ftrace based Kprobes Optimization
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/ftrace.h>

#include "common.h"

static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb)
{
	/*
	 * Emulate a single step (and also recover regs->ip)
	 * as if the probed instruction were a 5-byte nop.
	 */
	regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
	if (unlikely(p->post_handler)) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}
	__this_cpu_write(current_kprobe, NULL);
	return 1;
}
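/*
 * Illustrative note (editor's addition, not part of the original file):
 * on x86, MCOUNT_INSN_SIZE is 5 -- the size of the "call mcount/__fentry__"
 * instruction that ftrace patches at function entry.  If a probe sits at,
 * say, 0xffffffff81001000, __skip_singlestep() therefore leaves regs->ip
 * at 0xffffffff81001005, just past the patched call site, exactly as if
 * that call had been a 5-byte nop.
 */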

/* Skip single-stepping if @p is an ftrace-based kprobe */
int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
			      struct kprobe_ctlblk *kcb)
{
	if (kprobe_ftrace(p))
		return __skip_singlestep(p, regs, kcb);
	else
		return 0;
}

/* Ftrace callback handler for kprobes */
void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;
	unsigned long flags;

	/* Disable irqs to emulate a breakpoint hit and to avoid preemption */
	local_irq_save(flags);

	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto end;

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		/* Kprobe handlers expect regs->ip = ip + 1, as if a breakpoint hit */
		regs->ip = ip + sizeof(kprobe_opcode_t);

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs))
			__skip_singlestep(p, regs, kcb);
		/*
		 * If pre_handler returns !0, it has already set regs->ip
		 * and reset the current kprobe itself.
		 */
	}
end:
	local_irq_restore(flags);
}
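/*
 * Minimal usage sketch (editor's addition, not part of the original file):
 * with CONFIG_KPROBES_ON_FTRACE, a kprobe placed on an ftrace-managed
 * function entry is dispatched through kprobe_ftrace_handler() above
 * instead of through an int3 breakpoint.  The module below shows one way
 * such a probe might be set up; the probed symbol ("do_sys_open") and the
 * "example_*" names are assumptions made for illustration only.
 */
#if 0	/* sketch only -- not built as part of this file */
#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* Entered from kprobe_ftrace_handler() with regs->ip == addr + 1 */
	pr_info("pre_handler: ip = 0x%lx\n", regs->ip);
	return 0;	/* 0: let __skip_singlestep() run the post_handler */
}

static void example_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	/* regs->ip has already been moved past the 5-byte mcount call */
	pr_info("post_handler: ip = 0x%lx\n", regs->ip);
}

static struct kprobe example_kp = {
	.symbol_name	= "do_sys_open",
	.pre_handler	= example_pre,
	.post_handler	= example_post,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif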

/*
 * An ftrace-based kprobe never single-steps a copied instruction, so no
 * out-of-line instruction slot needs to be prepared here.
 */
int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	p->ainsn.boostable = -1;
	return 0;
}