xref: /openbmc/linux/arch/x86/kernel/kprobes/ftrace.c (revision 293d5b43)
/*
 * Dynamic Ftrace based Kprobes Optimization
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/ftrace.h>

#include "common.h"

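/*
 * Finish a probe hit on an ftrace location without single-stepping:
 * the probed address holds the 5-byte mcount/fentry call site, so
 * simply advancing regs->ip past it behaves like executing a NOP.
 * When @orig_ip is non-zero, regs->ip is restored to it afterwards
 * (the caller passes the ip it saved before adjusting it for the
 * pre_handler).
 */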
static nokprobe_inline
int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
		      struct kprobe_ctlblk *kcb, unsigned long orig_ip)
{
	/*
	 * Emulate singlestep (and also recover regs->ip)
	 * as if there were a 5-byte NOP at the probe address.
	 */
	regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
	if (unlikely(p->post_handler)) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}
	__this_cpu_write(current_kprobe, NULL);
	if (orig_ip)
		regs->ip = orig_ip;
	return 1;
}

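/*
 * Called from the rest of the x86 kprobes code (declared in common.h):
 * for an ftrace-based probe there is no copied instruction to
 * single-step, so emulate the step here and return 1; for any other
 * probe return 0 and let the caller perform real single-stepping.
 */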
int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
		    struct kprobe_ctlblk *kcb)
{
	if (kprobe_ftrace(p))
		return __skip_singlestep(p, regs, kcb, 0);
	else
		return 0;
}
NOKPROBE_SYMBOL(skip_singlestep);

/* Ftrace callback handler for kprobes */
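/*
 * This function is installed as the callback of the ftrace_ops used by
 * the generic kprobes code (kprobe_ftrace_ops in kernel/kprobes.c) for
 * probes placed on ftrace locations.  Those ops are registered with
 * FTRACE_OPS_FL_SAVE_REGS, so a full pt_regs is passed in and regs->ip
 * may be rewritten here.
 */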
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;
	unsigned long flags;

	/* Disable irqs to emulate a breakpoint hit and to avoid being preempted */
	local_irq_save(flags);

	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto end;

	kcb = get_kprobe_ctlblk();
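	/*
	 * Kprobe handlers are not re-entered here: if another kprobe is
	 * already being handled on this CPU, only record this hit in the
	 * nmissed count.
	 */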
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		unsigned long orig_ip = regs->ip;
		/* Kprobe handlers expect regs->ip = ip + 1, as if a breakpoint had been hit */
		regs->ip = ip + sizeof(kprobe_opcode_t);

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs))
			__skip_singlestep(p, regs, kcb, orig_ip);
		/*
		 * If pre_handler returns a non-zero value, it has already
		 * set regs->ip and reset the current kprobe itself.
		 */
	}
end:
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

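/*
 * An ftrace-based probe needs no out-of-line instruction buffer, so no
 * insn slot is allocated: ainsn.insn stays NULL and ainsn.boostable is
 * left in its initial, unprepared state.
 */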
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	p->ainsn.boostable = -1;
	return 0;
}
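
/*
 * Illustrative sketch only (not part of this file): a minimal module
 * showing how a probe ends up on the path above.  With
 * CONFIG_KPROBES_ON_FTRACE, a kprobe placed on a function's ftrace
 * entry is dispatched through kprobe_ftrace_handler() instead of an
 * int3 breakpoint.  The probed symbol is just an example; see
 * samples/kprobes/kprobe_example.c for a complete, maintained version.
 *
 *	#include <linux/module.h>
 *	#include <linux/kprobes.h>
 *
 *	static int example_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre: %pS hit, ip=%lx\n", p->addr, regs->ip);
 *		return 0;	// 0: let __skip_singlestep() finish the hit
 *	}
 *
 *	static struct kprobe example_kp = {
 *		.symbol_name	= "_do_fork",	// example target only
 *		.pre_handler	= example_pre,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_kprobe(&example_kp);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_kprobe(&example_kp);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 *	MODULE_LICENSE("GPL");
 */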
100