/*
 * Dynamic Ftrace based Kprobes Optimization
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Hitachi Ltd., 2012
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/ftrace.h>

/*
 * This is called from ftrace code after invoking registered handlers to
 * disambiguate regs->nip changes done by jprobes and livepatch. We check if
 * there is an active jprobe at the provided address (mcount location).
 */
int __is_active_jprobe(unsigned long addr)
{
	if (!preemptible()) {
		struct kprobe *p = raw_cpu_read(current_kprobe);
		return (p && (unsigned long)p->addr == addr) ? 1 : 0;
	}

	return 0;
}
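
/*
 * Illustrative sketch only: the real caller of __is_active_jprobe() lives in
 * the ftrace entry code, not in C, and all names below are hypothetical. The
 * point is just how the return value is meant to be used after the registered
 * ftrace handlers have run, to tell an active jprobe apart from a livepatch
 * redirection when regs->nip was changed.
 */
#if 0
static void disambiguate_nip_example(struct pt_regs *regs,
				     unsigned long ftrace_loc,
				     unsigned long saved_nip)
{
	if (regs->nip == saved_nip)
		return;		/* no handler redirected execution */

	if (__is_active_jprobe(ftrace_loc)) {
		/* A jprobe set regs->nip: branch to its handler. */
	} else {
		/* Livepatch set regs->nip: continue at the new function. */
	}
}
#endif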

static nokprobe_inline
int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
		      struct kprobe_ctlblk *kcb, unsigned long orig_nip)
{
	/*
	 * Emulate singlestep (and also recover regs->nip)
	 * as if there is a nop
	 */
	regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
	if (unlikely(p->post_handler)) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}
	__this_cpu_write(current_kprobe, NULL);
	if (orig_nip)
		regs->nip = orig_nip;
	return 1;
}

int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
		    struct kprobe_ctlblk *kcb)
{
	if (kprobe_ftrace(p))
		return __skip_singlestep(p, regs, kcb, 0);
	else
		return 0;
}
NOKPROBE_SYMBOL(skip_singlestep);
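
/*
 * Usage sketch only, with hypothetical names; this is not the actual call
 * site in the trap-based kprobe dispatcher. It shows the contract of
 * skip_singlestep(): an ftrace-based probe has no copied instruction to
 * step over, so the step is emulated and 1 is returned; 0 means the caller
 * must set up a real single-step itself.
 */
#if 0
static int dispatch_after_pre_handler_example(struct kprobe *p,
					      struct pt_regs *regs,
					      struct kprobe_ctlblk *kcb)
{
	if (skip_singlestep(p, regs, kcb))
		return 1;	/* ftrace-based probe: step already emulated */

	/* Conventional probe: arrange a real single-step here. */
	return 0;
}
#endif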

/* Ftrace callback handler for kprobes */
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
			   struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	preempt_disable();

	p = get_kprobe((kprobe_opcode_t *)nip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto end;

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		unsigned long orig_nip = regs->nip;

		/*
		 * On powerpc, NIP is *before* this instruction for the
		 * pre handler
		 */
		regs->nip -= MCOUNT_INSN_SIZE;

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs))
			__skip_singlestep(p, regs, kcb, orig_nip);
		else {
			/*
			 * If pre_handler returns !0, it sets regs->nip and
			 * resets current kprobe. In this case, we should not
			 * re-enable preemption.
			 */
			return;
		}
	}
end:
	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
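
/*
 * Minimal usage sketch (not part of this file): with
 * CONFIG_KPROBES_ON_FTRACE, a kprobe placed at a function's ftrace location
 * is dispatched through kprobe_ftrace_handler() above instead of through a
 * trap. The probed symbol and all "example_" names below are assumptions
 * chosen for illustration only.
 */
#if 0
#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre: hit %pS, nip=0x%lx\n", p->addr, regs->nip);
	return 0;	/* 0: let kprobes continue normally */
}

static void example_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	pr_info("post: %pS done\n", p->addr);
}

static struct kprobe example_kp = {
	.symbol_name	= "_do_fork",	/* example target only */
	.pre_handler	= example_pre,
	.post_handler	= example_post,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif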

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	p->ainsn.boostable = -1;
	return 0;
}