// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Dynamic Ftrace based Kprobes Optimization
 *
 * Copyright (C) Hitachi Ltd., 2012
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/ftrace.h>

/* Ftrace callback handler for kprobes */
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	int bit;

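	/* Bail out early if kprobe handling via ftrace has been disabled. */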
	if (unlikely(kprobe_ftrace_disabled))
		return;

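	/*
	 * Protect against recursing into this callback: a negative return
	 * means recursion was detected, so give up without handling.
	 */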
	bit = ftrace_test_recursion_trylock(nip, parent_nip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)nip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	kcb = get_kprobe_ctlblk();
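	/*
	 * If another kprobe is already being handled on this CPU, only
	 * record the missed hit; otherwise run the handlers below.
	 */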
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		/*
		 * On powerpc, NIP is *before* this instruction for the
		 * pre handler
		 */
		regs_add_return_ip(regs, -MCOUNT_INSN_SIZE);

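		/* Mark this kprobe as the one currently running on this CPU. */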
		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			/*
			 * Emulate singlestep (and also recover regs->nip)
			 * as if there is a nop
			 */
			regs_add_return_ip(regs, MCOUNT_INSN_SIZE);
			if (unlikely(p->post_handler)) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				p->post_handler(p, regs, 0);
			}
		}
		/*
		 * If pre_handler returns !0, it changes regs->nip. We have to
		 * skip emulating post_handler.
		 */
		__this_cpu_write(current_kprobe, NULL);
	}
out:
	ftrace_test_recursion_unlock(bit);
}
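/* The handler itself must never be probed, or we would recurse. */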
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

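/*
 * Kprobes attached at ftrace locations need no copied instruction slot
 * and no single-stepping, so there is nothing extra to prepare here.
 */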
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	p->ainsn.boostable = -1;
	return 0;
}