// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>

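/*
 * Overview (editorial summary of the code below): each dynamic-ftrace patch
 * site is MCOUNT_INSN_SIZE bytes wide and holds two 32-bit instructions,
 * either a NOP4/NOP4 pair when tracing is disabled or an auipc/jalr call
 * pair into the ftrace trampoline when it is enabled.  The helpers below
 * rewrite those pairs with patch_insn_write().
 */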
#ifdef CONFIG_DYNAMIC_FTRACE
void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);

	/*
	 * The code sequences we use for ftrace can't be patched while the
	 * kernel is running, so we need to use stop_machine() to modify them
	 * for now.  This doesn't play nice with text_mutex, so we use this
	 * flag to elide the check.
	 */
	riscv_patch_in_stop_machine = true;
}

void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	riscv_patch_in_stop_machine = false;
	mutex_unlock(&text_mutex);
}

static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* we expect nops at the hook position */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error
	 */
	if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
				     MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1], replaced[0],
		       replaced[1]);
		return -EINVAL;
	}

	return 0;
}

static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable, bool ra)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};

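	/*
	 * "ra" selects which link register the generated auipc/jalr pair
	 * uses: callers in this file pass ra = true when patching the
	 * trampoline-internal sites (ftrace_call, ftrace_regs_call and the
	 * graph-caller sites) and ra = false for per-function call sites
	 * (see ftrace_modify_call()).
	 */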
	if (ra)
		make_call_ra(hook_pos, target, call);
	else
		make_call_t0(hook_pos, target, call);

	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
	if (patch_insn_write((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int call[2];

	make_call_t0(rec->ip, addr, call);

	if (patch_insn_write((void *)rec->ip, call, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int nops[2] = {NOP4, NOP4};

	if (patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

/*
 * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
 * text_mutex, which triggers a lockdep failure.  SMP isn't running so we could
 * just directly poke the text, but it's simpler to just take the lock
 * ourselves.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	int out;

	mutex_lock(&text_mutex);
	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	mutex_unlock(&text_mutex);

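	/*
	 * For vmlinux text (mod == NULL) nothing else flushes the patched
	 * range on this early path, so do a local flush here; module text
	 * is presumably flushed later as part of module loading, before the
	 * module can execute.
	 */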
	if (!mod)
		local_flush_icache_range(rec->ip, rec->ip + MCOUNT_INSN_SIZE);

	return out;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true, true);
	if (!ret) {
		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
					   (unsigned long)func, true, true);
	}

	return ret;
}

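/*
 * Argument block handed to __ftrace_modify_code() on every CPU via
 * stop_machine(): the ftrace command to apply and a counter used to
 * rendezvous the online CPUs around the patching step.
 */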
struct ftrace_modify_param {
	int command;
	atomic_t cpu_count;
};

static int __ftrace_modify_code(void *data)
{
	struct ftrace_modify_param *param = data;

	if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
		ftrace_modify_all_code(param->command);
		/*
		 * Make sure the patching store is effective *before* we
		 * increment the counter which releases all waiting CPUs
		 * by using the release variant of atomic increment. The
		 * release pairs with the call to local_flush_icache_all()
		 * on the waiting CPU.
		 */
		atomic_inc_return_release(&param->cpu_count);
	} else {
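		/*
		 * Spin until the patching CPU performs its second,
		 * release-ordered increment, then flush the local icache
		 * below to pick up the new text.
		 */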
		while (atomic_read(&param->cpu_count) <= num_online_cpus())
			cpu_relax();
	}

	local_flush_icache_all();

	return 0;
}

void arch_ftrace_update_code(int command)
{
	struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };

	stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	unsigned long caller = rec->ip;
	int ret;

	make_call_t0(caller, old_addr, call);
	ret = ftrace_check_current_call(caller, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(caller, addr, true, false);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_regs_call(void);
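/*
 * ftrace_graph_call and ftrace_graph_regs_call are patchable sites inside
 * the ftrace trampolines (see mcount-dyn.S); enabling the graph tracer
 * redirects them to prepare_ftrace_return().
 */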
int ftrace_enable_ftrace_graph_caller(void)
{
	int ret;

	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				   (unsigned long)&prepare_ftrace_return, true, true);
	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
				    (unsigned long)&prepare_ftrace_return, true, true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	int ret;

	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				   (unsigned long)&prepare_ftrace_return, false, true);
	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
				    (unsigned long)&prepare_ftrace_return, false, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */