// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Matt Fleming <matt@console-pimps.org>
 * Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org>
 *
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/unistd.h>
#include <trace/syscall.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];

static unsigned char ftrace_nop[4];
/*
 * If we're trying to nop out a call to a function, we instead
 * place a call to the address after the memory table.
 *
 * 8c011060 <a>:
 * 8c011060:       02 d1           mov.l   8c01106c <a+0xc>,r1
 * 8c011062:       22 4f           sts.l   pr,@-r15
 * 8c011064:       02 c7           mova    8c011070 <a+0x10>,r0
 * 8c011066:       2b 41           jmp     @r1
 * 8c011068:       2a 40           lds     r0,pr
 * 8c01106a:       09 00           nop
 * 8c01106c:       68 24           .word 0x2468     <--- ip
 * 8c01106e:       1d 8c           .word 0x8c1d
 * 8c011070:       26 4f           lds.l   @r15+,pr <--- ip + MCOUNT_INSN_SIZE
 *
 * We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch
 * past the _mcount call and continue executing code like normal.
 */
static unsigned char *ftrace_nop_replace(unsigned long ip)
{
	__raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);
	return ftrace_nop;
}

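/*
 * Build the replacement text for patching in a call to addr at ip.
 * As the disassembly above shows, the call target lives in the
 * literal word at the call site, so the replacement text is simply
 * the target address itself.
 */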
static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/* Place the address in the memory table. */
	__raw_writel(addr, ftrace_replaced_code);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return ftrace_replaced_code;
}

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: An IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Wait for any running NMIs to finish and set a flag that says
 *    we are modifying code; this is done in an atomic operation.
 * 3) Write the code.
 * 4) Clear the flag.
 * 5) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code
 * it is OK to write to that code location if the contents being written
 * are the same as what exists.
 */
#define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status;		/* holds return value of text write */
static void *mod_code_ip;		/* holds the IP to write to */
static void *mod_code_newcode;		/* holds the text to write to the IP */

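/*
 * Atomically clear MOD_CODE_WRITE_FLAG in nmi_running while leaving
 * the NMI count held in the low bits untouched.
 */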
static void clear_mod_flag(void)
{
	int old = atomic_read(&nmi_running);

	for (;;) {
		int new = old & ~MOD_CODE_WRITE_FLAG;

		if (old == new)
			break;

		old = atomic_cmpxchg(&nmi_running, old, new);
	}
}

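/*
 * Perform the actual text write. This runs on the CPU doing the
 * modification and, while MOD_CODE_WRITE_FLAG is set, from any CPU
 * that takes an NMI, so it must be safe to run concurrently.
 */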
static void ftrace_mod_code(void)
{
	/*
	 * Yes, more than one CPU may be writing to mod_code_status
	 * (and the code itself).
	 * But if one were to fail, then they all should, and if one were
	 * to succeed, then they all should.
	 */
	mod_code_status = copy_to_kernel_nofault(mod_code_ip, mod_code_newcode,
					     MCOUNT_INSN_SIZE);

	/* if we fail, then kill any new writers */
	if (mod_code_status)
		clear_mod_flag();
}

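/*
 * NMI entry hook: bump the NMI count and, if a modification is
 * pending, perform the write ourselves so this NMI never executes
 * half-modified text.
 */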
void arch_ftrace_nmi_enter(void)
{
	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
		smp_rmb();
		ftrace_mod_code();
	}
	/* Must have previous changes seen before executions */
	smp_mb();
}

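/* NMI exit hook: drop our reference so the modifier can proceed. */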
void arch_ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing nmi_running */
	smp_mb();
	atomic_dec(&nmi_running);
}

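/*
 * Set MOD_CODE_WRITE_FLAG, but only from a state where no NMI is
 * running; spin until the 0 -> flag transition succeeds.
 */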
static void wait_for_nmi_and_set_mod_flag(void)
{
	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
		return;

	do {
		cpu_relax();
	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
}

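/* Spin until every NMI currently in flight has finished. */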
static void wait_for_nmi(void)
{
	if (!atomic_read(&nmi_running))
		return;

	do {
		cpu_relax();
	} while (atomic_read(&nmi_running));
}

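/*
 * Carry out steps 1-5 described above: publish the target ip and new
 * text, write the code with the NMI flag held, then wait for any late
 * NMI writers to drain before reporting the status.
 */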
static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_mb();

	wait_for_nmi_and_set_mod_flag();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_mb();

	clear_mod_flag();
	wait_for_nmi();

	return mod_code_status;
}

static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		       unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with
	 * copy_{from,to}_kernel_nofault(), and make sure what we read is what
	 * we expected it to be before modifying it.
	 */

	/* read the text we want to modify */
	if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);

	return 0;
}

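/*
 * Repoint the ftrace_call site in the mcount stub at the currently
 * selected tracer function.
 */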
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
	unsigned char old[MCOUNT_INSN_SIZE], *new;

	memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);

	return ftrace_modify_code(ip, old, new);
}

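/*
 * Turn the call to addr at rec->ip into a "nop": redirect the literal
 * word so entry to the function branches straight past the mcount
 * call, as described above.
 */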
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace(ip);

	return ftrace_modify_code(rec->ip, old, new);
}

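/* Reinstate the call to addr at a call site that was nopped out. */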
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(ip);
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

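/*
 * Swap one literal-pool target at ip for another, after checking
 * that the word currently there is the value we expect.
 */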
static int ftrace_mod(unsigned long ip, unsigned long old_addr,
		      unsigned long new_addr)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (copy_from_kernel_nofault(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (old_addr != __raw_readl((unsigned long *)code))
		return -EINVAL;

	__raw_writel(new_addr, ip);
	return 0;
}

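/*
 * Patch the ftrace_graph_call site so that it enters
 * ftrace_graph_caller instead of branching to skip_trace.
 */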
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&skip_trace);
	new_addr = (unsigned long)(&ftrace_graph_caller);

	return ftrace_mod(ip, old_addr, new_addr);
}

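/*
 * Undo the above: send the ftrace_graph_call site back to skip_trace
 * so the graph hook is bypassed.
 */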
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&ftrace_graph_caller);
	new_addr = (unsigned long)(&skip_trace);

	return ftrace_mod(ip, old_addr, new_addr);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the stack of return addrs
 * in the current thread info.
 *
 * This is the main routine for the function graph tracer. The function
 * graph tracer essentially works like this:
 *
 * parent is the stack address containing self_addr's return address.
 * We pull the real return address out of parent and store it in
 * current's ret_stack. Then, we replace the return address on the stack
 * with the address of return_to_handler. self_addr is the function that
 * called mcount.
 *
 * When self_addr returns, it will jump to return_to_handler which calls
 * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
 * return address off of current's ret_stack and jump to it.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * forgo such a protection.
	 */
	__asm__ __volatile__(
		"1:						\n\t"
		"mov.l		@%2, %0				\n\t"
		"2:						\n\t"
		"mov.l		%3, @%2				\n\t"
		"mov		#0, %1				\n\t"
		"3:						\n\t"
		".section .fixup, \"ax\"			\n\t"
		"4:						\n\t"
		"mov.l		5f, %0				\n\t"
		"jmp		@%0				\n\t"
		" mov		#1, %1				\n\t"
		".balign 4					\n\t"
		"5:	.long 3b				\n\t"
		".previous					\n\t"
		".section __ex_table,\"a\"			\n\t"
		".long 1b, 4b					\n\t"
		".long 2b, 4b					\n\t"
		".previous					\n\t"
		: "=&r" (old), "=r" (faulted)
		: "r" (parent), "r" (return_hooker)
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (function_graph_enter(old, self_addr, 0, NULL))
		__raw_writel(old, parent);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */