1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2c49f6353SAnton Blanchard #include <linux/percpu.h>
3c49f6353SAnton Blanchard #include <linux/jump_label.h>
4c49f6353SAnton Blanchard #include <asm/trace.h>
542f5b4caSDaniel Axtens #include <asm/asm-prototypes.h>
6c49f6353SAnton Blanchard 
#ifdef HAVE_JUMP_LABEL
/*
 * With jump labels available, the enabled/disabled check in the OPAL
 * call path is a patched-in branch keyed off this static key, so the
 * disabled case costs no memory load.
 */
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;

/*
 * Tracepoint registration callback: enable the static key when the
 * first opal_entry/opal_exit probe is attached.
 * Returns 0 (registration cannot fail here).
 */
int opal_tracepoint_regfunc(void)
{
	static_key_slow_inc(&opal_tracepoint_key);
	return 0;
}

/* Tracepoint unregistration callback: drop the static key reference. */
void opal_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&opal_tracepoint_key);
}
#else
/*
 * We optimise OPAL calls by placing opal_tracepoint_refcount
 * directly in the TOC so we can check if the opal tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long opal_tracepoint_refcount;

/*
 * Tracepoint registration callback: bump the refcount the OPAL call
 * assembly tests.  Plain ++ is safe because tracepoints_mutex
 * serialises reg/unreg (see NB above).  Returns 0 (cannot fail).
 */
int opal_tracepoint_regfunc(void)
{
	opal_tracepoint_refcount++;
	return 0;
}

/* Tracepoint unregistration callback: drop the refcount. */
void opal_tracepoint_unregfunc(void)
{
	opal_tracepoint_refcount--;
}
#endif
41c49f6353SAnton Blanchard 
/*
 * Since the tracing code might execute OPAL calls we need to guard against
 * recursion.
 *
 * Per-CPU recursion depth: nonzero means an OPAL trace is already in
 * flight on this CPU, so the nested call is not traced.  Accessed only
 * with local interrupts disabled (see __trace_opal_entry/_exit).
 */
static DEFINE_PER_CPU(unsigned int, opal_trace_depth);
47c49f6353SAnton Blanchard 
48c49f6353SAnton Blanchard void __trace_opal_entry(unsigned long opcode, unsigned long *args)
49c49f6353SAnton Blanchard {
50c49f6353SAnton Blanchard 	unsigned long flags;
51c49f6353SAnton Blanchard 	unsigned int *depth;
52c49f6353SAnton Blanchard 
53c49f6353SAnton Blanchard 	local_irq_save(flags);
54c49f6353SAnton Blanchard 
5569111bacSChristoph Lameter 	depth = this_cpu_ptr(&opal_trace_depth);
56c49f6353SAnton Blanchard 
57c49f6353SAnton Blanchard 	if (*depth)
58c49f6353SAnton Blanchard 		goto out;
59c49f6353SAnton Blanchard 
60c49f6353SAnton Blanchard 	(*depth)++;
61c49f6353SAnton Blanchard 	preempt_disable();
62c49f6353SAnton Blanchard 	trace_opal_entry(opcode, args);
63c49f6353SAnton Blanchard 	(*depth)--;
64c49f6353SAnton Blanchard 
65c49f6353SAnton Blanchard out:
66c49f6353SAnton Blanchard 	local_irq_restore(flags);
67c49f6353SAnton Blanchard }
68c49f6353SAnton Blanchard 
69c49f6353SAnton Blanchard void __trace_opal_exit(long opcode, unsigned long retval)
70c49f6353SAnton Blanchard {
71c49f6353SAnton Blanchard 	unsigned long flags;
72c49f6353SAnton Blanchard 	unsigned int *depth;
73c49f6353SAnton Blanchard 
74c49f6353SAnton Blanchard 	local_irq_save(flags);
75c49f6353SAnton Blanchard 
7669111bacSChristoph Lameter 	depth = this_cpu_ptr(&opal_trace_depth);
77c49f6353SAnton Blanchard 
78c49f6353SAnton Blanchard 	if (*depth)
79c49f6353SAnton Blanchard 		goto out;
80c49f6353SAnton Blanchard 
81c49f6353SAnton Blanchard 	(*depth)++;
82c49f6353SAnton Blanchard 	trace_opal_exit(opcode, retval);
83c49f6353SAnton Blanchard 	preempt_enable();
84c49f6353SAnton Blanchard 	(*depth)--;
85c49f6353SAnton Blanchard 
86c49f6353SAnton Blanchard out:
87c49f6353SAnton Blanchard 	local_irq_restore(flags);
88c49f6353SAnton Blanchard }
89