1c49f6353SAnton Blanchard #include <linux/percpu.h>
2c49f6353SAnton Blanchard #include <linux/jump_label.h>
3c49f6353SAnton Blanchard #include <asm/trace.h>
442f5b4caSDaniel Axtens #include <asm/asm-prototypes.h>
5c49f6353SAnton Blanchard 
#ifdef HAVE_JUMP_LABEL
/*
 * When jump labels are available the OPAL call wrappers test this static
 * key; flipping it patches the tracing branch in or out of the call path.
 */
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;

/* Tracepoint registration hook: take a reference on the static key. */
void opal_tracepoint_regfunc(void)
{
	static_key_slow_inc(&opal_tracepoint_key);
}

/* Tracepoint unregistration hook: drop the static key reference. */
void opal_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&opal_tracepoint_key);
}
#else
/*
 * We optimise OPAL calls by placing opal_tracepoint_refcount
 * directly in the TOC so we can check if the opal tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long opal_tracepoint_refcount;

/* Tracepoint registration hook: bump the TOC-resident refcount. */
void opal_tracepoint_regfunc(void)
{
	opal_tracepoint_refcount++;
}

/* Tracepoint unregistration hook: drop the TOC-resident refcount. */
void opal_tracepoint_unregfunc(void)
{
	opal_tracepoint_refcount--;
}
#endif
38c49f6353SAnton Blanchard 
/*
 * Since the tracing code might execute OPAL calls we need to guard against
 * recursion.
 */
/* Per-CPU recursion depth: non-zero while a trace hook runs on this CPU. */
static DEFINE_PER_CPU(unsigned int, opal_trace_depth);
44c49f6353SAnton Blanchard 
45c49f6353SAnton Blanchard void __trace_opal_entry(unsigned long opcode, unsigned long *args)
46c49f6353SAnton Blanchard {
47c49f6353SAnton Blanchard 	unsigned long flags;
48c49f6353SAnton Blanchard 	unsigned int *depth;
49c49f6353SAnton Blanchard 
50c49f6353SAnton Blanchard 	local_irq_save(flags);
51c49f6353SAnton Blanchard 
5269111bacSChristoph Lameter 	depth = this_cpu_ptr(&opal_trace_depth);
53c49f6353SAnton Blanchard 
54c49f6353SAnton Blanchard 	if (*depth)
55c49f6353SAnton Blanchard 		goto out;
56c49f6353SAnton Blanchard 
57c49f6353SAnton Blanchard 	(*depth)++;
58c49f6353SAnton Blanchard 	preempt_disable();
59c49f6353SAnton Blanchard 	trace_opal_entry(opcode, args);
60c49f6353SAnton Blanchard 	(*depth)--;
61c49f6353SAnton Blanchard 
62c49f6353SAnton Blanchard out:
63c49f6353SAnton Blanchard 	local_irq_restore(flags);
64c49f6353SAnton Blanchard }
65c49f6353SAnton Blanchard 
66c49f6353SAnton Blanchard void __trace_opal_exit(long opcode, unsigned long retval)
67c49f6353SAnton Blanchard {
68c49f6353SAnton Blanchard 	unsigned long flags;
69c49f6353SAnton Blanchard 	unsigned int *depth;
70c49f6353SAnton Blanchard 
71c49f6353SAnton Blanchard 	local_irq_save(flags);
72c49f6353SAnton Blanchard 
7369111bacSChristoph Lameter 	depth = this_cpu_ptr(&opal_trace_depth);
74c49f6353SAnton Blanchard 
75c49f6353SAnton Blanchard 	if (*depth)
76c49f6353SAnton Blanchard 		goto out;
77c49f6353SAnton Blanchard 
78c49f6353SAnton Blanchard 	(*depth)++;
79c49f6353SAnton Blanchard 	trace_opal_exit(opcode, retval);
80c49f6353SAnton Blanchard 	preempt_enable();
81c49f6353SAnton Blanchard 	(*depth)--;
82c49f6353SAnton Blanchard 
83c49f6353SAnton Blanchard out:
84c49f6353SAnton Blanchard 	local_irq_restore(flags);
85c49f6353SAnton Blanchard }
86