#include <linux/percpu.h>
#include <linux/jump_label.h>
#include <asm/trace.h>

#ifdef CONFIG_JUMP_LABEL
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;

void opal_tracepoint_regfunc(void)
{
	static_key_slow_inc(&opal_tracepoint_key);
}

void opal_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&opal_tracepoint_key);
}
#else
/*
 * We optimise OPAL calls by placing opal_tracepoint_refcount
 * directly in the TOC so we can check if the opal tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long opal_tracepoint_refcount;

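/*
 * On the assembly side each OPAL wrapper tests this refcount with a
 * single TOC-relative load before branching to the traced entry path.
 * A minimal sketch of that check (illustrative only; the real code
 * lives in opal-wrappers.S, and the register and label choices below
 * are assumptions, not the exact implementation):
 *
 *	ld	r12,opal_tracepoint_refcount@toc(r2)
 *	cmpdi	r12,0
 *	bne-	opal_tracepoint_entry
 */
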
void opal_tracepoint_regfunc(void)
{
	opal_tracepoint_refcount++;
}

void opal_tracepoint_unregfunc(void)
{
	opal_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute OPAL calls we need to guard against
 * recursion.
 */
static DEFINE_PER_CPU(unsigned int, opal_trace_depth);

void __trace_opal_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = &__get_cpu_var(opal_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	/*
	 * Preemption stays disabled across the OPAL call and is
	 * re-enabled in __trace_opal_exit().
	 */
	preempt_disable();
	trace_opal_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_opal_exit(long opcode, unsigned long retval)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = &__get_cpu_var(opal_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_opal_exit(opcode, retval);
	/* balances the preempt_disable() taken in __trace_opal_entry() */
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}