#include <linux/percpu.h>
#include <linux/jump_label.h>
#include <asm/trace.h>

#ifdef HAVE_JUMP_LABEL
/*
 * This key is flipped by the tracepoint register/unregister callbacks
 * below; the OPAL call wrappers test it to decide whether to take the
 * (normally patched-out) tracing path.
 */
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;

void opal_tracepoint_regfunc(void)
{
	static_key_slow_inc(&opal_tracepoint_key);
}

void opal_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&opal_tracepoint_key);
}
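
/*
 * A sketch of the consumer side, roughly what the assembly wrappers in
 * opal-wrappers.S do (illustrative, not verbatim): the static key plants
 * a nop on the fast path, which static_key_slow_inc() above patches into
 * a branch to the tracing path:
 *
 *	#define OPAL_BRANCH(LABEL) \
 *		ARCH_STATIC_BRANCH(LABEL, opal_tracepoint_key)
 */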
#else
/*
 * We optimise OPAL calls by placing opal_tracepoint_refcount
 * directly in the TOC so we can check if the opal tracepoints are
 * enabled via a single load.
 */
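
/*
 * Illustrative sketch of that single-load check, modelled on the
 * assembly wrappers (register choice and label name are assumptions,
 * not verbatim from opal-wrappers.S):
 *
 *	ld	r12,opal_tracepoint_refcount@toc(r2)
 *	cmpdi	r12,0
 *	bne-	opal_tracepoint_entry
 *
 * i.e. one load via the TOC pointer (r2) plus a compare and branch.
 */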

/*
 * NB: reg/unreg are called while guarded with the tracepoints_mutex, so
 * the plain, non-atomic increment/decrement below is safe.
 */
extern long opal_tracepoint_refcount;

void opal_tracepoint_regfunc(void)
{
	opal_tracepoint_refcount++;
}

void opal_tracepoint_unregfunc(void)
{
	opal_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute OPAL calls we need to guard against
 * recursion: each CPU tracks its tracing depth, and nested calls are not
 * traced.
 */
static DEFINE_PER_CPU(unsigned int, opal_trace_depth);

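/*
 * A sketch of the intended call sequence, as driven from the tracing
 * path in the assembly wrappers (illustrative):
 *
 *	__trace_opal_entry(opcode, args);
 *	...the OPAL call itself...
 *	__trace_opal_exit(opcode, retval);
 *
 * Preemption is disabled in __trace_opal_entry() and only re-enabled in
 * __trace_opal_exit(), so both ends operate on the same CPU's depth
 * counter.
 */
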
void __trace_opal_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = this_cpu_ptr(&opal_trace_depth);

	/* Don't trace OPAL calls issued by the tracing code itself. */
	if (*depth)
		goto out;

	(*depth)++;
	/*
	 * Remain on this CPU until __trace_opal_exit() so the exit event
	 * is emitted from the same CPU as the entry event.
	 */
	preempt_disable();
	trace_opal_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_opal_exit(long opcode, unsigned long retval)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = this_cpu_ptr(&opal_trace_depth);

	/* Don't trace OPAL calls issued by the tracing code itself. */
	if (*depth)
		goto out;

	(*depth)++;
	trace_opal_exit(opcode, retval);
	/* Pairs with the preempt_disable() in __trace_opal_entry(). */
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}