#include <linux/percpu.h>
#include <linux/jump_label.h>
#include <asm/trace.h>
#include <asm/asm-prototypes.h>

#ifdef HAVE_JUMP_LABEL
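/*
 * With jump labels available, the key below is used by the assembly OPAL
 * call wrappers to patch the tracepoint branch in and out, so untraced
 * calls pay (almost) nothing for tracing support.
 */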
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;

void opal_tracepoint_regfunc(void)
{
	static_key_slow_inc(&opal_tracepoint_key);
}

void opal_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&opal_tracepoint_key);
}
#else
19 /*
20  * We optimise OPAL calls by placing opal_tracepoint_refcount
21  * directly in the TOC so we can check if the opal tracepoints are
22  * enabled via a single load.
23  */
24 
25 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
26 extern long opal_tracepoint_refcount;
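/*
 * Roughly, the assembly OPAL call wrapper does the equivalent of the
 * following before branching to the tracing path (a sketch only; see
 * opal-wrappers.S for the real instruction sequence):
 *
 *	ld	r12,opal_tracepoint_refcount@toc(r2)
 *	cmpdi	r12,0
 *	bne-	opal_tracepoint_entry
 */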

void opal_tracepoint_regfunc(void)
{
	opal_tracepoint_refcount++;
}

void opal_tracepoint_unregfunc(void)
{
	opal_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute OPAL calls we need to guard against
 * recursion.
 */
static DEFINE_PER_CPU(unsigned int, opal_trace_depth);

void __trace_opal_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = this_cpu_ptr(&opal_trace_depth);

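	/*
	 * A non-zero depth means we got here from the tracing code
	 * itself (a traced OPAL call triggered another OPAL call),
	 * so skip tracing to avoid recursing.
	 */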
	if (*depth)
		goto out;

	(*depth)++;
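	/*
	 * Preemption is left disabled across the OPAL call and dropped
	 * in __trace_opal_exit(), keeping the entry/exit pair on one CPU.
	 */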
	preempt_disable();
	trace_opal_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_opal_exit(long opcode, unsigned long retval)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = this_cpu_ptr(&opal_trace_depth);

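	/* As in __trace_opal_entry(), don't trace if tracing has recursed. */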
	if (*depth)
		goto out;

	(*depth)++;
	trace_opal_exit(opcode, retval);
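	/* Matches the preempt_disable() in __trace_opal_entry(). */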
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}