1 #include <linux/percpu.h>
2 #include <linux/jump_label.h>
3 #include <asm/trace.h>
4 #include <asm/asm-prototypes.h>
5 
6 #ifdef HAVE_JUMP_LABEL
7 struct static_key opal_tracepoint_key = STATIC_KEY_INIT;
8 
/*
 * Called when the first opal tracepoint is registered: enable the
 * static key so the OPAL call wrappers branch into the tracing path.
 * Always succeeds.
 */
int opal_tracepoint_regfunc(void)
{
	static_key_slow_inc(&opal_tracepoint_key);
	return 0;
}
14 
/*
 * Called when the last opal tracepoint is unregistered: drop the
 * static key reference taken in opal_tracepoint_regfunc() so the
 * tracing branch is patched out again once the count hits zero.
 */
void opal_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&opal_tracepoint_key);
}
19 #else
20 /*
21  * We optimise OPAL calls by placing opal_tracepoint_refcount
22  * directly in the TOC so we can check if the opal tracepoints are
23  * enabled via a single load.
24  */
25 
26 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
27 extern long opal_tracepoint_refcount;
28 
29 int opal_tracepoint_regfunc(void)
30 {
31 	opal_tracepoint_refcount++;
32 	return 0;
33 }
34 
35 void opal_tracepoint_unregfunc(void)
36 {
37 	opal_tracepoint_refcount--;
38 }
39 #endif
40 
41 /*
42  * Since the tracing code might execute OPAL calls we need to guard against
43  * recursion.
44  */
45 static DEFINE_PER_CPU(unsigned int, opal_trace_depth);
46 
/*
 * Fire the opal_entry tracepoint for an OPAL call, guarding against
 * recursion: the tracing code may itself make OPAL calls, so a per-cpu
 * depth counter suppresses nested invocations.
 */
void __trace_opal_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/* Irqs off so the per-cpu depth counter can't be corrupted by
	 * an interrupt that also performs an OPAL call on this cpu. */
	local_irq_save(flags);

	depth = this_cpu_ptr(&opal_trace_depth);

	/* Non-zero depth means we recursed from within the tracing path
	 * itself; skip the tracepoint to avoid infinite recursion. */
	if (*depth)
		goto out;

	(*depth)++;
	/* Matching preempt_enable() is in __trace_opal_exit(), keeping
	 * preemption off for the duration of the traced OPAL call. */
	preempt_disable();
	trace_opal_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}
67 
/*
 * Fire the opal_exit tracepoint after an OPAL call returns, using the
 * same per-cpu depth counter as __trace_opal_entry() to suppress
 * recursion from OPAL calls made by the tracing code itself.
 */
void __trace_opal_exit(long opcode, unsigned long retval)
{
	unsigned long flags;
	unsigned int *depth;

	/* Irqs off to protect the per-cpu depth counter, as on entry. */
	local_irq_save(flags);

	depth = this_cpu_ptr(&opal_trace_depth);

	/* Recursed from within the tracing path; skip the tracepoint. */
	if (*depth)
		goto out;

	(*depth)++;
	trace_opal_exit(opcode, retval);
	/* Pairs with the preempt_disable() in __trace_opal_entry(). */
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
88