/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM workqueue

#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WORKQUEUE_H

#include <linux/tracepoint.h>
#include <linux/workqueue.h>

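/*
 * workqueue_work is the base event class shared by the single-argument
 * events below; it records only the address of the work item involved.
 */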
DECLARE_EVENT_CLASS(workqueue_work,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *,	work	)
	),

	TP_fast_assign(
		__entry->work		= work;
	),

	TP_printk("work struct %p", __entry->work)
);

struct pool_workqueue;

/**
 * workqueue_queue_work - called when a work gets queued
 * @req_cpu:	the requested CPU
 * @pwq:	pointer to struct pool_workqueue
 * @work:	pointer to struct work_struct
 *
 * This event occurs when a work is queued immediately, or when a
 * delayed work is actually queued on a workqueue (i.e. once its delay
 * has expired).
 */
TRACE_EVENT(workqueue_queue_work,

	TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
		 struct work_struct *work),

	TP_ARGS(req_cpu, pwq, work),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
		__field( void *,	workqueue)
		__field( unsigned int,	req_cpu	)
		__field( unsigned int,	cpu	)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= work->func;
		__entry->workqueue	= pwq->wq;
		__entry->req_cpu	= req_cpu;
		__entry->cpu		= pwq->pool->cpu;
	),

	TP_printk("work struct=%p function=%ps workqueue=%p req_cpu=%u cpu=%u",
		  __entry->work, __entry->function, __entry->workqueue,
		  __entry->req_cpu, __entry->cpu)
);
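
/*
 * Illustrative sketch (not part of this header): a kernel-side probe
 * attached to workqueue_queue_work from code that can reach the
 * tracepoint (built-in code, or a module if the tracepoint symbol is
 * exported).  register_trace_workqueue_queue_work() and its unregister
 * counterpart are generated by the tracepoint machinery; a probe
 * receives the registration cookie followed by the TP_PROTO arguments
 * above.  The probe name and message are hypothetical, and struct
 * pool_workqueue is opaque outside kernel/workqueue.c, so the probe
 * does not dereference @pwq.
 *
 *	#include <linux/workqueue.h>
 *	#include <trace/events/workqueue.h>
 *
 *	static void probe_queue_work(void *data, unsigned int req_cpu,
 *				     struct pool_workqueue *pwq,
 *				     struct work_struct *work)
 *	{
 *		pr_info("queued %ps, req_cpu=%u\n", work->func, req_cpu);
 *	}
 *
 *	register_trace_workqueue_queue_work(probe_queue_work, NULL);
 *	...
 *	unregister_trace_workqueue_queue_work(probe_queue_work, NULL);
 */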

/**
 * workqueue_activate_work - called when a work gets activated
 * @work:	pointer to struct work_struct
 *
 * This event occurs when a queued work is put on the active queue,
 * which happens immediately after queueing unless the @max_active
 * limit has been reached.
 */
DEFINE_EVENT(workqueue_work, workqueue_activate_work,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work)
);
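
/*
 * Illustrative sketch (not part of this header): because this is a
 * workqueue_work-class event, a probe only receives the registration
 * cookie and the work pointer.  The probe name below is hypothetical.
 *
 *	static void probe_activate_work(void *data, struct work_struct *work)
 *	{
 *		pr_info("activated %ps\n", work->func);
 *	}
 *
 *	register_trace_workqueue_activate_work(probe_activate_work, NULL);
 */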

/**
 * workqueue_execute_start - called immediately before the workqueue callback
 * @work:	pointer to struct work_struct
 *
 * Allows tracking of workqueue execution.
 */
TRACE_EVENT(workqueue_execute_start,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= work->func;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);
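
/*
 * Illustrative sketch (not part of this header): a probe that logs the
 * callback about to run.  The same information is available from
 * userspace by enabling events/workqueue/workqueue_execute_start under
 * tracefs.  The probe name below is hypothetical.
 *
 *	static void probe_execute_start(void *data, struct work_struct *work)
 *	{
 *		pr_info("executing %ps\n", work->func);
 *	}
 *
 *	register_trace_workqueue_execute_start(probe_execute_start, NULL);
 */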

/**
 * workqueue_execute_end - called immediately after the workqueue callback
 * @work:	pointer to struct work_struct
 *
 * Allows tracking of workqueue execution.
 */
DEFINE_EVENT(workqueue_work, workqueue_execute_end,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work)
);
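
/*
 * Illustrative sketch (not part of this header): a matching
 * end-of-execution probe.  Only the work pointer is recorded by this
 * event; by the time it fires the callback has returned and the work
 * item may already have been freed or re-queued, so a probe should be
 * careful about dereferencing @work here.  The probe name below is
 * hypothetical.
 *
 *	static void probe_execute_end(void *data, struct work_struct *work)
 *	{
 *		pr_info("finished work %p\n", work);
 *	}
 *
 *	register_trace_workqueue_execute_end(probe_execute_end, NULL);
 */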

#endif /* _TRACE_WORKQUEUE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>