xref: /openbmc/linux/include/trace/events/sched.h (revision fd589a8f)
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM sched
3 
4 #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_SCHED_H
6 
7 #include <linux/sched.h>
8 #include <linux/tracepoint.h>
9 
10 /*
11  * Tracepoint for calling kthread_stop, performed to end a kthread:
12  */
13 TRACE_EVENT(sched_kthread_stop,
14 
	/* t: the kthread being stopped via kthread_stop(). */
15 	TP_PROTO(struct task_struct *t),
16 
17 	TP_ARGS(t),
18 
	/* Recorded per event: the target kthread's name and pid. */
19 	TP_STRUCT__entry(
20 		__array(	char,	comm,	TASK_COMM_LEN	)
21 		__field(	pid_t,	pid			)
22 	),
23 
24 	TP_fast_assign(
25 		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
26 		__entry->pid	= t->pid;
27 	),
28 
	/* Human-readable output: "task <comm>:<pid>". */
29 	TP_printk("task %s:%d", __entry->comm, __entry->pid)
30 );
31 
32 /*
33  * Tracepoint for the return value of the kthread stopping:
34  */
35 TRACE_EVENT(sched_kthread_stop_ret,
36 
	/* ret: the value returned by the stopped kthread. */
37 	TP_PROTO(int ret),
38 
39 	TP_ARGS(ret),
40 
41 	TP_STRUCT__entry(
42 		__field(	int,	ret	)
43 	),
44 
45 	TP_fast_assign(
46 		__entry->ret	= ret;
47 	),
48 
49 	TP_printk("ret %d", __entry->ret)
50 );
51 
52 /*
53  * Tracepoint for waiting on task to unschedule:
54  *
55  * (NOTE: the 'rq' argument is not used by generic trace events,
56  *        but used by the latency tracer plugin. )
57  */
58 TRACE_EVENT(sched_wait_task,
59 
	/*
	 * rq: runqueue (unused by generic events, kept for the latency
	 *     tracer plugin — see the comment above this event).
	 * p:  the task being waited on to unschedule.
	 */
60 	TP_PROTO(struct rq *rq, struct task_struct *p),
61 
62 	TP_ARGS(rq, p),
63 
	/* Snapshot of the task's name, pid and priority at wait time. */
64 	TP_STRUCT__entry(
65 		__array(	char,	comm,	TASK_COMM_LEN	)
66 		__field(	pid_t,	pid			)
67 		__field(	int,	prio			)
68 	),
69 
70 	TP_fast_assign(
71 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
72 		__entry->pid	= p->pid;
73 		__entry->prio	= p->prio;
74 	),
75 
76 	TP_printk("task %s:%d [%d]",
77 		  __entry->comm, __entry->pid, __entry->prio)
78 );
79 
80 /*
81  * Tracepoint for waking up a task:
82  *
83  * (NOTE: the 'rq' argument is not used by generic trace events,
84  *        but used by the latency tracer plugin. )
85  */
86 TRACE_EVENT(sched_wakeup,
87 
	/*
	 * rq:      runqueue (for the latency tracer plugin only).
	 * p:       the task being woken.
	 * success: whether the wakeup actually changed the task's state.
	 */
88 	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
89 
90 	TP_ARGS(rq, p, success),
91 
92 	TP_STRUCT__entry(
93 		__array(	char,	comm,	TASK_COMM_LEN	)
94 		__field(	pid_t,	pid			)
95 		__field(	int,	prio			)
96 		__field(	int,	success			)
97 		__field(	int,	cpu			)
98 	),
99 
100 	TP_fast_assign(
101 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
102 		__entry->pid		= p->pid;
103 		__entry->prio		= p->prio;
104 		__entry->success	= success;
		/* CPU the task is (or will be) running on, via task_cpu(). */
105 		__entry->cpu		= task_cpu(p);
106 	),
107 
108 	TP_printk("task %s:%d [%d] success=%d [%03d]",
109 		  __entry->comm, __entry->pid, __entry->prio,
110 		  __entry->success, __entry->cpu)
111 );
112 
113 /*
114  * Tracepoint for waking up a new task:
115  *
116  * (NOTE: the 'rq' argument is not used by generic trace events,
117  *        but used by the latency tracer plugin. )
118  */
119 TRACE_EVENT(sched_wakeup_new,
120 
	/*
	 * Same shape as sched_wakeup, but fired for a freshly created task.
	 * rq:      runqueue (for the latency tracer plugin only).
	 * p:       the new task being woken for the first time.
	 * success: whether the wakeup took effect.
	 */
121 	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
122 
123 	TP_ARGS(rq, p, success),
124 
125 	TP_STRUCT__entry(
126 		__array(	char,	comm,	TASK_COMM_LEN	)
127 		__field(	pid_t,	pid			)
128 		__field(	int,	prio			)
129 		__field(	int,	success			)
130 		__field(	int,	cpu			)
131 	),
132 
133 	TP_fast_assign(
134 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
135 		__entry->pid		= p->pid;
136 		__entry->prio		= p->prio;
137 		__entry->success	= success;
138 		__entry->cpu		= task_cpu(p);
139 	),
140 
141 	TP_printk("task %s:%d [%d] success=%d [%03d]",
142 		  __entry->comm, __entry->pid, __entry->prio,
143 		  __entry->success, __entry->cpu)
144 );
145 
146 /*
147  * Tracepoint for task switches, performed by the scheduler:
148  *
149  * (NOTE: the 'rq' argument is not used by generic trace events,
150  *        but used by the latency tracer plugin. )
151  */
152 TRACE_EVENT(sched_switch,
153 
	/*
	 * rq:   runqueue (for the latency tracer plugin only).
	 * prev: task being switched out; next: task being switched in.
	 */
154 	TP_PROTO(struct rq *rq, struct task_struct *prev,
155 		 struct task_struct *next),
156 
157 	TP_ARGS(rq, prev, next),
158 
159 	TP_STRUCT__entry(
160 		__array(	char,	prev_comm,	TASK_COMM_LEN	)
161 		__field(	pid_t,	prev_pid			)
162 		__field(	int,	prev_prio			)
163 		__field(	long,	prev_state			)
164 		__array(	char,	next_comm,	TASK_COMM_LEN	)
165 		__field(	pid_t,	next_pid			)
166 		__field(	int,	next_prio			)
167 	),
168 
	/*
	 * NOTE(review): next_comm is copied before the prev fields, so the
	 * assignment order does not mirror the field order above — appears
	 * deliberate in the original; preserve as-is.
	 */
169 	TP_fast_assign(
170 		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
171 		__entry->prev_pid	= prev->pid;
172 		__entry->prev_prio	= prev->prio;
173 		__entry->prev_state	= prev->state;
174 		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
175 		__entry->next_pid	= next->pid;
176 		__entry->next_prio	= next->prio;
177 	),
178 
	/*
	 * Decode prev_state bits (1..128) into the state letters
	 * S|D|T|t|Z|X|x|W; a zero state prints as "R" (running).
	 */
179 	TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]",
180 		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
181 		__entry->prev_state ?
182 		  __print_flags(__entry->prev_state, "|",
183 				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
184 				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
185 				{ 128, "W" }) : "R",
186 		__entry->next_comm, __entry->next_pid, __entry->next_prio)
187 );
188 
189 /*
190  * Tracepoint for a task being migrated:
191  */
192 TRACE_EVENT(sched_migrate_task,
193 
	/* p: task being migrated; dest_cpu: CPU it is moving to. */
194 	TP_PROTO(struct task_struct *p, int dest_cpu),
195 
196 	TP_ARGS(p, dest_cpu),
197 
198 	TP_STRUCT__entry(
199 		__array(	char,	comm,	TASK_COMM_LEN	)
200 		__field(	pid_t,	pid			)
201 		__field(	int,	prio			)
202 		__field(	int,	orig_cpu		)
203 		__field(	int,	dest_cpu		)
204 	),
205 
206 	TP_fast_assign(
207 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
208 		__entry->pid		= p->pid;
209 		__entry->prio		= p->prio;
		/* Source CPU taken from the task itself at trace time. */
210 		__entry->orig_cpu	= task_cpu(p);
211 		__entry->dest_cpu	= dest_cpu;
212 	),
213 
214 	TP_printk("task %s:%d [%d] from: %d  to: %d",
215 		  __entry->comm, __entry->pid, __entry->prio,
216 		  __entry->orig_cpu, __entry->dest_cpu)
217 );
218 
219 /*
220  * Tracepoint for freeing a task:
221  */
222 TRACE_EVENT(sched_process_free,
223 
	/* p: the task whose task_struct is being freed. */
224 	TP_PROTO(struct task_struct *p),
225 
226 	TP_ARGS(p),
227 
228 	TP_STRUCT__entry(
229 		__array(	char,	comm,	TASK_COMM_LEN	)
230 		__field(	pid_t,	pid			)
231 		__field(	int,	prio			)
232 	),
233 
234 	TP_fast_assign(
235 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
236 		__entry->pid		= p->pid;
237 		__entry->prio		= p->prio;
238 	),
239 
240 	TP_printk("task %s:%d [%d]",
241 		  __entry->comm, __entry->pid, __entry->prio)
242 );
243 
244 /*
245  * Tracepoint for a task exiting:
246  */
247 TRACE_EVENT(sched_process_exit,
248 
	/* p: the exiting task. */
249 	TP_PROTO(struct task_struct *p),
250 
251 	TP_ARGS(p),
252 
253 	TP_STRUCT__entry(
254 		__array(	char,	comm,	TASK_COMM_LEN	)
255 		__field(	pid_t,	pid			)
256 		__field(	int,	prio			)
257 	),
258 
259 	TP_fast_assign(
260 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
261 		__entry->pid		= p->pid;
262 		__entry->prio		= p->prio;
263 	),
264 
265 	TP_printk("task %s:%d [%d]",
266 		  __entry->comm, __entry->pid, __entry->prio)
267 );
268 
269 /*
270  * Tracepoint for a waiting task:
271  */
272 TRACE_EVENT(sched_process_wait,
273 
	/* pid: the struct pid being waited for (not a task pointer). */
274 	TP_PROTO(struct pid *pid),
275 
276 	TP_ARGS(pid),
277 
278 	TP_STRUCT__entry(
279 		__array(	char,	comm,	TASK_COMM_LEN	)
280 		__field(	pid_t,	pid			)
281 		__field(	int,	prio			)
282 	),
283 
	/*
	 * comm and prio come from 'current' (the waiting task); only the
	 * pid field refers to the waited-for pid, via pid_nr().
	 */
284 	TP_fast_assign(
285 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
286 		__entry->pid		= pid_nr(pid);
287 		__entry->prio		= current->prio;
288 	),
289 
290 	TP_printk("task %s:%d [%d]",
291 		  __entry->comm, __entry->pid, __entry->prio)
292 );
293 
294 /*
295  * Tracepoint for do_fork:
296  */
297 TRACE_EVENT(sched_process_fork,
298 
	/* parent: forking task; child: newly created task. */
299 	TP_PROTO(struct task_struct *parent, struct task_struct *child),
300 
301 	TP_ARGS(parent, child),
302 
303 	TP_STRUCT__entry(
304 		__array(	char,	parent_comm,	TASK_COMM_LEN	)
305 		__field(	pid_t,	parent_pid			)
306 		__array(	char,	child_comm,	TASK_COMM_LEN	)
307 		__field(	pid_t,	child_pid			)
308 	),
309 
310 	TP_fast_assign(
311 		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
312 		__entry->parent_pid	= parent->pid;
313 		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
314 		__entry->child_pid	= child->pid;
315 	),
316 
317 	TP_printk("parent %s:%d  child %s:%d",
318 		__entry->parent_comm, __entry->parent_pid,
319 		__entry->child_comm, __entry->child_pid)
320 );
321 
322 /*
323  * Tracepoint for sending a signal:
324  */
325 TRACE_EVENT(sched_signal_send,
326 
	/* sig: signal number being delivered; p: receiving task. */
327 	TP_PROTO(int sig, struct task_struct *p),
328 
329 	TP_ARGS(sig, p),
330 
331 	TP_STRUCT__entry(
332 		__field(	int,	sig			)
333 		__array(	char,	comm,	TASK_COMM_LEN	)
334 		__field(	pid_t,	pid			)
335 	),
336 
337 	TP_fast_assign(
338 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
339 		__entry->pid	= p->pid;
340 		__entry->sig	= sig;
341 	),
342 
343 	TP_printk("sig: %d  task %s:%d",
344 		  __entry->sig, __entry->comm, __entry->pid)
345 );
346 
347 /*
348  * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
349  *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
350  */
351 
352 /*
353  * Tracepoint for accounting wait time (time the task is runnable
354  * but not actually running due to scheduler contention).
355  */
356 TRACE_EVENT(sched_stat_wait,
357 
	/* tsk: the task that waited; delay: runnable-but-not-running time in ns. */
358 	TP_PROTO(struct task_struct *tsk, u64 delay),
359 
360 	TP_ARGS(tsk, delay),
361 
362 	TP_STRUCT__entry(
363 		__array( char,	comm,	TASK_COMM_LEN	)
364 		__field( pid_t,	pid			)
365 		__field( u64,	delay			)
366 	),
367 
368 	TP_fast_assign(
369 		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
370 		__entry->pid	= tsk->pid;
371 		__entry->delay	= delay;
372 	)
	/*
	 * NOTE(review): no comma between TP_fast_assign() and
	 * TP_perf_assign() — presumably required by this kernel version's
	 * macro expansion; confirm against the TRACE_EVENT definition.
	 * The delay is used as the perf event count.
	 */
373 	TP_perf_assign(
374 		__perf_count(delay);
375 	),
376 
	/* Cast to unsigned long long to match the %Lu format portably. */
377 	TP_printk("task: %s:%d wait: %Lu [ns]",
378 			__entry->comm, __entry->pid,
379 			(unsigned long long)__entry->delay)
380 );
381 
382 /*
383  * Tracepoint for accounting runtime (time the task is executing
384  * on a CPU).
385  */
386 TRACE_EVENT(sched_stat_runtime,
387 
	/*
	 * tsk:      the task that executed.
	 * runtime:  CPU time consumed, in ns.
	 * vruntime: the task's virtual runtime, in ns.
	 */
388 	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
389 
390 	TP_ARGS(tsk, runtime, vruntime),
391 
392 	TP_STRUCT__entry(
393 		__array( char,	comm,	TASK_COMM_LEN	)
394 		__field( pid_t,	pid			)
395 		__field( u64,	runtime			)
396 		__field( u64,	vruntime			)
397 	),
398 
399 	TP_fast_assign(
400 		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
401 		__entry->pid		= tsk->pid;
402 		__entry->runtime	= runtime;
403 		__entry->vruntime	= vruntime;
404 	)
	/* Perf counts the consumed runtime for this event. */
405 	TP_perf_assign(
406 		__perf_count(runtime);
407 	),
408 
409 	TP_printk("task: %s:%d runtime: %Lu [ns], vruntime: %Lu [ns]",
410 			__entry->comm, __entry->pid,
411 			(unsigned long long)__entry->runtime,
412 			(unsigned long long)__entry->vruntime)
413 );
414 
415 /*
416  * Tracepoint for accounting sleep time (time the task is not runnable,
417  * including iowait, see below).
418  */
419 TRACE_EVENT(sched_stat_sleep,
420 
	/* tsk: the task that slept; delay: time not runnable, in ns. */
421 	TP_PROTO(struct task_struct *tsk, u64 delay),
422 
423 	TP_ARGS(tsk, delay),
424 
425 	TP_STRUCT__entry(
426 		__array( char,	comm,	TASK_COMM_LEN	)
427 		__field( pid_t,	pid			)
428 		__field( u64,	delay			)
429 	),
430 
431 	TP_fast_assign(
432 		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
433 		__entry->pid	= tsk->pid;
434 		__entry->delay	= delay;
435 	)
	/* Perf counts the sleep delay for this event. */
436 	TP_perf_assign(
437 		__perf_count(delay);
438 	),
439 
440 	TP_printk("task: %s:%d sleep: %Lu [ns]",
441 			__entry->comm, __entry->pid,
442 			(unsigned long long)__entry->delay)
443 );
444 
445 /*
446  * Tracepoint for accounting iowait time (time the task is not runnable
447  * due to waiting on IO to complete).
448  */
449 TRACE_EVENT(sched_stat_iowait,
450 
	/* tsk: the task that waited on IO; delay: iowait time, in ns. */
451 	TP_PROTO(struct task_struct *tsk, u64 delay),
452 
453 	TP_ARGS(tsk, delay),
454 
455 	TP_STRUCT__entry(
456 		__array( char,	comm,	TASK_COMM_LEN	)
457 		__field( pid_t,	pid			)
458 		__field( u64,	delay			)
459 	),
460 
461 	TP_fast_assign(
462 		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
463 		__entry->pid	= tsk->pid;
464 		__entry->delay	= delay;
465 	)
	/* Perf counts the iowait delay for this event. */
466 	TP_perf_assign(
467 		__perf_count(delay);
468 	),
469 
470 	TP_printk("task: %s:%d iowait: %Lu [ns]",
471 			__entry->comm, __entry->pid,
472 			(unsigned long long)__entry->delay)
473 );
474 
475 #endif /* _TRACE_SCHED_H */
476 
477 /* This part must be outside protection */
478 #include <trace/define_trace.h>
479