/* include/trace/events/sched.h (revision 39b6f3aa) */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
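
/*
 * Usage sketch (illustrative, not part of the original sources): like any
 * event generated by TRACE_EVENT(), sched_kthread_stop can be enabled at
 * runtime through tracefs and read back from the trace buffer:
 *
 *   echo 1 > /sys/kernel/debug/tracing/events/sched/sched_kthread_stop/enable
 *   cat /sys/kernel/debug/tracing/trace
 *
 * Each hit is rendered with the TP_printk() format above, e.g.
 * "sched_kthread_stop: comm=kworker/0:1 pid=42".
 */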

/*
 * Tracepoint for the return value of kthread_stop():
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= success;
		__entry->target_cpu	= task_cpu(p);
	)
	TP_perf_assign(
		__perf_task(p);
	),

	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->success, __entry->target_cpu)
);

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success));

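/*
 * A minimal sketch (illustrative only) of hooking sched_wakeup from a
 * module: DEFINE_EVENT() above generates register/unregister helpers, and
 * the probe receives the registration cookie followed by the TP_PROTO()
 * arguments:
 *
 *	static void my_wakeup_probe(void *data, struct task_struct *p,
 *				    int success)
 *	{
 *		pr_info("wakeup: comm=%s pid=%d\n", p->comm, p->pid);
 *	}
 *
 *	register_trace_sched_wakeup(my_wakeup_probe, NULL);
 *	...
 *	unregister_trace_sched_wakeup(my_wakeup_probe, NULL);
 */
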
#ifdef CREATE_TRACE_POINTS
/*
 * Only needed by the TP_fast_assign() expansion of sched_switch below,
 * which is generated in the translation unit that defines
 * CREATE_TRACE_POINTS before including this header.
 */
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}
#endif

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state & (TASK_STATE_MAX-1) ?
		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);

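/*
 * Example output (illustrative): with the flag table above, prev_state is
 * decoded as S (interruptible sleep), D (uninterruptible), T/t (stopped/
 * traced), Z/X/x (zombie/dead), and plain R when the task is still
 * runnable; the trailing "+" marks a preemption (TASK_STATE_MAX set by
 * __trace_sched_switch_state()). A typical line looks like:
 *
 *   sched_switch: prev_comm=bash prev_pid=1274 prev_prio=120 prev_state=S
 *     ==> next_comm=swapper/1 next_pid=0 next_prio=120
 */
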
/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);
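
/*
 * Note (illustrative): __string()/__assign_str() store the filename as a
 * variable-length string inside the trace record, so a typical ftrace line
 * for this event looks like
 *
 *   sched_process_exec: filename=/bin/ls pid=1301 old_pid=1301
 *
 * (pid and old_pid differ when a non-leader thread called exec and took
 * over the thread-group leader's pid).
 */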

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	)
	TP_perf_assign(
		__perf_count(delay);
		__perf_task(tsk);
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task spends in
 * uninterruptible sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

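/*
 * Consumption note (illustrative): because sched_stat_template feeds the
 * delay into __perf_count(), perf records the delay as the event's count
 * rather than a simple hit count, e.g.
 *
 *   perf record -e sched:sched_stat_sleep -e sched:sched_switch -a sleep 1
 *   perf report
 *
 * From ftrace the same events simply print "comm=... pid=... delay=... [ns]".
 */
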
/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
TRACE_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	)
	TP_perf_assign(
		__perf_count(runtime);
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= newprio;
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__entry->comm, __entry->pid,
			__entry->oldprio, __entry->newprio)
);
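
/*
 * Example output (illustrative): when an rt-mutex boosts a SCHED_OTHER
 * task, the event records the old and the inherited priority, e.g.
 * "sched_pi_setprio: comm=worker pid=2104 oldprio=120 newprio=19".
 */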

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>