xref: /openbmc/linux/include/trace/events/sched.h (revision 63dc02bd)
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

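/*
 * Note: since TRACE_SYSTEM is "sched", the events defined here are
 * exposed by ftrace under events/sched/ in the tracing directory
 * (e.g. /sys/kernel/debug/tracing/events/sched/<event>/enable).
 */
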
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of kthread_stop():
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= success;
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->success, __entry->target_cpu)
);

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success));
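
/*
 * Note: sched_wakeup and sched_wakeup_new share the sched_wakeup_template
 * event class above, so one TP_STRUCT__entry/TP_fast_assign/TP_printk
 * definition serves both events.
 */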

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}
#endif

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state & (TASK_STATE_MAX-1) ?
		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "W" }) : "R",
		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
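
/*
 * Note: in the sched_switch output above, prev_state decodes the low
 * state bits ("R" when none are set), while the extra TASK_STATE_MAX bit
 * set by __trace_sched_switch_state() for a preempted task is printed
 * as a trailing "+" (e.g. "prev_state=R+", illustrative only).
 */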

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));


/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on a task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);
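
/*
 * Note: comm and prio above are taken from the waiting task (current),
 * while pid is the pid being waited on (pid_nr(pid)).
 */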

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);

/*
 * XXX the sched_stat tracepoints below only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	)
	TP_perf_assign(
		__perf_count(delay);
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);
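
/*
 * Note: with __perf_count(delay) above, when one of these events backs a
 * perf event each occurrence is weighted by "delay", so the perf counter
 * accumulates delay time (in ns) rather than the number of events.
 */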


/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task spends in
 * uninterruptible sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
TRACE_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	)
	TP_perf_assign(
		__perf_count(runtime);
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= newprio;
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__entry->comm, __entry->pid,
			__entry->oldprio, __entry->newprio)
);

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
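/*
 * When the including .c file defines CREATE_TRACE_POINTS, define_trace.h
 * re-includes this header (with TRACE_HEADER_MULTI_READ set) to expand the
 * TRACE_EVENT() macros above into the actual tracepoint and ftrace event
 * definitions, which is why it must stay outside the _TRACE_SCHED_H guard.
 */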
#include <trace/define_trace.h>