xref: /openbmc/linux/include/trace/events/sched.h (revision 089a49b6)
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);
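
/*
 * Usage sketch (editorial, not part of the upstream header): both events
 * above are emitted from kthread_stop().  With the data-passing tracepoint
 * API of this kernel generation, a probe's first argument is the private
 * data pointer given at registration and the rest mirror TP_PROTO.  The
 * probe names below are hypothetical:
 *
 *	static void probe_kthread_stop(void *data, struct task_struct *t)
 *	{
 *		pr_info("stopping kthread comm=%s pid=%d\n", t->comm, t->pid);
 *	}
 *
 *	static void probe_kthread_stop_ret(void *data, int ret)
 *	{
 *		pr_info("kthread_stop returned %d\n", ret);
 *	}
 *
 *	register_trace_sched_kthread_stop(probe_kthread_stop, NULL);
 *	register_trace_sched_kthread_stop_ret(probe_kthread_stop_ret, NULL);
 *
 * (each register call returns 0 on success).  The events can also be
 * enabled at runtime via
 * /sys/kernel/debug/tracing/events/sched/sched_kthread_stop/enable.
 */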

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(__perf_task(p), success),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= success;
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->success, __entry->target_cpu)
);

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success));
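
/*
 * Note (editorial): __perf_task(p) above roughly tells perf to attribute a
 * sample taken on this event to the woken task p rather than to the waking
 * context.  In this kernel generation the events fire from the
 * try_to_wake_up() and wake_up_new_task() paths.  A hypothetical probe:
 *
 *	static void probe_wakeup(void *data, struct task_struct *p, int success)
 *	{
 *		if (success)
 *			pr_debug("woke %s (pid %d) on cpu %d\n",
 *				 p->comm, p->pid, task_cpu(p));
 *	}
 *
 *	register_trace_sched_wakeup(probe_wakeup, NULL);
 */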

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}
#endif

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state & (TASK_STATE_MAX-1) ?
		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
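
/*
 * Decoding note (editorial): the single-letter prev_state codes above follow
 * the conventional task-state letters (S interruptible sleep, D
 * uninterruptible, T stopped, t traced, Z zombie, X/x dead, K wakekill,
 * W waking, P parked); "R" means the task was still runnable.  The
 * TASK_STATE_MAX bit set by __trace_sched_switch_state() adds the "+"
 * suffix, so a preempted task is reported as "R+".  A hypothetical probe:
 *
 *	static void probe_switch(void *data, struct task_struct *prev,
 *				 struct task_struct *next)
 *	{
 *		pr_debug("switch %s(%d) -> %s(%d)\n",
 *			 prev->comm, prev->pid, next->comm, next->pid);
 *	}
 *
 *	register_trace_sched_switch(probe_switch, NULL);
 */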

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);
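
/*
 * Usage sketch (editorial): this event fires from the migration path
 * (set_task_cpu() in this kernel generation), with orig_cpu sampled via
 * task_cpu(p) before the move takes effect.  A hypothetical probe counting
 * migrations per destination CPU:
 *
 *	static atomic_t migrations[NR_CPUS];
 *
 *	static void probe_migrate(void *data, struct task_struct *p,
 *				  int dest_cpu)
 *	{
 *		atomic_inc(&migrations[dest_cpu]);
 *	}
 *
 *	register_trace_sched_migrate_task(probe_migrate, NULL);
 */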

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));


/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));
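
/*
 * Usage sketch (editorial): sched_process_exit fires from do_exit(),
 * sched_process_free when the task_struct is finally released, and
 * sched_wait_task while waiting for a task to deschedule; all three share
 * the comm/pid/prio layout of the class above.  A hypothetical probe:
 *
 *	static void probe_exit(void *data, struct task_struct *p)
 *	{
 *		pr_debug("exit of %s (pid %d, prio %d)\n",
 *			 p->comm, p->pid, p->prio);
 *	}
 *
 *	register_trace_sched_process_exit(probe_exit, NULL);
 */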

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);
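
/*
 * Usage sketch (editorial): the fork event is emitted once the child exists
 * but before it has run, so child_comm is typically still a copy of the
 * parent's comm at this point.  A hypothetical probe logging the pid pair:
 *
 *	static void probe_fork(void *data, struct task_struct *parent,
 *			       struct task_struct *child)
 *	{
 *		pr_debug("fork: %d -> %d (%s)\n",
 *			 parent->pid, child->pid, parent->comm);
 *	}
 *
 *	register_trace_sched_process_fork(probe_fork, NULL);
 */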

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);
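
/*
 * Note (editorial): __string()/__assign_str() reserve a variable-length
 * slot in the trace buffer sized for bprm->filename, and __get_str() pulls
 * it back out at print time, so long paths need no fixed-size array here.
 * old_pid is the pid the task had before a multi-threaded exec can have
 * changed it.  A hypothetical probe:
 *
 *	static void probe_exec(void *data, struct task_struct *p,
 *			       pid_t old_pid, struct linux_binprm *bprm)
 *	{
 *		pr_debug("exec %s by pid %d (was %d)\n",
 *			 bprm->filename, p->pid, old_pid);
 *	}
 *
 *	register_trace_sched_process_exec(probe_exec, NULL);
 */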

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);


/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));
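
/*
 * Usage sketch (editorial): these four events are emitted from the
 * fair-class statistics code in kernel/sched/fair.c and generally require
 * CONFIG_SCHEDSTATS; delay is in nanoseconds, and __perf_count(delay) lets
 * a perf event on this tracepoint accumulate the delay itself rather than
 * the number of firings.  A hypothetical probe flagging long iowait:
 *
 *	static void probe_iowait(void *data, struct task_struct *tsk, u64 delay)
 *	{
 *		if (delay > 10 * NSEC_PER_MSEC)
 *			pr_debug("%s (pid %d) waited %llu ns on IO\n",
 *				 tsk->comm, tsk->pid,
 *				 (unsigned long long)delay);
 *	}
 *
 *	register_trace_sched_stat_iowait(probe_iowait, NULL);
 */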

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));
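
/*
 * Note (editorial): runtime is the time actually spent executing since the
 * last accounting update and vruntime is CFS's weighted virtual runtime,
 * both in nanoseconds; __perf_count(runtime) makes a perf event on this
 * tracepoint count executed nanoseconds instead of firings.  A hypothetical
 * probe accumulating runtime for one pid of interest (watched_pid and
 * total_ns are illustrative):
 *
 *	static pid_t watched_pid;
 *	static u64 total_ns;
 *
 *	static void probe_runtime(void *data, struct task_struct *tsk,
 *				  u64 runtime, u64 vruntime)
 *	{
 *		if (tsk->pid == watched_pid)
 *			total_ns += runtime;
 *	}
 *
 *	register_trace_sched_stat_runtime(probe_runtime, NULL);
 */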

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= newprio;
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__entry->comm, __entry->pid,
			__entry->oldprio, __entry->newprio)
);
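
/*
 * Usage sketch (editorial): this event fires from the rt-mutex priority
 * inheritance path (rt_mutex_setprio() in this kernel generation); oldprio
 * is sampled from tsk->prio before the boost or restore takes effect.  A
 * hypothetical probe:
 *
 *	static void probe_pi_setprio(void *data, struct task_struct *tsk,
 *				     int newprio)
 *	{
 *		pr_debug("pi: %s (pid %d) prio %d -> %d\n",
 *			 tsk->comm, tsk->pid, tsk->prio, newprio);
 *	}
 *
 *	register_trace_sched_pi_setprio(probe_pi_setprio, NULL);
 */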

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>