1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2d0b6e04aSLi Zefan #undef TRACE_SYSTEM
3d0b6e04aSLi Zefan #define TRACE_SYSTEM sched
4d0b6e04aSLi Zefan
5ad8d75ffSSteven Rostedt #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
6ad8d75ffSSteven Rostedt #define _TRACE_SCHED_H
7ad8d75ffSSteven Rostedt
8f630c7c6SRob Clark #include <linux/kthread.h>
96a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
10ad8d75ffSSteven Rostedt #include <linux/tracepoint.h>
114ff16c25SDavid Smith #include <linux/binfmts.h>
12ad8d75ffSSteven Rostedt
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 * @t: the kthread being asked to stop (comm/pid are snapshotted at stop time)
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
34ad8d75ffSSteven Rostedt
/*
 * Tracepoint for the return value of the kthread stopping:
 * @ret: the threadfn() exit code returned by kthread_stop()
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);
54ad8d75ffSSteven Rostedt
/**
 * sched_kthread_work_queue_work - called when a work gets queued
 * @worker: pointer to the kthread_worker
 * @work: pointer to struct kthread_work
 *
 * This event occurs when a work is queued immediately or once a
 * delayed work is actually queued (ie: once the delay has been
 * reached).
 */
TRACE_EVENT(sched_kthread_work_queue_work,

	TP_PROTO(struct kthread_worker *worker,
		 struct kthread_work *work),

	TP_ARGS(worker, work),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
		__field( void *,	worker)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= work->func;
		__entry->worker		= worker;
	),

	/* %ps resolves the callback pointer to a symbol name */
	TP_printk("work struct=%p function=%ps worker=%p",
		  __entry->work, __entry->function, __entry->worker)
);
86f630c7c6SRob Clark
/**
 * sched_kthread_work_execute_start - called immediately before the work callback
 * @work: pointer to struct kthread_work
 *
 * Allows to track kthread work execution.  Pairs with
 * sched_kthread_work_execute_end.
 */
TRACE_EVENT(sched_kthread_work_execute_start,

	TP_PROTO(struct kthread_work *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= work->func;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);
111f630c7c6SRob Clark
/**
 * sched_kthread_work_execute_end - called immediately after the work callback
 * @work: pointer to struct kthread_work
 * @function: pointer to worker function
 *
 * Allows to track kthread work execution.  @function is passed separately
 * because the work item may already have been freed or re-queued by the
 * time the callback returns, so work->func cannot be dereferenced here.
 */
TRACE_EVENT(sched_kthread_work_execute_end,

	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),

	TP_ARGS(work, function),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= function;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);
137f630c7c6SRob Clark
/*
 * Tracepoint for waking up a task:
 *
 * Shared event class for sched_waking/sched_wakeup/sched_wakeup_new.
 * __perf_task() marks @p as the task perf should attribute samples to.
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);
165ad8d75ffSSteven Rostedt
/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
173fbd705a0SPeter Zijlstra
/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
18175ec29abSSteven Rostedt
/*
 * Tracepoint for waking up a new task (first wakeup after fork):
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
188ad8d75ffSSteven Rostedt
18902f72694SPeter Zijlstra #ifdef CREATE_TRACE_POINTS
__trace_sched_switch_state(bool preempt,unsigned int prev_state,struct task_struct * p)190fa2c3254SValentin Schneider static inline long __trace_sched_switch_state(bool preempt,
191fa2c3254SValentin Schneider unsigned int prev_state,
192fa2c3254SValentin Schneider struct task_struct *p)
19302f72694SPeter Zijlstra {
1943054426dSPavankumar Kondeti unsigned int state;
1953054426dSPavankumar Kondeti
1968f9fbf09SOleg Nesterov #ifdef CONFIG_SCHED_DEBUG
1978f9fbf09SOleg Nesterov BUG_ON(p != current);
1988f9fbf09SOleg Nesterov #endif /* CONFIG_SCHED_DEBUG */
19902f72694SPeter Zijlstra
200c73464b1SPeter Zijlstra /*
201c73464b1SPeter Zijlstra * Preemption ignores task state, therefore preempted tasks are always
202c73464b1SPeter Zijlstra * RUNNING (we will not have dequeued if state != RUNNING).
203c73464b1SPeter Zijlstra */
204efb40f58SPeter Zijlstra if (preempt)
2053f5fe9feSThomas Gleixner return TASK_REPORT_MAX;
206efb40f58SPeter Zijlstra
2073054426dSPavankumar Kondeti /*
2083054426dSPavankumar Kondeti * task_state_index() uses fls() and returns a value from 0-8 range.
2093054426dSPavankumar Kondeti * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
2103054426dSPavankumar Kondeti * it for left shift operation to get the correct task->state
2113054426dSPavankumar Kondeti * mapping.
2123054426dSPavankumar Kondeti */
213fa2c3254SValentin Schneider state = __task_state_index(prev_state, p->exit_state);
2143054426dSPavankumar Kondeti
2153054426dSPavankumar Kondeti return state ? (1 << (state - 1)) : state;
21602f72694SPeter Zijlstra }
2178f9fbf09SOleg Nesterov #endif /* CREATE_TRACE_POINTS */
21802f72694SPeter Zijlstra
/*
 * Tracepoint for task switches, performed by the scheduler:
 *
 * prev_state is passed in explicitly because by the time the event fires
 * prev->__state may already have been modified by a remote wakeup.
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next,
		 unsigned int prev_state),

	TP_ARGS(preempt, prev, next, prev_state),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(preempt, prev_state, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
		/* XXX SCHED_DEADLINE */
	),

	/*
	 * Low bits decode to one state letter; the TASK_REPORT_MAX bit set
	 * by __trace_sched_switch_state() marks a preempted task with "+".
	 */
	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

		(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
		  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
				{ TASK_INTERRUPTIBLE, "S" },
				{ TASK_UNINTERRUPTIBLE, "D" },
				{ __TASK_STOPPED, "T" },
				{ __TASK_TRACED, "t" },
				{ EXIT_DEAD, "X" },
				{ EXIT_ZOMBIE, "Z" },
				{ TASK_PARKED, "P" },
				{ TASK_DEAD, "I" }) :
		  "R",

		__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
270ad8d75ffSSteven Rostedt
/*
 * Tracepoint for a task being migrated:
 * @p: the migrating task
 * @dest_cpu: CPU it is being moved to (orig_cpu is read from the task itself)
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);
300ad8d75ffSSteven Rostedt
/*
 * Shared event class recording comm/pid/prio of a task; used by the
 * process lifetime events below (free/exit/wait_task).
 */
DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);
322ad8d75ffSSteven Rostedt
/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
343210f7669SLi Zefan
/*
 * Tracepoint for a waiting task:
 * @pid: the struct pid being waited on; comm/prio are those of current
 * (the waiter), not of the task being waited for.
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);
368ad8d75ffSSteven Rostedt
/*
 * Tracepoint for kernel_clone:
 * @parent: forking task
 * @child: freshly created task
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);
396ad8d75ffSSteven Rostedt
397ad8d75ffSSteven Rostedt /*
3984ff16c25SDavid Smith * Tracepoint for exec:
3994ff16c25SDavid Smith */
4004ff16c25SDavid Smith TRACE_EVENT(sched_process_exec,
4014ff16c25SDavid Smith
4024ff16c25SDavid Smith TP_PROTO(struct task_struct *p, pid_t old_pid,
4034ff16c25SDavid Smith struct linux_binprm *bprm),
4044ff16c25SDavid Smith
4054ff16c25SDavid Smith TP_ARGS(p, old_pid, bprm),
4064ff16c25SDavid Smith
4074ff16c25SDavid Smith TP_STRUCT__entry(
4084ff16c25SDavid Smith __string( filename, bprm->filename )
4094ff16c25SDavid Smith __field( pid_t, pid )
4104ff16c25SDavid Smith __field( pid_t, old_pid )
4114ff16c25SDavid Smith ),
4124ff16c25SDavid Smith
4134ff16c25SDavid Smith TP_fast_assign(
4144ff16c25SDavid Smith __assign_str(filename, bprm->filename);
4154ff16c25SDavid Smith __entry->pid = p->pid;
4166308191fSOleg Nesterov __entry->old_pid = old_pid;
4174ff16c25SDavid Smith ),
4184ff16c25SDavid Smith
4194ff16c25SDavid Smith TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
4204ff16c25SDavid Smith __entry->pid, __entry->old_pid)
4214ff16c25SDavid Smith );
4224ff16c25SDavid Smith

/*
 * The sched_stat_* events below only exist when schedstats are compiled
 * in; without CONFIG_SCHEDSTATS they expand to the _NOP variants, which
 * emit no code or trace ABI.
 */
#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 *
 * @delay is in nanoseconds; __perf_count() feeds it to perf as the event
 * count, __perf_task() attributes the sample to @tsk.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);
458768d0c27SPeter Zijlstra
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));
489b781a602SAndrew Vagin
/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 *
 * Declared as a class with a single event of the same name so other
 * events can reuse the layout; __perf_count(runtime) makes perf count
 * executed nanoseconds rather than event occurrences.
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));
52336009d07SOleg Nesterov
/*
 * Tracepoint for showing priority inheritance modifying a tasks
 * priority.
 * @tsk: the task whose priority is boosted/restored
 * @pi_task: the top pi-waiter donating its priority, or NULL when the
 *           boost is removed (newprio then falls back to normal_prio)
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= pi_task ?
				min(tsk->normal_prio, pi_task->prio) :
				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__entry->comm, __entry->pid,
			__entry->oldprio, __entry->newprio)
);
555a8027073SSteven Rostedt
#ifdef CONFIG_DETECT_HUNG_TASK
/*
 * Tracepoint fired by the hung-task detector when @tsk appears stuck.
 */
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */
5746a716c90SOleg Nesterov
/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 * Node ids are derived from the CPUs via cpu_to_node().
 */
TRACE_EVENT(sched_move_numa,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	pid			)
		__field( pid_t,	tgid			)
		__field( pid_t,	ngid			)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->pid		= task_pid_nr(tsk);
		__entry->tgid		= task_tgid_nr(tsk);
		__entry->ngid		= task_numa_group_id(tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
			__entry->pid, __entry->tgid, __entry->ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_cpu, __entry->dst_nid)
);
610286549dcSMel Gorman
/*
 * Event class for NUMA-balancing events involving a source/destination
 * task pair.  @dst_tsk may be NULL and @dst_cpu may be negative (no
 * destination chosen); the dst_* fields then read 0 / -1.
 */
DECLARE_EVENT_CLASS(sched_numa_pair_template,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	src_pid			)
		__field( pid_t,	src_tgid		)
		__field( pid_t,	src_ngid		)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( pid_t,	dst_pid			)
		__field( pid_t,	dst_tgid		)
		__field( pid_t,	dst_ngid		)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->src_pid	= task_pid_nr(src_tsk);
		__entry->src_tgid	= task_tgid_nr(src_tsk);
		__entry->src_ngid	= task_numa_group_id(src_tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_pid	= dst_tsk ? task_pid_nr(dst_tsk) : 0;
		__entry->dst_tgid	= dst_tsk ? task_tgid_nr(dst_tsk) : 0;
		__entry->dst_ngid	= dst_tsk ? task_numa_group_id(dst_tsk) : 0;
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
			__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
			__entry->dst_cpu, __entry->dst_nid)
);
650dfc68f29SAndy Lutomirski
/* NUMA balancing kept the task where it was (migration rejected). */
DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

/* NUMA balancing swapped the two tasks between CPUs. */
DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

667b2b2042bSMel Gorman
/*
 * Tracepoint for waking a polling cpu without an IPI.
 * @cpu: the idle CPU that was woken by setting its polling flag.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(	int,	cpu	)
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);
687ba19f51fSQais Yousef
/*
 * Following tracepoints are not exported in tracefs and provide hooking
 * mechanisms only for testing and debugging purposes.
 *
 * Postfixed with _tp to make them easily identifiable in the code.
 */

/* PELT (load-tracking) signal updates, per tracked entity type. */
DECLARE_TRACE(pelt_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_thermal_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

/* CPU capacity update hook. */
DECLARE_TRACE(sched_cpu_capacity_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

/* Root-domain overutilized flag changed. */
DECLARE_TRACE(sched_overutilized_tp,
	TP_PROTO(struct root_domain *rd, bool overutilized),
	TP_ARGS(rd, overutilized));

/* util_est signal updates. */
DECLARE_TRACE(sched_util_est_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

/* rq->nr_running changed by @change. */
DECLARE_TRACE(sched_update_nr_running_tp,
	TP_PROTO(struct rq *rq, int change),
	TP_ARGS(rq, change));

7379d246053SPhil Auld
738ad8d75ffSSteven Rostedt #endif /* _TRACE_SCHED_H */
739ad8d75ffSSteven Rostedt
740ad8d75ffSSteven Rostedt /* This part must be outside protection */
741ad8d75ffSSteven Rostedt #include <trace/define_trace.h>
742