/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
#define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)		(var)
#define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)

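/*
 * Illustrative usage sketch (not from this file): the plain macros guard
 * themselves with schedstat_enabled(), while the __-prefixed forms assume
 * the caller already checked, letting one branch cover several updates.
 * The fields named here (rq->yld_count, se->statistics.*) are examples of
 * schedstat counters from this kernel era, shown for illustration only:
 *
 *	schedstat_inc(rq->yld_count);
 *	if (schedstat_enabled()) {
 *		__schedstat_add(se->statistics.wait_sum, delta);
 *		__schedstat_set(se->statistics.wait_start, 0);
 *	}
 */
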
#else /* !CONFIG_SCHEDSTATS: */
static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define   schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define   schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define   schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define   schedstat_set(var, val)	do { } while (0)
# define   schedstat_val(var)		0
# define   schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU.  We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs: the delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

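/*
 * Worked example with hypothetical clock values: a task enqueued at
 * rq_clock() == 100 that is dequeued, still unscheduled, at 130 adds
 * delta = 30 to run_delay above; last_queued is then cleared so a later
 * re-enqueue starts a fresh wait interval.
 */
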
/*
 * Called when a task finally hits the CPU.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

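/*
 * The run_delay and pcount accumulated here are what userspace reads
 * from /proc/<pid>/schedstat, alongside the task's on-CPU time; assuming
 * that file's three-field format, a sample might look like:
 *
 *	$ cat /proc/self/schedstat
 *	123456789 23456 17	(cpu time ns, run_delay ns, pcount)
 */
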
/*
 * This function is called only from enqueue_task(), and it updates
 * the timestamp only if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on())) {
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
	}
}

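/*
 * Note the idempotence: last_queued is written only when zero, so with
 * hypothetical clocks a double enqueue keeps the *first* timestamp:
 *
 *	sched_info_queued(rq, t);	// last_queued = 100
 *	sched_info_queued(rq, t);	// still 100, not overwritten
 */
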
/*
 * Called when a process ceases being the currently running process,
 * typically involuntarily due to expiring its time slice (this may also
 * be called when switching to the idle task).  Now we can calculate how
 * long we ran.  Also, if the process is still in the TASK_RUNNING state,
 * call sched_info_queued() to mark that it has now again started waiting
 * on the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

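/*
 * Sketch of the preemption case, with hypothetical clocks: a task that
 * arrived at rq_clock() == 200 and departs at 250 while still
 * TASK_RUNNING contributes delta = 50 of CPU time above, then is
 * immediately re-stamped via sched_info_queued() so its upcoming wait
 * is charged to run_delay rather than lost.
 */
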
/*
 * Called when tasks are switched involuntarily, typically due to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}

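/*
 * For context, a sketch of the call site (from this era's
 * kernel/sched/core.c, paraphrased rather than verbatim): the context
 * switch path invokes this with the runqueue lock held, so the depart
 * and arrive accounting see a consistent rq_clock():
 *
 *	prepare_task_switch(rq, prev, next);	// calls sched_info_switch()
 */
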
#else /* !CONFIG_SCHED_INFO: */
# define sched_info_queued(rq, t)	do { } while (0)
# define sched_info_reset_dequeued(t)	do { } while (0)
# define sched_info_dequeued(rq, t)	do { } while (0)
# define sched_info_depart(rq, t)	do { } while (0)
# define sched_info_arrive(rq, next)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */