/* xref: /openbmc/linux/kernel/sched/stats.h (revision bc05aa6e) */
/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

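/*
 * Expects runqueue lock to be held for atomicity of update.
 * Accumulates, at dequeue time, the delay a task spent waiting on this
 * runqueue.
 */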
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
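/*
 * schedstat_*() checks the sched_schedstats static branch before updating;
 * the __schedstat_*() variants update unconditionally and are meant for
 * callers that have already checked schedstat_enabled() themselves.
 */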
#define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)		(var)
#define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
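/*
 * Illustrative usage sketch; the statistics fields below are examples and
 * are not defined in this file:
 *
 *	schedstat_inc(rq->yld_count);		// checked on every call
 *	if (schedstat_enabled()) {		// check hoisted by the caller
 *		__schedstat_set(p->se.statistics.wait_start, rq_clock(rq));
 *		__schedstat_inc(p->se.statistics.nr_wakeups);
 *	}
 */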

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
#define schedstat_enabled()		0
#define __schedstat_inc(var)		do { } while (0)
#define schedstat_inc(var)		do { } while (0)
#define __schedstat_add(var, amt)	do { } while (0)
#define schedstat_add(var, amt)		do { } while (0)
#define __schedstat_set(var, val)	do { } while (0)
#define schedstat_set(var, val)		do { } while (0)
#define schedstat_val(var)		0
#define schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_SCHED_INFO
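/*
 * Clear the last_queued stamp so that the next enqueue restarts the
 * wait-time measurement.
 */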
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu. We call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus:
 * taking the delta on each cpu cancels the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process, typically
 * involuntarily due to expiring its time slice (this may also be called
 * when switching to the idle task).  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched, typically involuntarily due to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
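/*
 * These hooks are driven by the scheduler core: sched_info_queued() and
 * sched_info_dequeued() from the enqueue_task()/dequeue_task() paths (see
 * the comments above), and sched_info_switch() from the context-switch
 * path when prev != next.
 */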
#else /* !CONFIG_SCHED_INFO */
#define sched_info_queued(rq, t)		do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(rq, t)		do { } while (0)
#define sched_info_depart(rq, t)		do { } while (0)
#define sched_info_arrive(rq, next)		do { } while (0)
#define sched_info_switch(rq, t, next)		do { } while (0)
#endif /* CONFIG_SCHED_INFO */
174