
#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
# define schedstat_inc(rq, field)	do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
# define schedstat_add(rq, field, amt)	do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
# define schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
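
/*
 * Note: schedstat_enabled() above is backed by a static key
 * (sched_schedstats, defined in kernel/sched/core.c and typically toggled
 * via the schedstats= boot option or /proc/sys/kernel/sched_schedstats),
 * so the guarded updates cost only a patched-out branch while schedstats
 * are disabled at run time.
 */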
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_enabled()		0
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif
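
/*
 * Illustrative usage sketch (these calls live in other scheduler files, not
 * here; the field names are examples): the helpers take the object holding
 * the stats plus the field to update, and become no-ops when
 * CONFIG_SCHEDSTATS is off:
 *
 *	schedstat_inc(rq, yld_count);
 *	schedstat_add(rq, rq_sched_info.run_delay, delta);
 *	schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
 */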

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu; we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}
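
/*
 * Illustrative example of the skew annulment above (numbers are made up):
 * a task enqueued on CPU0 at rq0 clock 100 and dequeued from CPU0 (e.g. for
 * migration) at rq0 clock 130 contributes a run_delay delta of 30 measured
 * entirely against CPU0's clock; once re-queued, last_queued is taken from
 * the new rq, so the next delta is again measured against a single CPU's
 * clock.  Since last_queued and "now" always come from the same rq, any
 * clock skew between CPUs never enters the accumulated run_delay.
 */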

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task).  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
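
/*
 * Illustrative call flow (a sketch; the actual callers live elsewhere,
 * e.g. in kernel/sched/core.c): enqueue_task() calls sched_info_queued(),
 * dequeue_task() calls sched_info_dequeued(), and prepare_task_switch()
 * calls sched_info_switch(rq, prev, next), which charges prev's timeslice
 * via sched_info_depart() and next's wait time via sched_info_arrive().
 */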
#else
#define sched_info_queued(rq, t)		do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(rq, t)		do { } while (0)
#define sched_info_depart(rq, t)		do { } while (0)
#define sched_info_arrive(rq, next)		do { } while (0)
#define sched_info_switch(rq, t, next)		do { } while (0)
#endif /* CONFIG_SCHED_INFO */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running))
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we won't account further cputime consumed by
	 * that task to the signal struct, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore, in any
	 * running thread group timer, the cputime elapsing after
	 * __exit_signal().
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
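
/*
 * Illustrative callers (a sketch; the actual call sites live elsewhere,
 * e.g. kernel/sched/cputime.c and the scheduling classes): the tick path's
 * account_user_time()/account_system_time() forward each tick's cputime via
 * account_group_user_time()/account_group_system_time(), and update_curr()
 * feeds the running task's exec-time delta to account_group_exec_runtime().
 * The atomic64_add() updates let these counters be maintained without
 * taking the siglock on every tick.
 */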
265