#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

#define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)		(var)
#define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
#define schedstat_enabled()		0
#define schedstat_inc(var)		do { } while (0)
#define schedstat_add(var, amt)		do { } while (0)
#define schedstat_set(var, val)		do { } while (0)
#define schedstat_val(var)		0
#define schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu; we call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus. The delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}
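
/*
 * Illustrative note (not from the original source): with made-up
 * timestamps t0 < t1 < t2, the hooks here and below compose as follows
 * for one pass of a task through the runqueue:
 *
 *	enqueue_task()  -> sched_info_queued() (below):	last_queued  = t0
 *	context switch  -> sched_info_arrive():		run_delay   += t1 - t0,
 *							last_arrival = t1
 *	switched out    -> sched_info_depart() (below):	rq_cpu_time += t2 - t1
 *
 * So run_delay accumulates time spent runnable but waiting, while
 * rq_cpu_time accumulates time actually spent on the cpu.
 */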

/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
#else
#define sched_info_queued(rq, t)	do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(rq, t)	do { } while (0)
#define sched_info_depart(rq, t)	do { } while (0)
#define sched_info_arrive(rq, next)	do { } while (0)
#define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */
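
/*
 * Illustrative sketch only, disabled from compilation: a hypothetical
 * context-switch path showing where sched_info_switch() is meant to be
 * invoked. example_prepare_task_switch() is an assumed name for
 * illustration and is not part of this header.
 */
#if 0
static inline void
example_prepare_task_switch(struct rq *rq,
			    struct task_struct *prev, struct task_struct *next)
{
	/* Record departure of prev and arrival of next (prev != next). */
	sched_info_switch(rq, prev, next);
}
#endif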

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running))
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we won't account to the signal struct further
	 * cputime consumed by that task, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore the cputime
	 * elapsing after __exit_signal() in any thread group timer running.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
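
/*
 * Illustrative sketch only, disabled from compilation: a hypothetical
 * tick-time caller showing how the account_group_*() helpers above are
 * meant to be used together. example_account_tick() and its parameters
 * are assumptions for illustration, not part of this header.
 */
#if 0
static void example_account_tick(struct task_struct *curr, int user_tick,
				 unsigned long long delta_ns)
{
	/* Charge the tick to utime or stime depending on where it landed. */
	if (user_tick)
		account_group_user_time(curr, cputime_one_jiffy);
	else
		account_group_system_time(curr, cputime_one_jiffy);

	/* Runtime is tracked in nanoseconds, independent of the tick split. */
	account_group_exec_runtime(curr, delta_ns);
}
#endif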