xref: /openbmc/linux/kernel/sched/cputime.c (revision aa017ab9)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple CPU accounting cgroup controller
 */
#include "sched.h"

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would either get the
 * old or the new value, with a side effect of accounting a slice of
 * irq time to the wrong task when an irq is in progress while we read
 * rq->clock. That is a worthy compromise compared to taking a lock on
 * each irq in account_system_time.
 */
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);

static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

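/*
 * Added note (not part of the original source): irqtime->sync is a
 * u64_stats_sync sequence counter. Writers bracket updates with
 * u64_stats_update_begin()/end() so 32-bit readers, which cannot load
 * a u64 atomically, can detect and retry torn reads; on 64-bit kernels
 * these calls compile away.
 */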
static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
				  enum cpu_usage_stat idx)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	u64_stats_update_begin(&irqtime->sync);
	cpustat[idx] += delta;
	irqtime->total += delta;
	irqtime->tick_delta += delta;
	u64_stats_update_end(&irqtime->sync);
}

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
	irqtime->irq_start_time += delta;

	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd
	 * thread in that case, so as not to confuse the scheduler with
	 * a special task that does not consume any time but still
	 * wants to run.
	 */
	if (hardirq_count())
		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);

static u64 irqtime_tick_accounted(u64 maxtime)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	u64 delta;

	delta = min(irqtime->tick_delta, maxtime);
	irqtime->tick_delta -= delta;

	return delta;
}
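/*
 * Illustrative example (added, not from the original source): if 3 ms
 * of irq time has accumulated in tick_delta and the caller asks for at
 * most TICK_NSEC (1 ms at HZ=1000), one call drains 1 ms and leaves
 * 2 ms in tick_delta for subsequent ticks.
 */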

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

static u64 irqtime_tick_accounted(u64 dummy)
{
	return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * update it first. If the root cgroup is the only cgroup,
	 * then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cgroup_account_cputime_field(p, index, tmp);
}

/*
 * Account user CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in user space since the last update
 */
void account_user_time(struct task_struct *p, u64 cputime)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in a virtual machine since the last update
 */
void account_guest_time(struct task_struct *p, u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		cpustat[CPUTIME_NICE] += cputime;
		cpustat[CPUTIME_GUEST_NICE] += cputime;
	} else {
		cpustat[CPUTIME_USER] += cputime;
		cpustat[CPUTIME_GUEST] += cputime;
	}
}

/*
 * Account system CPU time to a process and desired cpustat field
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in kernel space since the last update
 * @index: index of the cpustat field to update
 */
void account_system_index_time(struct task_struct *p,
			       u64 cputime, enum cpu_usage_stat index)
{
	/* Add system time to process. */
	p->stime += cputime;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the CPU time spent in kernel space since the last update
 */
void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	account_system_index_time(p, cputime, index);
}
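/*
 * Added note (an inference from the callers in this file): tick-time
 * callers such as account_process_tick() pass HARDIRQ_OFFSET as
 * @hardirq_offset so that the hardirq level entered for the tick
 * interrupt itself is not mistaken for time spent in hardirq context.
 */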

/*
 * Account for involuntary wait time.
 * @cputime: the CPU time spent in involuntary wait
 */
void account_steal_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += cputime;
}

/*
 * Account for idle time.
 * @cputime: the CPU time spent in idle wait
 */
void account_idle_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += cputime;
	else
		cpustat[CPUTIME_IDLE] += cputime;
}

/*
 * When a guest is interrupted for a longer amount of time, missed
 * clock ticks are not redelivered later. Because of that, this
 * function may on occasion account more time than the calling
 * functions think has elapsed.
 */
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;
		steal = min(steal, maxtime);
		account_steal_time(steal);
		this_rq()->prev_steal_time += steal;

		return steal;
	}
#endif
	return 0;
}
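/*
 * Added note (not part of the original source): the accounted steal
 * time is clamped to @maxtime, but prev_steal_time only advances by
 * the clamped amount, so any excess steal time is not lost; it is
 * picked up by a later call.
 */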

/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline u64 account_other_time(u64 max)
{
	u64 accounted;

	lockdep_assert_irqs_disabled();

	accounted = steal_account_process_time(max);

	if (accounted < max)
		accounted += irqtime_tick_accounted(max - accounted);

	return accounted;
}
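/*
 * Added note: the sum returned by account_other_time() is capped at
 * @max, so callers can subtract it from an elapsed-time budget without
 * the result going negative.
 */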

#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
	return t->se.sum_exec_runtime;
}
#else
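/*
 * Added note (not part of the original source): on 32-bit kernels a
 * u64 load is not atomic and sum_exec_runtime may be updated
 * concurrently by the scheduler, so take the task's rq lock to read a
 * consistent value.
 */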
static u64 read_sum_exec_runtime(struct task_struct *t)
{
	u64 ns;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(t, &rf);
	ns = t->se.sum_exec_runtime;
	task_rq_unlock(rq, t, &rf);

	return ns;
}
#endif

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	u64 utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	/*
	 * Update the current task's runtime to account for pending time
	 * since the last scheduler action or thread_group_cputime()
	 * call. This thread group might have other running tasks on
	 * different CPUs, but updating their runtime can affect syscall
	 * performance, so we skip accounting those pending times and
	 * rely only on values updated on tick or other scheduler
	 * action.
	 */
	if (same_thread_group(current, tsk))
		(void) task_sched_runtime(current);

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += read_sum_exec_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}
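/*
 * Added note on the retry pattern above (not part of the original
 * source): the first pass reads locklessly under the seqlock's
 * sequence counter; if a writer raced with it, need_seqretry() forces
 * a second pass that takes sig->stats_lock for a consistent snapshot.
 */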

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the CPU time gets accounted to
 * @user_tick: whether the tick is from userspace
 * @ticks: number of ticks to account
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time, as no
 * timer interrupt goes off while we are in a hardirq, and hence we may
 * never get an opportunity to update these values in system time
 * alone.
 * p->stime and friends are only updated on system time, not on irq or
 * softirq time, as those no longer count toward task exec_runtime.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 int ticks)
{
	u64 other, cputime = TICK_NSEC * ticks;

	/*
	 * When returning from idle, many ticks can get accounted at
	 * once, including some ticks of steal, irq, and softirq time.
	 * Subtract those ticks from the amount of time accounted to
	 * idle, or potentially user or system time. Due to rounding,
	 * other time can exceed ticks occasionally.
	 */
	other = account_other_time(ULONG_MAX);
	if (other >= cputime)
		return;

	cputime -= other;

	if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in
		 * cpu_softirq_time, so we have to handle it separately
		 * here. Also, p->stime needs to be updated for
		 * ksoftirqd.
		 */
		account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime);
	} else if (p == this_rq()->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime);
	} else {
		account_system_index_time(p, cputime, CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	irqtime_account_process_tick(current, 0, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) { }
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						int nr_ticks) { }
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

# ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_kernel(prev);

	vtime_flush(prev);
	arch_vtime_task_switch(prev);
}
# endif

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_kernel() and vtime_account_idle(). Archs that
 * assign a different meaning to idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
	if (!in_interrupt() && is_idle_task(tsk))
		vtime_account_idle(tsk);
	else
		vtime_account_kernel(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */

void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
		    u64 *ut, u64 *st)
{
	*ut = curr->utime;
	*st = curr->stime;
}

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	*ut = p->utime;
	*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}

#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

/*
 * Account a single tick of CPU time.
 * @p: the process that the CPU time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	u64 cputime, steal;

	if (vtime_accounting_enabled_this_cpu())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, 1);
		return;
	}

	cputime = TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;

	if (user_tick)
		account_user_time(p, cputime);
	else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime);
	else
		account_idle_time(cputime);
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of ticks to account as idle
 */
void account_idle_ticks(unsigned long ticks)
{
	u64 cputime, steal;

	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	cputime = ticks * TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;
	account_idle_time(cputime);
}

/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow
 * by losing precision when the numbers are big.
 */
static u64 scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 scaled;

	for (;;) {
		/* Make sure "rtime" is the bigger of stime/rtime */
		if (stime > rtime)
			swap(rtime, stime);

		/* Make sure 'total' fits in 32 bits */
		if (total >> 32)
			goto drop_precision;

		/* Does rtime (and thus stime) fit in 32 bits? */
		if (!(rtime >> 32))
			break;

		/* Can we just balance rtime/stime rather than dropping bits? */
		if (stime >> 31)
			goto drop_precision;

		/* We can grow stime and shrink rtime and try to make them both fit */
		stime <<= 1;
		rtime >>= 1;
		continue;

drop_precision:
		/* We drop from rtime, it has more bits than stime */
		rtime >>= 1;
		total >>= 1;
	}

	/*
	 * Make sure gcc understands that this is a 32x32->64 multiply,
	 * followed by a 64/32->64 divide.
	 */
	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
	return scaled;
}
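/*
 * Worked example (added for illustration, not part of the original
 * source): with stime = 1s and utime = 3s of tick time but rtime = 8s
 * of scheduler runtime, cputime_adjust() below computes
 * scale_stime(1s, 8s, 4s) = 1s * 8s / 4s = 2s, so the stime:utime
 * ratio stays 1:3 (2s and 6s) while stime + utime == rtime holds.
 */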

/*
 * Adjust the imprecise tick-based cputime against the scheduler
 * runtime accounting.
 *
 * Tick-based cputime accounting depends on whether a task's scheduling
 * timeslices happen to be interrupted by the timer or not. Depending
 * on these circumstances, the tick counts may over- or under-estimate
 * the real user and system cputime, matching them only with variable
 * precision.
 *
 * Fix this by scaling these tick-based values against the total
 * runtime accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
		    u64 *ut, u64 *st)
{
	u64 rtime, stime, utime;
	unsigned long flags;

	/* Serialize concurrent callers such that we can honour our guarantees */
	raw_spin_lock_irqsave(&prev->lock, flags);
	rtime = curr->sum_exec_runtime;

	/*
	 * This is possible under two circumstances:
	 *  - rtime isn't monotonic after all (a bug);
	 *  - we got reordered by the lock.
	 *
	 * In both cases this acts as a filter such that the rest of the code
	 * can assume it is monotonic regardless of anything else.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	/*
	 * If either stime or utime is 0, assume all runtime is userspace.
	 * Once a task gets some ticks, the monotonicity code at 'update:'
	 * will ensure things converge to the observed ratio.
	 */
	if (stime == 0) {
		utime = rtime;
		goto update;
	}

	if (utime == 0) {
		stime = rtime;
		goto update;
	}

	stime = scale_stime(stime, rtime, stime + utime);

update:
	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
	 *
	 *  utime_i+1 = rtime_i+1 - stime_i
	 *            = rtime_i+1 - (rtime_i - utime_i)
	 *            = (rtime_i+1 - rtime_i) + utime_i
	 *            >= utime_i
	 */
	if (stime < prev->stime)
		stime = prev->stime;
	utime = rtime - stime;

	/*
	 * Make sure utime doesn't go backwards; this still preserves
	 * monotonicity for stime, by an argument analogous to the above.
	 */
	if (utime < prev->utime) {
		utime = prev->utime;
		stime = rtime - utime;
	}

	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
	raw_spin_unlock_irqrestore(&prev->lock, flags);
}

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static u64 vtime_delta(struct vtime *vtime)
{
	unsigned long long clock;

	clock = sched_clock();
	if (clock < vtime->starttime)
		return 0;

	return clock - vtime->starttime;
}

static u64 get_vtime_delta(struct vtime *vtime)
{
	u64 delta = vtime_delta(vtime);
	u64 other;

	/*
	 * Unlike tick-based timing, vtime-based timing never has lost
	 * ticks, so there is no need for steal time accounting to make
	 * up for them. Vtime accounts a rounded version of actual
	 * elapsed time. Limit account_other_time to prevent rounding
	 * errors from causing elapsed vtime to go negative.
	 */
	other = account_other_time(delta);
	WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
	vtime->starttime += delta;

	return delta - other;
}

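/*
 * Added note (not part of the original source): the helpers below
 * accumulate vtime in the per-task vtime->stime/gtime/utime fields and
 * only flush into the global accounting once a full tick's worth
 * (TICK_NSEC) has built up, so the heavier account_*_time() paths are
 * not taken on every kernel/user/guest transition.
 */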
static void vtime_account_system(struct task_struct *tsk,
				 struct vtime *vtime)
{
	vtime->stime += get_vtime_delta(vtime);
	if (vtime->stime >= TICK_NSEC) {
		account_system_time(tsk, irq_count(), vtime->stime);
		vtime->stime = 0;
	}
}

static void vtime_account_guest(struct task_struct *tsk,
				struct vtime *vtime)
{
	vtime->gtime += get_vtime_delta(vtime);
	if (vtime->gtime >= TICK_NSEC) {
		account_guest_time(tsk, vtime->gtime);
		vtime->gtime = 0;
	}
}

static void __vtime_account_kernel(struct task_struct *tsk,
				   struct vtime *vtime)
{
	/* We might have scheduled out from the guest path */
	if (vtime->state == VTIME_GUEST)
		vtime_account_guest(tsk, vtime);
	else
		vtime_account_system(tsk, vtime);
}

void vtime_account_kernel(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	if (!vtime_delta(vtime))
		return;

	write_seqcount_begin(&vtime->seqcount);
	__vtime_account_kernel(tsk, vtime);
	write_seqcount_end(&vtime->seqcount);
}

void vtime_user_enter(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime_account_system(tsk, vtime);
	vtime->state = VTIME_USER;
	write_seqcount_end(&vtime->seqcount);
}

void vtime_user_exit(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime->utime += get_vtime_delta(vtime);
	if (vtime->utime >= TICK_NSEC) {
		account_user_time(tsk, vtime->utime);
		vtime->utime = 0;
	}
	vtime->state = VTIME_SYS;
	write_seqcount_end(&vtime->seqcount);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;
	/*
	 * The flags must be updated under the lock, together with the
	 * vtime_starttime flush and update. That enforces the right
	 * ordering and update-sequence synchronization against the
	 * reader (task_gtime()), which can thus safely catch up with a
	 * tickless delta.
	 */
	write_seqcount_begin(&vtime->seqcount);
	vtime_account_system(tsk, vtime);
	tsk->flags |= PF_VCPU;
	vtime->state = VTIME_GUEST;
	write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime_account_guest(tsk, vtime);
	tsk->flags &= ~PF_VCPU;
	vtime->state = VTIME_SYS;
	write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
	account_idle_time(get_vtime_delta(&tsk->vtime));
}

void vtime_task_switch_generic(struct task_struct *prev)
{
	struct vtime *vtime = &prev->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (vtime->state == VTIME_IDLE)
		vtime_account_idle(prev);
	else
		__vtime_account_kernel(prev, vtime);
	vtime->state = VTIME_INACTIVE;
	vtime->cpu = -1;
	write_seqcount_end(&vtime->seqcount);

	vtime = &current->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (is_idle_task(current))
		vtime->state = VTIME_IDLE;
	else if (current->flags & PF_VCPU)
		vtime->state = VTIME_GUEST;
	else
		vtime->state = VTIME_SYS;
	vtime->starttime = sched_clock();
	vtime->cpu = smp_processor_id();
	write_seqcount_end(&vtime->seqcount);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	struct vtime *vtime = &t->vtime;
	unsigned long flags;

	local_irq_save(flags);
	write_seqcount_begin(&vtime->seqcount);
	vtime->state = VTIME_IDLE;
	vtime->starttime = sched_clock();
	vtime->cpu = cpu;
	write_seqcount_end(&vtime->seqcount);
	local_irq_restore(flags);
}

u64 task_gtime(struct task_struct *t)
{
	struct vtime *vtime = &t->vtime;
	unsigned int seq;
	u64 gtime;

	if (!vtime_accounting_enabled())
		return t->gtime;

	do {
		seq = read_seqcount_begin(&vtime->seqcount);

		gtime = t->gtime;
		if (vtime->state == VTIME_GUEST)
			gtime += vtime->gtime + vtime_delta(vtime);

	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
	struct vtime *vtime = &t->vtime;
	unsigned int seq;
	u64 delta;

	if (!vtime_accounting_enabled()) {
		*utime = t->utime;
		*stime = t->stime;
		return;
	}

	do {
		seq = read_seqcount_begin(&vtime->seqcount);

		*utime = t->utime;
		*stime = t->stime;

		/* Task is sleeping or idle, nothing to add */
		if (vtime->state < VTIME_SYS)
			continue;

		delta = vtime_delta(vtime);

		/*
		 * Task runs either in user (including guest) or kernel space,
		 * add pending nohz time to the right place.
		 */
		if (vtime->state == VTIME_SYS)
			*stime += vtime->stime + delta;
		else
			*utime += vtime->utime + delta;
	} while (read_seqcount_retry(&vtime->seqcount, seq));
}

static int vtime_state_check(struct vtime *vtime, int cpu)
{
	/*
	 * We raced against a context switch, fetch the
	 * kcpustat task again.
	 */
	if (vtime->cpu != cpu && vtime->cpu != -1)
		return -EAGAIN;

	/*
	 * Two possible things here:
	 * 1) We are seeing the scheduling-out task (prev) or any past one.
	 * 2) We are seeing the scheduling-in task (next), but it hasn't
	 *    passed through vtime_task_switch() yet, so the pending
	 *    cputime of the prev task may not be flushed yet.
	 *
	 * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
	 */
	if (vtime->state == VTIME_INACTIVE)
		return -EAGAIN;

	return 0;
}

static u64 kcpustat_user_vtime(struct vtime *vtime)
{
	if (vtime->state == VTIME_USER)
		return vtime->utime + vtime_delta(vtime);
	else if (vtime->state == VTIME_GUEST)
		return vtime->gtime + vtime_delta(vtime);
	return 0;
}

static int kcpustat_field_vtime(u64 *cpustat,
				struct task_struct *tsk,
				enum cpu_usage_stat usage,
				int cpu, u64 *val)
{
	struct vtime *vtime = &tsk->vtime;
	unsigned int seq;
	int err;

	do {
		seq = read_seqcount_begin(&vtime->seqcount);

		err = vtime_state_check(vtime, cpu);
		if (err < 0)
			return err;

		*val = cpustat[usage];

		/*
		 * Nice vs. unnice cputime accounting may be inaccurate if
		 * the nice value has changed since the last vtime update.
		 * But a proper fix would involve interrupting the target on
		 * nice updates, which is a no-go on nohz_full (although the
		 * scheduler may still interrupt the target if rescheduling
		 * is needed...)
		 */
		switch (usage) {
		case CPUTIME_SYSTEM:
			if (vtime->state == VTIME_SYS)
				*val += vtime->stime + vtime_delta(vtime);
			break;
		case CPUTIME_USER:
			if (task_nice(tsk) <= 0)
				*val += kcpustat_user_vtime(vtime);
			break;
		case CPUTIME_NICE:
			if (task_nice(tsk) > 0)
				*val += kcpustat_user_vtime(vtime);
			break;
		case CPUTIME_GUEST:
			if (vtime->state == VTIME_GUEST && task_nice(tsk) <= 0)
				*val += vtime->gtime + vtime_delta(vtime);
			break;
		case CPUTIME_GUEST_NICE:
			if (vtime->state == VTIME_GUEST && task_nice(tsk) > 0)
				*val += vtime->gtime + vtime_delta(vtime);
			break;
		default:
			break;
		}
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return 0;
}

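/*
 * Added note (not part of the original source): vtime_state_check()
 * returns -EAGAIN while the target CPU is mid context switch, so the
 * loops below re-fetch rq->curr and spin with cpu_relax() until a
 * stable snapshot is taken.
 */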
u64 kcpustat_field(struct kernel_cpustat *kcpustat,
		   enum cpu_usage_stat usage, int cpu)
{
	u64 *cpustat = kcpustat->cpustat;
	struct rq *rq;
	u64 val;
	int err;

	if (!vtime_accounting_enabled_cpu(cpu))
		return cpustat[usage];

	rq = cpu_rq(cpu);

	for (;;) {
		struct task_struct *curr;

		rcu_read_lock();
		curr = rcu_dereference(rq->curr);
		if (WARN_ON_ONCE(!curr)) {
			rcu_read_unlock();
			return cpustat[usage];
		}

		err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
		rcu_read_unlock();

		if (!err)
			return val;

		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(kcpustat_field);

static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
				    const struct kernel_cpustat *src,
				    struct task_struct *tsk, int cpu)
{
	struct vtime *vtime = &tsk->vtime;
	unsigned int seq;
	int err;

	do {
		u64 *cpustat;
		u64 delta;

		seq = read_seqcount_begin(&vtime->seqcount);

		err = vtime_state_check(vtime, cpu);
		if (err < 0)
			return err;

		*dst = *src;
		cpustat = dst->cpustat;

		/* Task is sleeping, dead or idle, nothing to add */
		if (vtime->state < VTIME_SYS)
			continue;

		delta = vtime_delta(vtime);

		/*
		 * Task runs either in user (including guest) or kernel space,
		 * add pending nohz time to the right place.
		 */
		if (vtime->state == VTIME_SYS) {
			cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
		} else if (vtime->state == VTIME_USER) {
			if (task_nice(tsk) > 0)
				cpustat[CPUTIME_NICE] += vtime->utime + delta;
			else
				cpustat[CPUTIME_USER] += vtime->utime + delta;
		} else {
			WARN_ON_ONCE(vtime->state != VTIME_GUEST);
			if (task_nice(tsk) > 0) {
				cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
				cpustat[CPUTIME_NICE] += vtime->gtime + delta;
			} else {
				cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
				cpustat[CPUTIME_USER] += vtime->gtime + delta;
			}
		}
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return err;
}

void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
{
	const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
	struct rq *rq;
	int err;

	if (!vtime_accounting_enabled_cpu(cpu)) {
		*dst = *src;
		return;
	}

	rq = cpu_rq(cpu);

	for (;;) {
		struct task_struct *curr;

		rcu_read_lock();
		curr = rcu_dereference(rq->curr);
		if (WARN_ON_ONCE(!curr)) {
			rcu_read_unlock();
			*dst = *src;
			return;
		}

		err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
		rcu_read_unlock();

		if (!err)
			return;

		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */