#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"


#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and can
 * race with irq/vtime_account on this CPU. We would either get the old
 * or the new value, with the side effect of accounting a slice of irq time
 * to the wrong task when an irq is in progress while we read rq->clock.
 * That is a worthy compromise in place of having locks on each irq in
 * account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special task
	 * that does not consume any time but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);

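/*
 * Compare the accumulated per-CPU hardirq time with what has already been
 * folded into the CPUTIME_IRQ field of kcpustat. Returns 1 when there is a
 * pending delta to account on this tick, 0 otherwise. The softirq variant
 * below does the same for cpu_softirq_time/CPUTIME_SOFTIRQ.
 */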
static int irqtime_account_hi_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_hardirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

static int irqtime_account_si_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_softirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cpuacct_account_field(p, index, tmp);
}

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}

/*
 * Account system cpu time to a process and the desired cpustat field.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			   cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

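/*
 * Account any steal time reported by the hypervisor since the last look and
 * fold it into CPUTIME_STEAL. Returns true when a non-zero amount of steal
 * time was accounted, in which case the caller skips the regular accounting
 * for this tick.
 */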
static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;
		cputime_t steal_ct;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		/*
		 * cputime_t may be less precise than nsecs (e.g. if it's
		 * based on jiffies). Let's cast the result to cputime
		 * granularity and account the rest on the next rounds.
		 */
		steal_ct = nsecs_to_cputime(steal);
		this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);

		account_steal_time(steal_ct);
		return steal_ct;
	}
#endif
	return false;
}

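/*
 * The group summation below is done with a lockless first pass: if
 * sig->stats_lock changes under us while we iterate the threads, one more
 * pass is made with the lock held (and interrupts disabled), so the retry
 * loop is bounded to two rounds.
 */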
/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	cputime_t utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += task_sched_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time, as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq/softirq
 * time, as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq, int ticks)
{
	cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
	u64 cputime = (__force u64) cputime_one_jiffy;
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	if (steal_account_process_tick())
		return;

	cputime *= ticks;
	scaled *= ticks;

	if (irqtime_account_hi_update()) {
		cpustat[CPUTIME_IRQ] += cputime;
	} else if (irqtime_account_si_update()) {
		cpustat[CPUTIME_SOFTIRQ] += cputime;
	} else if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime, scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime, scaled);
	} else {
		__account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	struct rq *rq = this_rq();

	irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	vtime_account_user(prev);
#endif
	arch_vtime_task_switch(prev);
}
#endif

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have a different meaning of idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_common_account_irq_enter(struct task_struct *tsk)
{
	if (!in_interrupt()) {
		/*
		 * If we interrupted userspace, context_tracking_in_user()
		 * is 1 because context tracking doesn't hook on irq
		 * entry/exit. This way we know if we need to flush user
		 * time on kernel entry.
		 */
		if (context_tracking_in_user()) {
			vtime_account_user(tsk);
			return;
		}

		if (is_idle_task(tsk)) {
			vtime_account_idle(tsk);
			return;
		}
	}
	vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */


#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	struct rq *rq = this_rq();

	if (vtime_accounting_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq, 1);
		return;
	}

	if (steal_account_process_tick())
		return;

	if (user_tick)
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(cputime_one_jiffy);
}

/*
 * Account multiple ticks of steal time.
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
	account_steal_time(jiffies_to_cputime(ticks));
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of idle ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	account_idle_time(jiffies_to_cputime(ticks));
}

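/*
 * scale_stime() below is a helper for cputime_adjust(): it redistributes
 * rtime in the proportion indicated by the tick-based stime/utime samples,
 * which requires computing stime * rtime / total on values that may each
 * be close to 64 bits wide.
 */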
/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 * losing precision when the numbers are big.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 scaled;

	for (;;) {
		/* Make sure "rtime" is the bigger of stime/rtime */
		if (stime > rtime)
			swap(rtime, stime);

		/* Make sure 'total' fits in 32 bits */
		if (total >> 32)
			goto drop_precision;

		/* Does rtime (and thus stime) fit in 32 bits? */
		if (!(rtime >> 32))
			break;

		/* Can we just balance rtime/stime rather than dropping bits? */
		if (stime >> 31)
			goto drop_precision;

		/* We can grow stime and shrink rtime and try to make them both fit */
		stime <<= 1;
		rtime >>= 1;
		continue;

drop_precision:
		/* We drop from rtime, it has more bits than stime */
		rtime >>= 1;
		total >>= 1;
	}

	/*
	 * Make sure gcc understands that this is a 32x32->64 multiply,
	 * followed by a 64/32->64 divide.
	 */
	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
	return (__force cputime_t) scaled;
}

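/*
 * For example (powers of two chosen so the arithmetic stays exact): with
 * stime = 2^20, rtime = 2^40 and total = 2^21, the loop in scale_stime()
 * shifts stime up and rtime down nine times, leaving stime = 2^29 and
 * rtime = 2^31. Both now fit in 32 bits, the product 2^60 fits in 64 bits,
 * and 2^60 / 2^21 = 2^39, which equals stime * rtime / total exactly.
 * In general the shifted-out low bits are lost, which is the precision
 * trade-off mentioned above.
 */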
/*
 * Adjust tick-based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick-based cputime accounting depends on whether the scheduling
 * timeslices of a task happen to be interrupted by the timer or not.
 * Depending on these circumstances, the number of ticks charged may
 * over- or under-estimate the real user and system cputime, with
 * variable precision.
 *
 * Fix this by scaling these tick-based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
static void cputime_adjust(struct task_cputime *curr,
			   struct prev_cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, stime, utime;
	unsigned long flags;

	/* Serialize concurrent callers such that we can honour our guarantees */
	raw_spin_lock_irqsave(&prev->lock, flags);
	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

	/*
	 * This is possible under two circumstances:
	 *  - rtime isn't monotonic after all (a bug);
	 *  - we got reordered by the lock.
	 *
	 * In both cases this acts as a filter such that the rest of the code
	 * can assume it is monotonic regardless of anything else.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	if (utime == 0) {
		stime = rtime;
		goto update;
	}

	if (stime == 0) {
		utime = rtime;
		goto update;
	}

	stime = scale_stime((__force u64)stime, (__force u64)rtime,
			    (__force u64)(stime + utime));

	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
	 *
	 *  utime_i+1 = rtime_i+1 - stime_i
	 *            = rtime_i+1 - (rtime_i - utime_i)
	 *            = (rtime_i+1 - rtime_i) + utime_i
	 *            >= utime_i
	 */
	if (stime < prev->stime)
		stime = prev->stime;
	utime = rtime - stime;

	/*
	 * Make sure utime doesn't go backwards; this still preserves
	 * monotonicity for stime, analogous argument to above.
	 */
	if (utime < prev->utime) {
		utime = prev->utime;
		stime = rtime - utime;
	}

update:
	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
	raw_spin_unlock_irqrestore(&prev->lock, flags);
}

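/*
 * Worked example: suppose the tick samples say stime = 600 and utime = 200
 * (3/4 of the samples hit kernel mode), while the scheduler says the task
 * really ran for rtime = 400. scale_stime(600, 400, 800) returns
 * 600 * 400 / 800 = 300, so we report stime = 300 and utime = 100: the 3:1
 * ratio is kept and the sum equals rtime. If a previous call had already
 * reported prev->stime = 320, stime is clamped to 320 and utime recomputed
 * as 80; the symmetric check above keeps utime from going backwards in the
 * same way.
 */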
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static unsigned long long vtime_delta(struct task_struct *tsk)
{
	unsigned long long clock;

	clock = local_clock();
	if (clock < tsk->vtime_snap)
		return 0;

	return clock - tsk->vtime_snap;
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
	unsigned long long delta = vtime_delta(tsk);

	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
	tsk->vtime_snap += delta;

	/* CHECKME: always safe to convert nsecs to cputime? */
	return nsecs_to_cputime(delta);
}

static void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_gen_account_irq_exit(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	if (context_tracking_in_user())
		tsk->vtime_snap_whence = VTIME_USER;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_cpu;

	write_seqlock(&tsk->vtime_seqlock);
	delta_cpu = get_vtime_delta(tsk);
	tsk->vtime_snap_whence = VTIME_SYS;
	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_user_enter(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	tsk->vtime_snap_whence = VTIME_USER;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	/*
	 * The flags must be updated under the lock with
	 * the vtime_snap flush and update.
	 * That enforces the right ordering and update sequence
	 * synchronization against the reader (task_gtime())
	 * that can thus safely catch up with a tickless delta.
	 */
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_idle_time(delta_cpu);
}

void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqlock(&prev->vtime_seqlock);
	prev->vtime_snap_whence = VTIME_SLEEPING;
	write_sequnlock(&prev->vtime_seqlock);

	write_seqlock(&current->vtime_seqlock);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = sched_clock_cpu(smp_processor_id());
	write_sequnlock(&current->vtime_seqlock);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	unsigned long flags;

	write_seqlock_irqsave(&t->vtime_seqlock, flags);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = sched_clock_cpu(cpu);
	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}

cputime_t task_gtime(struct task_struct *t)
{
	unsigned int seq;
	cputime_t gtime;

	if (!context_tracking_is_enabled())
		return t->gtime;

	do {
		seq = read_seqbegin(&t->vtime_seqlock);

		gtime = t->gtime;
		if (t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqretry(&t->vtime_seqlock, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
		   cputime_t *u_dst, cputime_t *s_dst,
		   cputime_t *u_src, cputime_t *s_src,
		   cputime_t *udelta, cputime_t *sdelta)
{
	unsigned int seq;
	unsigned long long delta;

	do {
		*udelta = 0;
		*sdelta = 0;

		seq = read_seqbegin(&t->vtime_seqlock);

		if (u_dst)
			*u_dst = *u_src;
		if (s_dst)
			*s_dst = *s_src;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_SLEEPING ||
		    is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add pending nohz
		 * time to the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
			*udelta = delta;
		} else {
			if (t->vtime_snap_whence == VTIME_SYS)
				*sdelta = delta;
		}
	} while (read_seqretry(&t->vtime_seqlock, seq));
}


void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utime, stime, &t->utime,
			   &t->stime, &udelta, &sdelta);
	if (utime)
		*utime += udelta;
	if (stime)
		*stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
			 cputime_t *utimescaled, cputime_t *stimescaled)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utimescaled, stimescaled,
			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
	if (utimescaled)
		*utimescaled += cputime_to_scaled(udelta);
	if (stimescaled)
		*stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */