/* delayacct.c - per-task delay accounting
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 */

#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/slab.h>
#include <linux/taskstats.h>
#include <linux/time.h>
#include <linux/sysctl.h>
#include <linux/delayacct.h>
#include <linux/module.h>

int delayacct_on __read_mostly = 1;	/* Delay accounting turned on/off */
EXPORT_SYMBOL_GPL(delayacct_on);
struct kmem_cache *delayacct_cache;

/* Boot-time "nodelayacct" parameter turns delay accounting off */
static int __init delayacct_setup_disable(char *str)
{
	delayacct_on = 0;
	return 1;
}
__setup("nodelayacct", delayacct_setup_disable);

void delayacct_init(void)
{
	delayacct_cache = KMEM_CACHE(task_delay_info, SLAB_PANIC|SLAB_ACCOUNT);
	delayacct_tsk_init(&init_task);
}

/* Allocate and initialize the per-task delay accounting structure */
void __delayacct_tsk_init(struct task_struct *tsk)
{
	tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
	if (tsk->delays)
		raw_spin_lock_init(&tsk->delays->lock);
}

/*
 * Finish delay accounting for a statistic using its timestamp (@start),
 * accumulator (@total) and @count
 */
static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total,
			  u32 *count)
{
	s64 ns = ktime_get_ns() - *start;
	unsigned long flags;

	if (ns > 0) {
		raw_spin_lock_irqsave(lock, flags);
		*total += ns;
		(*count)++;
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

/* Record the timestamp at which the current task starts waiting on block I/O */
void __delayacct_blkio_start(void)
{
	current->delays->blkio_start = ktime_get_ns();
}

/*
 * We cannot rely on the `current` macro, as we haven't yet switched back to
 * the process being woken.
 */
void __delayacct_blkio_end(struct task_struct *p)
{
	struct task_delay_info *delays = p->delays;
	u64 *total;
	u32 *count;

	if (p->delays->flags & DELAYACCT_PF_SWAPIN) {
		total = &delays->swapin_delay;
		count = &delays->swapin_count;
	} else {
		total = &delays->blkio_delay;
		count = &delays->blkio_count;
	}

	delayacct_end(&delays->lock, &delays->blkio_start, total, count);
}

/* Fill in the delay accounting fields of @d from the counters kept for @tsk */
int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
{
	u64 utime, stime, stimescaled, utimescaled;
	unsigned long long t2, t3;
	unsigned long flags, t1;
	s64 tmp;

	task_cputime(tsk, &utime, &stime);
	tmp = (s64)d->cpu_run_real_total;
	tmp += utime + stime;
	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;

	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
	tmp = (s64)d->cpu_scaled_run_real_total;
	tmp += utimescaled + stimescaled;
	d->cpu_scaled_run_real_total =
		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;

	/*
	 * No locking available for sched_info (and too expensive to add one);
	 * mitigate by taking a snapshot of the values
	 */
	t1 = tsk->sched_info.pcount;
	t2 = tsk->sched_info.run_delay;
	t3 = tsk->se.sum_exec_runtime;

	d->cpu_count += t1;

	tmp = (s64)d->cpu_delay_total + t2;
	d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;

	tmp = (s64)d->cpu_run_virtual_total + t3;
	d->cpu_run_virtual_total =
		(tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;

	/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */

	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
	d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
	tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
	d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
	d->blkio_count += tsk->delays->blkio_count;
	d->swapin_count += tsk->delays->swapin_count;
	d->freepages_count += tsk->delays->freepages_count;
	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);

	return 0;
}

/* Aggregated block I/O + swapin delay of @tsk, converted to clock ticks */
__u64 __delayacct_blkio_ticks(struct task_struct *tsk)
{
	__u64 ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
	ret = nsec_to_clock_t(tsk->delays->blkio_delay +
				tsk->delays->swapin_delay);
	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
	return ret;
}

/* Record the timestamp at which the current task starts reclaiming memory */
void __delayacct_freepages_start(void)
{
	current->delays->freepages_start = ktime_get_ns();
}

/* Accumulate the time the current task spent waiting for pages to be freed */
void __delayacct_freepages_end(void)
{
	delayacct_end(
		&current->delays->lock,
		&current->delays->freepages_start,
		&current->delays->freepages_delay,
		&current->delays->freepages_count);
}
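
/*
 * Usage sketch (illustrative only, not part of the upstream file): the
 * double-underscore helpers above are not meant to be called directly;
 * callers go through the delayacct_*() wrappers in <linux/delayacct.h>,
 * which are effectively no-ops when delay accounting is compiled out or
 * disabled.  A blocking region is bracketed roughly like this, with the
 * end hook invoked from the wakeup path rather than by the sleeping task
 * itself (see the comment above __delayacct_blkio_end()):
 *
 *	delayacct_blkio_start();	// stamp blkio_start in current->delays
 *	...				// task blocks, e.g. in io_schedule()
 *	delayacct_blkio_end(p);		// from the waker: accumulate the delay
 *
 * The per-task totals are later folded into a struct taskstats by
 * __delayacct_add_tsk() for delivery to userspace.
 */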