#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/irqnr.h>
#include <linux/sched/cputime.h>
#include <linux/tick.h>

#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif

/*
 * Idle/iowait accounting: architectures that define arch_idle_time() add an
 * architecture-measured idle component on top of cpustat.  Otherwise the
 * NO_HZ counters from the tick code are preferred for online CPUs, with
 * cpustat as the fallback.
 */
#ifdef arch_idle_time

static u64 get_idle_time(int cpu)
{
        u64 idle;

        idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
        if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
                idle += arch_idle_time(cpu);
        return idle;
}

static u64 get_iowait_time(int cpu)
{
        u64 iowait;

        iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
        if (cpu_online(cpu) && nr_iowait_cpu(cpu))
                iowait += arch_idle_time(cpu);
        return iowait;
}

#else

static u64 get_idle_time(int cpu)
{
        u64 idle, idle_usecs = -1ULL;

        if (cpu_online(cpu))
                idle_usecs = get_cpu_idle_time_us(cpu, NULL);

        if (idle_usecs == -1ULL)
                /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
                idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
        else
                idle = idle_usecs * NSEC_PER_USEC;

        return idle;
}

static u64 get_iowait_time(int cpu)
{
        u64 iowait, iowait_usecs = -1ULL;

        if (cpu_online(cpu))
                iowait_usecs = get_cpu_iowait_time_us(cpu, NULL);

        if (iowait_usecs == -1ULL)
                /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
                iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
        else
                iowait = iowait_usecs * NSEC_PER_USEC;

        return iowait;
}

#endif

/*
 * Render the contents of /proc/stat: an aggregate "cpu" line, one line per
 * online CPU, followed by interrupt, context-switch, boot-time, fork,
 * runnable/blocked-task and softirq counters.
 */
static int show_stat(struct seq_file *p, void *v)
{
        int i, j;
        u64 user, nice, system, idle, iowait, irq, softirq, steal;
        u64 guest, guest_nice;
        u64 sum = 0;
        u64 sum_softirq = 0;
        unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
        struct timespec64 boottime;

        user = nice = system = idle = iowait =
                irq = softirq = steal = 0;
        guest = guest_nice = 0;
        getboottime64(&boottime);

        /* Sum times over all possible CPUs so offline CPUs' history is kept. */
        for_each_possible_cpu(i) {
                user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
                nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
                system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
                idle += get_idle_time(i);
                iowait += get_iowait_time(i);
                irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
                softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
                steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
                guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
                guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
                sum += kstat_cpu_irqs_sum(i);
                sum += arch_irq_stat_cpu(i);

                for (j = 0; j < NR_SOFTIRQS; j++) {
                        unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

                        per_softirq_sums[j] += softirq_stat;
                        sum_softirq += softirq_stat;
                }
        }
        sum += arch_irq_stat();

        seq_put_decimal_ull(p, "cpu  ", nsec_to_clock_t(user));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
        seq_putc(p, '\n');

        for_each_online_cpu(i) {
                /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
                user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
                nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
                system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
                idle = get_idle_time(i);
                iowait = get_iowait_time(i);
                irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
                softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
                steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
                guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
                guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
                seq_printf(p, "cpu%d", i);
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
                seq_putc(p, '\n');
        }
        seq_put_decimal_ull(p, "intr ", (unsigned long long)sum);

        /* sum again ? it could be updated? */
        for_each_irq_nr(j)
                seq_put_decimal_ull(p, " ", kstat_irqs_usr(j));

        seq_printf(p,
                "\nctxt %llu\n"
                "btime %llu\n"
                "processes %lu\n"
                "procs_running %lu\n"
                "procs_blocked %lu\n",
                nr_context_switches(),
                (unsigned long long)boottime.tv_sec,
                total_forks,
                nr_running(),
                nr_iowait());

        seq_put_decimal_ull(p, "softirq ", (unsigned long long)sum_softirq);

        for (i = 0; i < NR_SOFTIRQS; i++)
                seq_put_decimal_ull(p, " ", per_softirq_sums[i]);
        seq_putc(p, '\n');

        return 0;
}

static int stat_open(struct inode *inode, struct file *file)
{
        size_t size = 1024 + 128 * num_online_cpus();

        /* minimum size to display an interrupt count : 2 bytes */
        size += 2 * nr_irqs;
        return single_open_size(file, show_stat, NULL, size);
}

static const struct file_operations proc_stat_operations = {
        .open           = stat_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init proc_stat_init(void)
{
        proc_create("stat", 0, NULL, &proc_stat_operations);
        return 0;
}
fs_initcall(proc_stat_init);