// SPDX-License-Identifier: GPL-2.0
/*
 * /proc/schedstat implementation
 */
#include "sched.h"

/*
 * Current schedstat API version.
 *
 * Bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15

static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;

	if (v == (void *)1) {
		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
		seq_printf(seq, "timestamp %lu\n", jiffies);
	} else {
		struct rq *rq;
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		cpu = (unsigned long)(v - 2);
		rq = cpu_rq(cpu);

		/* runqueue-specific stats */
		seq_printf(seq,
			   "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
			   cpu, rq->yld_count,
			   rq->sched_count, rq->sched_goidle,
			   rq->ttwu_count, rq->ttwu_local,
			   rq->rq_cpu_time,
			   rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			seq_printf(seq, "domain%d %*pb", dcount++,
				   cpumask_pr_args(sched_domain_span(sd)));
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
					   sd->lb_count[itype],
					   sd->lb_balanced[itype],
					   sd->lb_failed[itype],
					   sd->lb_imbalance[itype],
					   sd->lb_gained[itype],
					   sd->lb_hot_gained[itype],
					   sd->lb_nobusyq[itype],
					   sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
				   sd->alb_count, sd->alb_failed, sd->alb_pushed,
				   sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
				   sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
				   sd->ttwu_wake_remote, sd->ttwu_move_affine,
				   sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	return 0;
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;

	return schedstat_start(file, offset);
}

static void schedstat_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations schedstat_sops = {
	.start = schedstat_start,
	.next  = schedstat_next,
	.stop  = schedstat_stop,
	.show  = show_schedstat,
};

static int schedstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &schedstat_sops);
}

static const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init proc_schedstat_init(void)
{
	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);

	return 0;
}
subsys_initcall(proc_schedstat_init);