// SPDX-License-Identifier: GPL-2.0-only
/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application
 * encounters because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks 2 levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed-size data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. The count, the total accumulated latency and the
 * maximum latency are all tracked in this data structure. When the fixed-size
 * structure is full, no new causes are tracked until the buffer is flushed by
 * writing to the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */
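/*
 * Usage sketch (assumes CONFIG_LATENCYTOP=y; the paths below are the procfs
 * file created in init_lstats_procfs() and the kernel.latencytop sysctl
 * handled by sysctl_latencytop() at the bottom of this file):
 *
 *	# echo 1 > /proc/sys/kernel/latencytop	(enable collection)
 *	# cat /proc/latency_stats		(dump the accumulated records)
 *	# echo > /proc/latency_stats		(flush the fixed-size buffer)
 *
 * Any write to /proc/latency_stats clears the global records; see
 * lstats_write() below.
 */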
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/latencytop.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/stat.h>
#include <linux/list.h>
#include <linux/stacktrace.h>

static DEFINE_RAW_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];

int latencytop_enabled;

void clear_tsk_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR + 1;
	int i;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry marks end of backtrace: */
			if (!record)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	/* No match found; bail out unless a free slot was seen: */
	i = firstnonnull;
	if (i >= MAXLR)
		return;

	/* Allocated a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/**
 * __account_scheduler_latency - record a latency that occurred
 * @tsk: the task struct of the task hitting the latency
 * @usecs: the duration of the latency in microseconds
 * @inter: 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/*
	 * Negative sleeps are time going backwards;
	 * zero-time sleeps are not interesting:
	 */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;

	stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);

	raw_spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry is end of backtrace */
			if (!record)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * Short term hack: once a task has LT_SAVECOUNT entries we stop
	 * adding new ones; in the future we should recycle old entries.
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}
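/*
 * Call-path sketch (for orientation; the actual wiring lives in
 * <linux/latencytop.h> and the scheduler, not in this file): callers go
 * through the account_scheduler_latency() inline, which checks
 * latencytop_enabled before paying for the function call, roughly:
 *
 *	static inline void
 *	account_scheduler_latency(struct task_struct *task, int usecs, int inter)
 *	{
 *		if (unlikely(latencytop_enabled))
 *			__account_scheduler_latency(task, usecs, inter);
 *	}
 *
 * The scheduler passes the sleep duration converted from nanoseconds to
 * approximate microseconds (e.g. 'delta >> 10') and flags whether the
 * sleep was interruptible.
 */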
static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		struct latency_record *lr = &latency_record[i];

		if (lr->backtrace[0]) {
			int q;

			seq_printf(m, "%i %lu %lu",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];

				if (!bt)
					break;

				seq_printf(m, " %ps", (void *)bt);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();

	return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct proc_ops lstats_proc_ops = {
	.proc_open	= lstats_open,
	.proc_read	= seq_read,
	.proc_write	= lstats_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_proc_ops);
	return 0;
}

int sysctl_latencytop(struct ctl_table *table, int write, void *buffer,
		      size_t *lenp, loff_t *ppos)
{
	int err;

	err = proc_dointvec(table, write, buffer, lenp, ppos);
	if (latencytop_enabled)
		force_schedstat_enabled();

	return err;
}
device_initcall(init_lstats_procfs);
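/*
 * Userspace parsing sketch (hypothetical; not part of the kernel build):
 * every record printed by lstats_show() above is one line of the form
 * "<count> <total_usecs> <max_usecs> <symbol> [<symbol> ...]", so after
 * skipping the "Latency Top version :" header a consumer could split each
 * line with something like:
 *
 *	int count;
 *	unsigned long total, max;
 *	char trace[256];
 *
 *	if (sscanf(line, "%d %lu %lu %255[^\n]",
 *		   &count, &total, &max, trace) == 4)
 *		printf("avg %lu usec: %s\n", total / count, trace);
 *
 * (note: 'total / count' is the average latency described in the header
 * comment at the top of this file)
 */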