// SPDX-License-Identifier: GPL-2.0
/*
 * trace_hwlat.c - A simple Hardware Latency detector.
 *
 * Use this tracer to detect large system latencies induced by the behavior of
 * certain underlying system hardware or firmware, independent of Linux itself.
 * The code was originally developed to detect the presence of SMIs on Intel
 * and AMD systems, although there is no dependency upon x86 herein.
 *
 * The classic example usage of this tracer is in detecting the presence of
 * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
 * somewhat special form of hardware interrupt spawned from earlier CPU debug
 * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
 * LPC (or other device) to generate a special interrupt under certain
 * circumstances, for example, upon expiration of a special SMI timer device,
 * due to certain external thermal readings, on certain I/O address accesses,
 * and other situations. An SMI hits a special CPU pin, triggers a special
 * SMI mode (complete with special memory map), and the OS is unaware.
 *
 * Although certain hardware-induced latencies are necessary (for example,
 * a modern system often requires an SMI handler for correct thermal control
 * and remote management), they can wreak havoc upon any OS-level performance
 * guarantee toward low latency, especially when the OS is not even made
 * aware of the presence of these interrupts. For this reason, we need a
 * somewhat brute force mechanism to detect these interrupts. In this case,
 * we do it by hogging all of the CPU(s) for configurable timer intervals,
 * sampling the built-in CPU timer and looking for discontiguous readings.
 *
 * WARNING: This implementation necessarily introduces latencies. Therefore,
 *          you should NEVER use this tracer while running in a production
 *          environment requiring any kind of low-latency performance
 *          guarantee(s).
 *
 * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
 * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
 *
 * Includes useful feedback from Clark Williams <clark@redhat.com>
 *
 */
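
/*
 * Example usage (an editor's illustrative sketch, not from the original
 * source; paths assume tracefs is mounted at /sys/kernel/tracing and the
 * tracing_thresh file takes microseconds):
 *
 *	# echo hwlat > /sys/kernel/tracing/current_tracer
 *	# echo 100 > /sys/kernel/tracing/tracing_thresh
 *	# cat /sys/kernel/tracing/trace
 *
 * The hwlat_detector/width and hwlat_detector/window files created by
 * init_tracefs() below tune the busy-sampling duty cycle.
 */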
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/sched/clock.h>
#include "trace.h"

static struct trace_array	*hwlat_trace;

#define U64STR_SIZE		22			/* 20 digits max, plus '\n' and '\0' */

#define BANNER			"hwlat_detector: "
#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
#define DEFAULT_LAT_THRESHOLD	10			/* 10us */

/* sampling thread */
static struct task_struct *hwlat_kthread;

static struct dentry *hwlat_sample_width;	/* sample width us */
static struct dentry *hwlat_sample_window;	/* sample window us */

/* Save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;

/* NMI timestamp counters */
static u64 nmi_ts_start;
static u64 nmi_total_ts;
static int nmi_count;
static int nmi_cpu;

/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;

/* If the user changed threshold, remember it */
static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;

/* Individual latency samples are stored here when detected. */
struct hwlat_sample {
	u64			seqnum;		/* unique sequence */
	u64			duration;	/* delta */
	u64			outer_duration;	/* delta (outer loop) */
	u64			nmi_total_ts;	/* Total time spent in NMIs */
	struct timespec64	timestamp;	/* wall time */
	int			nmi_count;	/* # NMIs during this sample */
	int			count;		/* # of iterations over thresh */
};
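
/*
 * Editor's note: each hwlat_sample becomes a TRACE_HWLAT ring-buffer event
 * (see trace_hwlat_sample() below) and renders in the trace output roughly
 * as follows (illustrative values; exact formatting may differ by kernel
 * version):
 *
 *	#3 inner/outer(us): 12/14 ts:1625000000.000000000
 *	count:1 nmi-total:4 nmi-count:1
 */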

/* keep the global state somewhere. */
static struct hwlat_data {

	struct mutex lock;		/* protect changes */

	u64	count;			/* total since reset */

	u64	sample_window;		/* total sampling window (on+off) */
	u64	sample_width;		/* active sampling portion of window */

} hwlat_data = {
	.sample_window		= DEFAULT_SAMPLE_WINDOW,
	.sample_width		= DEFAULT_SAMPLE_WIDTH,
};

static void trace_hwlat_sample(struct hwlat_sample *sample)
{
	struct trace_array *tr = hwlat_trace;
	struct trace_event_call *call = &event_hwlat;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct hwlat_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
					  tracing_gen_ctx());
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->seqnum			= sample->seqnum;
	entry->duration			= sample->duration;
	entry->outer_duration		= sample->outer_duration;
	entry->timestamp		= sample->timestamp;
	entry->nmi_total_ts		= sample->nmi_total_ts;
	entry->nmi_count		= sample->nmi_count;
	entry->count			= sample->count;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

/* Macros to encapsulate the time capturing infrastructure */
#define time_type	u64
#define time_get()	trace_clock_local()
#define time_to_us(x)	div_u64(x, 1000)
#define time_sub(a, b)	((a) - (b))
#define init_time(a, b)	(a = b)
#define time_u64(a)	a
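
/*
 * Editor's note on how these compose: t1 = time_get() and t2 = time_get()
 * return raw trace_clock_local() nanoseconds, so
 * time_to_us(time_sub(t2, t1)) yields the elapsed time in microseconds.
 */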

void trace_hwlat_callback(bool enter)
{
	if (smp_processor_id() != nmi_cpu)
		return;

	/*
	 * Currently trace_clock_local() calls sched_clock() and the
	 * generic version is not NMI safe.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
		if (enter)
			nmi_ts_start = time_get();
		else
			nmi_total_ts += time_get() - nmi_ts_start;
	}

	if (enter)
		nmi_count++;
}
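
/*
 * Editor's illustrative example of the NMI accounting above (numbers are
 * made up): if an NMI burns 30us inside a 100us inner reading, the sample
 * still reports 100us, but nmi_total_ts accumulates ~30000ns (converted
 * to usecs by get_sample()), letting the consumer discount OS-visible NMI
 * time from the suspected hardware latency.
 */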

/**
 * get_sample - sample the CPU TSC and look for likely hardware latencies
 *
 * Used to repeatedly capture the CPU TSC (or similar), looking for potential
 * hardware-induced latency. Called with interrupts disabled and with
 * hwlat_data.lock held.
 */
static int get_sample(void)
{
	struct trace_array *tr = hwlat_trace;
	struct hwlat_sample s;
	time_type start, t1, t2, last_t2;
	s64 diff, outer_diff, total, last_total = 0;
	u64 sample = 0;
	u64 thresh = tracing_thresh;
	u64 outer_sample = 0;
	int ret = -1;
	unsigned int count = 0;

	do_div(thresh, NSEC_PER_USEC); /* modifies thresh in place */

	nmi_cpu = smp_processor_id();
	nmi_total_ts = 0;
	nmi_count = 0;
	/* Make sure NMIs see this first */
	barrier();

	trace_hwlat_callback_enabled = true;

	init_time(last_t2, 0);
	start = time_get(); /* start timestamp */
	outer_diff = 0;

	do {

		t1 = time_get();	/* we'll look for a discontinuity */
		t2 = time_get();

		if (time_u64(last_t2)) {
			/* Check the delta from outer loop (t2 to next t1) */
			outer_diff = time_to_us(time_sub(t1, last_t2));
			/* This shouldn't happen */
			if (outer_diff < 0) {
				pr_err(BANNER "time running backwards\n");
				goto out;
			}
			if (outer_diff > outer_sample)
				outer_sample = outer_diff;
		}
		last_t2 = t2;

		total = time_to_us(time_sub(t2, start)); /* sample width */

		/* Check for possible overflows */
		if (total < last_total) {
			pr_err(BANNER "time total overflowed\n");
			break;
		}
		last_total = total;

		/* This checks the inner loop (t1 to t2) */
		diff = time_to_us(time_sub(t2, t1));     /* current diff */

		if (diff > thresh || outer_diff > thresh) {
			if (!count)
				ktime_get_real_ts64(&s.timestamp);
			count++;
		}

		/* This shouldn't happen */
		if (diff < 0) {
			pr_err(BANNER "time running backwards\n");
			goto out;
		}

		if (diff > sample)
			sample = diff; /* only want highest value */

	} while (total <= hwlat_data.sample_width);

	barrier(); /* finish the above before NMIs see the disable below */
	trace_hwlat_callback_enabled = false;
	barrier(); /* Make sure nmi_total_ts is no longer updated */

	ret = 0;

	/* If we exceed the threshold value, we have found a hardware latency */
	if (sample > thresh || outer_sample > thresh) {
		u64 latency;

		ret = 1;

		/* We read in microseconds */
		if (nmi_total_ts)
			do_div(nmi_total_ts, NSEC_PER_USEC);

		hwlat_data.count++;
		s.seqnum = hwlat_data.count;
		s.duration = sample;
		s.outer_duration = outer_sample;
		s.nmi_total_ts = nmi_total_ts;
		s.nmi_count = nmi_count;
		s.count = count;
		trace_hwlat_sample(&s);

		latency = max(sample, outer_sample);

		/* Keep a running maximum ever recorded hardware latency */
		if (latency > tr->max_latency) {
			tr->max_latency = latency;
			latency_fsnotify(tr);
		}
	}

out:
	return ret;
}
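
/*
 * Editor's sketch of the two intervals get_sample() inspects (not part of
 * the original source):
 *
 *	 t1      t2        t1'      t2'
 *	--|-------|----------|-------|-->  time
 *	    inner    outer     inner
 *
 * "inner" is t2 - t1 within one iteration; "outer" is the gap between the
 * previous iteration's t2 and the current t1. An SMI (or similar) landing
 * anywhere in the busy window stretches one of the two intervals, so no
 * part of the window goes unmeasured.
 */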

static struct cpumask save_cpumask;
static bool disable_migrate;

static void move_to_next_cpu(void)
{
	struct cpumask *current_mask = &save_cpumask;
	struct trace_array *tr = hwlat_trace;
	int next_cpu;

	if (disable_migrate)
		return;
	/*
	 * If for some reason the user modifies the CPU affinity
	 * of this thread, then stop migrating for the duration
	 * of the current test.
	 */
	if (!cpumask_equal(current_mask, current->cpus_ptr))
		goto disable;

	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
	next_cpu = cpumask_next(smp_processor_id(), current_mask);
	put_online_cpus();

	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(current_mask);

	if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
		goto disable;

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);

	sched_setaffinity(0, current_mask);
	return;

 disable:
	disable_migrate = true;
}

/*
 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
 *
 * Used to periodically sample the CPU TSC via a call to get_sample. We
 * disable interrupts, which does (intentionally) introduce latency since we
 * need to ensure nothing else might be running (and thus preempting).
 * Obviously this should never be used in production environments.
 *
 * Each loop iteration runs on one CPU taken, round-robin, from the
 * tracing_cpumask (settable via the tracing_cpumask tracefs file); see
 * the worked example after the function.
 */
static int kthread_fn(void *data)
{
	u64 interval;

	while (!kthread_should_stop()) {

		move_to_next_cpu();

		local_irq_disable();
		get_sample();
		local_irq_enable();

		mutex_lock(&hwlat_data.lock);
		interval = hwlat_data.sample_window - hwlat_data.sample_width;
		mutex_unlock(&hwlat_data.lock);

		do_div(interval, USEC_PER_MSEC); /* modifies interval value */

		/* Always sleep for at least 1ms */
		if (interval < 1)
			interval = 1;

		if (msleep_interruptible(interval))
			break;
	}

	return 0;
}
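
/*
 * Editor's worked example with the defaults above: sample_window is
 * 1000000us and sample_width is 500000us, so interval = 500000us;
 * do_div() by USEC_PER_MSEC (1000) makes that 500, and the thread sleeps
 * 500ms of each 1s window, spending the other half busy in get_sample().
 */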

/**
 * start_kthread - Kick off the hardware latency sampling/detector kthread
 * @tr: the trace_array instance this tracer is attached to
 *
 * This starts the kernel thread that will sit and sample the CPU timestamp
 * counter (TSC or similar) and look for potential hardware latencies.
 */
static int start_kthread(struct trace_array *tr)
{
	struct cpumask *current_mask = &save_cpumask;
	struct task_struct *kthread;
	int next_cpu;

	if (hwlat_kthread)
		return 0;

	/* Just pick the first CPU on first iteration */
	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
	put_online_cpus();
	next_cpu = cpumask_first(current_mask);

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		return -ENOMEM;
	}

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);
	sched_setaffinity(kthread->pid, current_mask);

	hwlat_kthread = kthread;
	wake_up_process(kthread);

	return 0;
}

/**
 * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
 *
 * This kicks the running hardware latency sampling/detector kernel thread and
 * tells it to stop sampling now. Use this on unload and at system shutdown.
 */
static void stop_kthread(void)
{
	if (!hwlat_kthread)
		return;
	kthread_stop(hwlat_kthread);
	hwlat_kthread = NULL;
}

/**
 * hwlat_read - Wrapper read function for reading both window and width
 * @filp: The active open file structure
 * @ubuf: The userspace provided buffer to read value into
 * @cnt: The maximum number of bytes to read
 * @ppos: The current "file" position
 *
 * This function provides a generic read implementation for the global state
 * "hwlat_data" structure filesystem entries.
 */
static ssize_t hwlat_read(struct file *filp, char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	char buf[U64STR_SIZE];
	u64 *entry = filp->private_data;
	u64 val;
	int len;

	if (!entry)
		return -EFAULT;

	if (cnt > sizeof(buf))
		cnt = sizeof(buf);

	val = *entry;

	len = snprintf(buf, sizeof(buf), "%llu\n", val);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

/**
 * hwlat_width_write - Write function for "width" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "width" interface
 * to the hardware latency detector. It can be used to configure how many
 * microseconds of each total window we will actively sample for
 * hardware-induced latency periods. Obviously, it is not possible to
 * sample constantly and have the system respond to a sample reader, or,
 * worse, without having the system appear to have gone out to lunch. It
 * is enforced that width is less than the total window size.
 */
static ssize_t
hwlat_width_write(struct file *filp, const char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (val < hwlat_data.sample_window)
		hwlat_data.sample_width = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

/**
 * hwlat_window_write - Write function for "window" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "window" interface
 * to the hardware latency detector. The window is the total time
 * in us that will be considered one sample period. Conceptually, windows
 * occur back-to-back and contain a sample width period during which
 * actual sampling occurs. Can be used to write a new total window size. It
 * is enforced that any value written must be greater than the sample width
 * size, or an error results.
 */
static ssize_t
hwlat_window_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (hwlat_data.sample_width < val)
		hwlat_data.sample_window = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}
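
/*
 * Editor's illustrative tuning example (arbitrary values): drop the duty
 * cycle to 10% using a 2s window. Each write is validated against the
 * other current value under hwlat_data.lock, so order can matter:
 *
 *	# cd /sys/kernel/tracing/hwlat_detector
 *	# echo 2000000 > window		(2s total window, in usecs)
 *	# echo 200000 > width		(0.2s of busy sampling per window)
 */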

static const struct file_operations width_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_width_write,
};

static const struct file_operations window_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_window_write,
};

/**
 * init_tracefs - A function to initialize the tracefs interface files
 *
 * This function creates entries in tracefs for "hwlat_detector".
 * It creates the hwlat_detector directory in the tracing directory,
 * and within that directory are the width and window files used to
 * change and view those values.
 */
static int init_tracefs(void)
{
	int ret;
	struct dentry *top_dir;

	ret = tracing_init_dentry();
	if (ret)
		return -ENOMEM;

	top_dir = tracefs_create_dir("hwlat_detector", NULL);
	if (!top_dir)
		return -ENOMEM;

	hwlat_sample_window = tracefs_create_file("window", 0640,
						  top_dir,
						  &hwlat_data.sample_window,
						  &window_fops);
	if (!hwlat_sample_window)
		goto err;

	hwlat_sample_width = tracefs_create_file("width", 0640,
						 top_dir,
						 &hwlat_data.sample_width,
						 &width_fops);
	if (!hwlat_sample_width)
		goto err;

	return 0;

 err:
	tracefs_remove(top_dir);
	return -ENOMEM;
}

static void hwlat_tracer_start(struct trace_array *tr)
{
	int err;

	err = start_kthread(tr);
	if (err)
		pr_err(BANNER "Cannot start hwlat kthread\n");
}

static void hwlat_tracer_stop(struct trace_array *tr)
{
	stop_kthread();
}

static bool hwlat_busy;

static int hwlat_tracer_init(struct trace_array *tr)
{
	/* Only allow one instance to enable this */
	if (hwlat_busy)
		return -EBUSY;

	hwlat_trace = tr;

	disable_migrate = false;
	hwlat_data.count = 0;
	tr->max_latency = 0;
	save_tracing_thresh = tracing_thresh;

	/* tracing_thresh is in nsecs, we speak in usecs */
	if (!tracing_thresh)
		tracing_thresh = last_tracing_thresh;

	if (tracer_tracing_is_on(tr))
		hwlat_tracer_start(tr);

	hwlat_busy = true;

	return 0;
}

static void hwlat_tracer_reset(struct trace_array *tr)
{
	stop_kthread();

	/* the tracing threshold is static between runs */
	last_tracing_thresh = tracing_thresh;

	tracing_thresh = save_tracing_thresh;
	hwlat_busy = false;
}

static struct tracer hwlat_tracer __read_mostly =
{
	.name		= "hwlat",
	.init		= hwlat_tracer_init,
	.reset		= hwlat_tracer_reset,
	.start		= hwlat_tracer_start,
	.stop		= hwlat_tracer_stop,
	.allow_instances = true,
};

__init static int init_hwlat_tracer(void)
{
	int ret;

	mutex_init(&hwlat_data.lock);

	ret = register_tracer(&hwlat_tracer);
	if (ret)
		return ret;

	init_tracefs();

	return 0;
}
late_initcall(init_hwlat_tracer);