xref: /openbmc/linux/kernel/trace/trace_hwlat.c (revision f447c196fe7a3a92c6396f7628020cb8d564be15)
/*
 * trace_hwlat.c - A simple Hardware Latency detector.
 *
 * Use this tracer to detect large system latencies induced by the behavior of
 * certain underlying system hardware or firmware, independent of Linux itself.
 * The code was developed originally to detect the presence of SMIs on Intel
 * and AMD systems, although there is no dependency upon x86 herein.
 *
 * The classical example usage of this tracer is in detecting the presence of
 * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
 * somewhat special form of hardware interrupt spawned from earlier CPU debug
 * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
 * LPC (or other device) to generate a special interrupt under certain
 * circumstances, for example, upon expiration of a special SMI timer device,
 * due to certain external thermal readings, on certain I/O address accesses,
 * and other situations. An SMI hits a special CPU pin, triggers a special
 * SMI mode (complete with special memory map), and the OS is unaware.
 *
 * Although certain hardware-induced latencies are necessary (for example,
 * a modern system often requires an SMI handler for correct thermal control
 * and remote management), they can wreak havoc upon any OS-level performance
 * guarantees toward low-latency, especially when the OS is not even made
 * aware of the presence of these interrupts. For this reason, we need a
 * somewhat brute force mechanism to detect these interrupts. In this case,
 * we do it by hogging all of the CPU(s) for configurable timer intervals,
 * sampling the built-in CPU timer, looking for discontiguous readings.
 *
 * WARNING: This implementation necessarily introduces latencies. Therefore,
 *          you should NEVER use this tracer while running in a production
 *          environment requiring any kind of low-latency performance
 *          guarantee(s).
 *
 * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
 * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
 *
 * Includes useful feedback from Clark Williams <clark@redhat.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
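
/*
 * Rough usage sketch (assuming tracefs is mounted at the usual
 * /sys/kernel/tracing; the mount point may differ on your system):
 *
 *	# echo hwlat > /sys/kernel/tracing/current_tracer
 *	# echo 1 > /sys/kernel/tracing/tracing_on
 *	# cat /sys/kernel/tracing/trace
 *
 * The active sampling width and the overall window are tuned through the
 * hwlat_detector/width and hwlat_detector/window files created by
 * init_tracefs() below, and the tracing_thresh file (in usecs) sets the
 * smallest latency that will be recorded.
 */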
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include "trace.h"

static struct trace_array	*hwlat_trace;

#define U64STR_SIZE		22			/* 20 digits max */

#define BANNER			"hwlat_detector: "
#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
#define DEFAULT_LAT_THRESHOLD	10			/* 10us */

/* sampling thread */
static struct task_struct *hwlat_kthread;

static struct dentry *hwlat_sample_width;	/* sample width us */
static struct dentry *hwlat_sample_window;	/* sample window us */

/* Save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;

/* NMI timestamp counters */
static u64 nmi_ts_start;
static u64 nmi_total_ts;
static int nmi_count;
static int nmi_cpu;

/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;

/* If the user changed threshold, remember it */
static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;

/* Individual latency samples are stored here when detected. */
struct hwlat_sample {
	u64		seqnum;		/* unique sequence */
	u64		duration;	/* delta */
	u64		outer_duration;	/* delta (outer loop) */
	u64		nmi_total_ts;	/* Total time spent in NMIs */
	struct timespec	timestamp;	/* wall time */
	int		nmi_count;	/* # NMIs during this sample */
};

/* keep the global state somewhere. */
static struct hwlat_data {

	struct mutex lock;		/* protect changes */

	u64	count;			/* total since reset */

	u64	sample_window;		/* total sampling window (on+off) */
	u64	sample_width;		/* active sampling portion of window */

} hwlat_data = {
	.sample_window		= DEFAULT_SAMPLE_WINDOW,
	.sample_width		= DEFAULT_SAMPLE_WIDTH,
};

static void trace_hwlat_sample(struct hwlat_sample *sample)
{
	struct trace_array *tr = hwlat_trace;
	struct trace_event_call *call = &event_hwlat;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct hwlat_entry *entry;
	unsigned long flags;
	int pc;

	pc = preempt_count();
	local_save_flags(flags);

	event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->seqnum			= sample->seqnum;
	entry->duration			= sample->duration;
	entry->outer_duration		= sample->outer_duration;
	entry->timestamp		= sample->timestamp;
	entry->nmi_total_ts		= sample->nmi_total_ts;
	entry->nmi_count		= sample->nmi_count;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

/* Macros to encapsulate the time capturing infrastructure */
#define time_type	u64
#define time_get()	trace_clock_local()
#define time_to_us(x)	div_u64(x, 1000)
#define time_sub(a, b)	((a) - (b))
#define init_time(a, b)	(a = b)
#define time_u64(a)	a

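/*
 * Called on NMI entry/exit (via the ftrace NMI hooks) while
 * trace_hwlat_callback_enabled is set, so that the time spent in NMIs on
 * the sampling CPU is recorded and reported alongside any detected
 * latency.
 */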
void trace_hwlat_callback(bool enter)
{
	if (smp_processor_id() != nmi_cpu)
		return;

	/*
	 * Currently trace_clock_local() calls sched_clock() and the
	 * generic version is not NMI safe.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
		if (enter)
			nmi_ts_start = time_get();
		else
			nmi_total_ts = time_get() - nmi_ts_start;
	}

	if (enter)
		nmi_count++;
}

/**
 * get_sample - sample the CPU TSC and look for likely hardware latencies
 *
 * Used to repeatedly capture the CPU TSC (or similar), looking for potential
 * hardware-induced latency. Called with interrupts disabled and with
 * hwlat_data.lock held.
 */
static int get_sample(void)
{
	struct trace_array *tr = hwlat_trace;
	time_type start, t1, t2, last_t2;
	s64 diff, total, last_total = 0;
	u64 sample = 0;
	u64 thresh = tracing_thresh;
	u64 outer_sample = 0;
	int ret = -1;

	do_div(thresh, NSEC_PER_USEC); /* modifies thresh in place */

	nmi_cpu = smp_processor_id();
	nmi_total_ts = 0;
	nmi_count = 0;
	/* Make sure NMIs see this first */
	barrier();

	trace_hwlat_callback_enabled = true;

	init_time(last_t2, 0);
	start = time_get(); /* start timestamp */

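	/*
	 * Tight sampling loop: take two timestamps back to back. A large
	 * gap between t1 and t2 (the "inner" check), or between the
	 * previous t2 and the next t1 (the "outer" check), means something
	 * other than this code ran on the CPU, e.g. an SMI, since
	 * interrupts are disabled while we sample.
	 */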
	do {

		t1 = time_get();	/* we'll look for a discontinuity */
		t2 = time_get();

		if (time_u64(last_t2)) {
			/* Check the delta from outer loop (t2 to next t1) */
			diff = time_to_us(time_sub(t1, last_t2));
			/* This shouldn't happen */
			if (diff < 0) {
				pr_err(BANNER "time running backwards\n");
				goto out;
			}
			if (diff > outer_sample)
				outer_sample = diff;
		}
		last_t2 = t2;

		total = time_to_us(time_sub(t2, start)); /* sample width */

		/* Check for possible overflows */
		if (total < last_total) {
			pr_err("Time total overflowed\n");
			break;
		}
		last_total = total;

		/* This checks the inner loop (t1 to t2) */
		diff = time_to_us(time_sub(t2, t1));     /* current diff */

		/* This shouldn't happen */
		if (diff < 0) {
			pr_err(BANNER "time running backwards\n");
			goto out;
		}

		if (diff > sample)
			sample = diff; /* only want highest value */

	} while (total <= hwlat_data.sample_width);

	barrier(); /* finish the above in the view for NMIs */
	trace_hwlat_callback_enabled = false;
	barrier(); /* Make sure nmi_total_ts is no longer updated */

	ret = 0;

	/* If we exceed the threshold value, we have found a hardware latency */
	if (sample > thresh || outer_sample > thresh) {
		struct hwlat_sample s;

		ret = 1;

		/* We read in microseconds */
		if (nmi_total_ts)
			do_div(nmi_total_ts, NSEC_PER_USEC);

		hwlat_data.count++;
		s.seqnum = hwlat_data.count;
		s.duration = sample;
		s.outer_duration = outer_sample;
		s.timestamp = CURRENT_TIME;
		s.nmi_total_ts = nmi_total_ts;
		s.nmi_count = nmi_count;
		trace_hwlat_sample(&s);

		/* Keep a running maximum ever recorded hardware latency */
		if (sample > tr->max_latency)
			tr->max_latency = sample;
	}

out:
	return ret;
}

static struct cpumask save_cpumask;
static bool disable_migrate;

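/*
 * The sampling thread can only watch one CPU per window, so it is moved
 * round-robin across the CPUs in tracing_buffer_mask, one window at a
 * time. If the user pins the thread to particular CPUs themselves,
 * migration is disabled for the remainder of this run.
 */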
static void move_to_next_cpu(void)
{
	struct cpumask *current_mask = &save_cpumask;
	int next_cpu;

	if (disable_migrate)
		return;
	/*
	 * If for some reason the user modifies the CPU affinity
	 * of this thread, then stop migrating for the duration
	 * of the current test.
	 */
	if (!cpumask_equal(current_mask, &current->cpus_allowed))
		goto disable;

	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	next_cpu = cpumask_next(smp_processor_id(), current_mask);
	put_online_cpus();

	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(current_mask);

	if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
		goto disable;

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);

	sched_setaffinity(0, current_mask);
	return;

 disable:
	disable_migrate = true;
}

/*
 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
 *
 * Used to periodically sample the CPU TSC via a call to get_sample. We
 * disable interrupts, which does (intentionally) introduce latency since we
 * need to ensure nothing else might be running (and thus preempting).
 * Obviously this should never be used in production environments.
 *
 * Currently this runs on whichever CPU it was scheduled on, but most
 * real-world hardware latency situations occur across several CPUs,
 * so we might later generalize this if we find there are any actual
 * systems with alternate SMI delivery or other hardware latencies.
 */
static int kthread_fn(void *data)
{
	u64 interval;

	while (!kthread_should_stop()) {

		move_to_next_cpu();

		local_irq_disable();
		get_sample();
		local_irq_enable();

		mutex_lock(&hwlat_data.lock);
		interval = hwlat_data.sample_window - hwlat_data.sample_width;
		mutex_unlock(&hwlat_data.lock);

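		/*
		 * The remainder of the window is in usecs; convert to
		 * msecs for msleep_interruptible() below.
		 */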
		do_div(interval, USEC_PER_MSEC); /* modifies interval value */

		/* Always sleep for at least 1ms */
		if (interval < 1)
			interval = 1;

		if (msleep_interruptible(interval))
			break;
	}

	return 0;
}

/**
 * start_kthread - Kick off the hardware latency sampling/detector kthread
 *
 * This starts the kernel thread that will sit and sample the CPU timestamp
 * counter (TSC or similar) and look for potential hardware latencies.
 */
static int start_kthread(struct trace_array *tr)
{
	struct cpumask *current_mask = &save_cpumask;
	struct task_struct *kthread;
	int next_cpu;

	/* Just pick the first CPU on first iteration */
	current_mask = &save_cpumask;
	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	put_online_cpus();
	next_cpu = cpumask_first(current_mask);

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		return -ENOMEM;
	}

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);
	sched_setaffinity(kthread->pid, current_mask);

	hwlat_kthread = kthread;
	wake_up_process(kthread);

	return 0;
}

/**
 * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
 *
 * This kicks the running hardware latency sampling/detector kernel thread and
 * tells it to stop sampling now. Use this on unload and at system shutdown.
 */
static void stop_kthread(void)
{
	if (!hwlat_kthread)
		return;
	kthread_stop(hwlat_kthread);
	hwlat_kthread = NULL;
}

/*
 * hwlat_read - Wrapper read function for reading both window and width
 * @filp: The active open file structure
 * @ubuf: The userspace provided buffer to read value into
 * @cnt: The maximum number of bytes to read
 * @ppos: The current "file" position
 *
 * This function provides a generic read implementation for the global state
 * "hwlat_data" structure filesystem entries.
 */
static ssize_t hwlat_read(struct file *filp, char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	char buf[U64STR_SIZE];
	u64 *entry = filp->private_data;
	u64 val;
	int len;

	if (!entry)
		return -EFAULT;

	if (cnt > sizeof(buf))
		cnt = sizeof(buf);

	val = *entry;

	len = snprintf(buf, sizeof(buf), "%llu\n", val);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

/**
 * hwlat_width_write - Write function for "width" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "width" interface
 * to the hardware latency detector. It can be used to configure
 * how many us of the total window we will actively sample for any
 * hardware-induced latency periods. Obviously, it is not possible to
 * sample constantly and have the system respond to a sample reader, or,
 * worse, without having the system appear to have gone out to lunch. It
 * is enforced that width is less than the total window size.
 */
static ssize_t
hwlat_width_write(struct file *filp, const char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (val < hwlat_data.sample_window)
		hwlat_data.sample_width = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

/**
 * hwlat_window_write - Write function for "window" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "window" interface
 * to the hardware latency detector. The window is the total time
 * in us that will be considered one sample period. Conceptually, windows
 * occur back-to-back and contain a sample width period during which
 * actual sampling occurs. Can be used to write a new total window size. It
 * is enforced that any value written must be greater than the sample width
 * size, or an error results.
 */
static ssize_t
hwlat_window_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (hwlat_data.sample_width < val)
		hwlat_data.sample_window = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

static const struct file_operations width_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_width_write,
};

static const struct file_operations window_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_window_write,
};

/**
 * init_tracefs - A function to initialize the tracefs interface files
 *
 * This function creates entries in tracefs for "hwlat_detector".
 * It creates the hwlat_detector directory in the tracing directory,
 * and within that directory are the width and window files used to
 * change and view those values.
 */
static int init_tracefs(void)
{
	struct dentry *d_tracer;
	struct dentry *top_dir;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return -ENOMEM;

	top_dir = tracefs_create_dir("hwlat_detector", d_tracer);
	if (!top_dir)
		return -ENOMEM;

	hwlat_sample_window = tracefs_create_file("window", 0640,
						  top_dir,
						  &hwlat_data.sample_window,
						  &window_fops);
	if (!hwlat_sample_window)
		goto err;

	hwlat_sample_width = tracefs_create_file("width", 0644,
						 top_dir,
						 &hwlat_data.sample_width,
						 &width_fops);
	if (!hwlat_sample_width)
		goto err;

	return 0;

 err:
	tracefs_remove_recursive(top_dir);
	return -ENOMEM;
}

static void hwlat_tracer_start(struct trace_array *tr)
{
	int err;

	err = start_kthread(tr);
	if (err)
		pr_err(BANNER "Cannot start hwlat kthread\n");
}

static void hwlat_tracer_stop(struct trace_array *tr)
{
	stop_kthread();
}

static bool hwlat_busy;

static int hwlat_tracer_init(struct trace_array *tr)
{
	/* Only allow one instance to enable this */
	if (hwlat_busy)
		return -EBUSY;

	hwlat_trace = tr;

	disable_migrate = false;
	hwlat_data.count = 0;
	tr->max_latency = 0;
	save_tracing_thresh = tracing_thresh;

	/* tracing_thresh is in nsecs, we speak in usecs */
	if (!tracing_thresh)
		tracing_thresh = last_tracing_thresh;

	if (tracer_tracing_is_on(tr))
		hwlat_tracer_start(tr);

	hwlat_busy = true;

	return 0;
}

static void hwlat_tracer_reset(struct trace_array *tr)
{
	stop_kthread();

	/* the tracing threshold is static between runs */
	last_tracing_thresh = tracing_thresh;

	tracing_thresh = save_tracing_thresh;
	hwlat_busy = false;
}

static struct tracer hwlat_tracer __read_mostly =
{
	.name		= "hwlat",
	.init		= hwlat_tracer_init,
	.reset		= hwlat_tracer_reset,
	.start		= hwlat_tracer_start,
	.stop		= hwlat_tracer_stop,
	.allow_instances = true,
};

__init static int init_hwlat_tracer(void)
{
	int ret;

	mutex_init(&hwlat_data.lock);

	ret = register_tracer(&hwlat_tracer);
	if (ret)
		return ret;

	init_tracefs();

	return 0;
}
late_initcall(init_hwlat_tracer);