// SPDX-License-Identifier: GPL-2.0
/*
 * trace_hwlatdetect.c - A simple Hardware Latency detector.
 *
 * Use this tracer to detect large system latencies induced by the behavior of
 * certain underlying system hardware or firmware, independent of Linux itself.
 * The code was developed originally to detect the presence of SMIs on Intel
 * and AMD systems, although there is no dependency upon x86 herein.
 *
 * The classical example usage of this tracer is in detecting the presence of
 * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
 * somewhat special form of hardware interrupt spawned from earlier CPU debug
 * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
 * LPC (or other device) to generate a special interrupt under certain
 * circumstances, for example, upon expiration of a special SMI timer device,
 * due to certain external thermal readings, on certain I/O address accesses,
 * and other situations. An SMI hits a special CPU pin, triggers a special
 * SMI mode (complete with special memory map), and the OS is unaware.
 *
 * Although certain hardware-induced latencies are necessary (for example,
 * a modern system often requires an SMI handler for correct thermal control
 * and remote management), they can wreak havoc upon any OS-level
 * low-latency performance guarantees, especially when the OS is not even
 * made aware of the presence of these interrupts. For this reason, we need
 * a somewhat brute-force mechanism to detect them. In this case, we do it
 * by hogging the CPU(s) for configurable timer intervals, sampling the
 * built-in CPU timer and looking for discontiguous readings.
 *
 * WARNING: This implementation necessarily introduces latencies. Therefore,
 *          you should NEVER use this tracer while running in a production
 *          environment requiring any kind of low-latency performance
 *          guarantee(s).
 *
 * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
 * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
 *
 * Includes useful feedback from Clark Williams <clark@redhat.com>
 *
 */
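/*
 * Usage sketch: the tracer below is driven entirely through tracefs. The
 * paths here assume tracefs is mounted at its usual location,
 * /sys/kernel/tracing, and the window/width values shown are simply the
 * defaults defined further down (1s window, 0.5s of active sampling).
 *
 *	echo hwlat > /sys/kernel/tracing/current_tracer
 *	echo 1000000 > /sys/kernel/tracing/hwlat_detector/window
 *	echo 500000 > /sys/kernel/tracing/hwlat_detector/width
 *	echo 10 > /sys/kernel/tracing/tracing_thresh   (threshold, in usecs)
 *	cat /sys/kernel/tracing/trace
 */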
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/sched/clock.h>
#include "trace.h"

static struct trace_array	*hwlat_trace;

#define U64STR_SIZE		22			/* 20 digits max */

#define BANNER			"hwlat_detector: "
#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
#define DEFAULT_LAT_THRESHOLD	10			/* 10us */

/* sampling thread */
static struct task_struct *hwlat_kthread;

static struct dentry *hwlat_sample_width;	/* sample width us */
static struct dentry *hwlat_sample_window;	/* sample window us */

/* Save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;

/* NMI timestamp counters */
static u64 nmi_ts_start;
static u64 nmi_total_ts;
static int nmi_count;
static int nmi_cpu;

/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;

/* If the user changed threshold, remember it */
static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;

/* Individual latency samples are stored here when detected. */
struct hwlat_sample {
	u64			seqnum;		/* unique sequence */
	u64			duration;	/* delta */
	u64			outer_duration;	/* delta (outer loop) */
	u64			nmi_total_ts;	/* Total time spent in NMIs */
	struct timespec64	timestamp;	/* wall time */
	int			nmi_count;	/* # NMIs during this sample */
};

/* keep the global state somewhere. */
static struct hwlat_data {

	struct mutex lock;		/* protect changes */

	u64	count;			/* total since reset */

	u64	sample_window;		/* total sampling window (on+off) */
	u64	sample_width;		/* active sampling portion of window */

} hwlat_data = {
	.sample_window		= DEFAULT_SAMPLE_WINDOW,
	.sample_width		= DEFAULT_SAMPLE_WIDTH,
};

static void trace_hwlat_sample(struct hwlat_sample *sample)
{
	struct trace_array *tr = hwlat_trace;
	struct trace_event_call *call = &event_hwlat;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct hwlat_entry *entry;
	unsigned long flags;
	int pc;

	pc = preempt_count();
	local_save_flags(flags);

	event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->seqnum			= sample->seqnum;
	entry->duration			= sample->duration;
	entry->outer_duration		= sample->outer_duration;
	entry->timestamp		= sample->timestamp;
	entry->nmi_total_ts		= sample->nmi_total_ts;
	entry->nmi_count		= sample->nmi_count;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

/* Macros to encapsulate the time capturing infrastructure */
#define time_type	u64
#define time_get()	trace_clock_local()
#define time_to_us(x)	div_u64(x, 1000)
#define time_sub(a, b)	((a) - (b))
#define init_time(a, b)	(a = b)
#define time_u64(a)	a

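/*
 * trace_hwlat_callback - NMI entry/exit hook for the hwlat tracer
 *
 * Called with @enter true on NMI entry and false on NMI exit while
 * trace_hwlat_callback_enabled is set, so that get_sample() can report
 * how much of a detected latency was spent in NMIs and how many NMIs
 * fired during the sample. Only the CPU running get_sample() records
 * anything.
 */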
void trace_hwlat_callback(bool enter)
{
	if (smp_processor_id() != nmi_cpu)
		return;

	/*
	 * Currently trace_clock_local() calls sched_clock() and the
	 * generic version is not NMI safe.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
		if (enter)
			nmi_ts_start = time_get();
		else
			nmi_total_ts += time_get() - nmi_ts_start;
	}

	if (enter)
		nmi_count++;
}

/**
 * get_sample - sample the CPU TSC and look for likely hardware latencies
 *
 * Used to repeatedly capture the CPU TSC (or similar), looking for potential
 * hardware-induced latency. Called with interrupts disabled and with
 * hwlat_data.lock held.
 */
static int get_sample(void)
{
	struct trace_array *tr = hwlat_trace;
	time_type start, t1, t2, last_t2;
	s64 diff, total, last_total = 0;
	u64 sample = 0;
	u64 thresh = tracing_thresh;
	u64 outer_sample = 0;
	int ret = -1;

	do_div(thresh, NSEC_PER_USEC); /* modifies interval value */

	nmi_cpu = smp_processor_id();
	nmi_total_ts = 0;
	nmi_count = 0;
	/* Make sure NMIs see this first */
	barrier();

	trace_hwlat_callback_enabled = true;

	init_time(last_t2, 0);
	start = time_get(); /* start timestamp */

	do {

		t1 = time_get();	/* we'll look for a discontinuity */
		t2 = time_get();

		if (time_u64(last_t2)) {
			/* Check the delta from outer loop (t2 to next t1) */
			diff = time_to_us(time_sub(t1, last_t2));
			/* This shouldn't happen */
			if (diff < 0) {
				pr_err(BANNER "time running backwards\n");
				goto out;
			}
			if (diff > outer_sample)
				outer_sample = diff;
		}
		last_t2 = t2;

		total = time_to_us(time_sub(t2, start)); /* sample width */

		/* Check for possible overflows */
		if (total < last_total) {
			pr_err("Time total overflowed\n");
			break;
		}
		last_total = total;

		/* This checks the inner loop (t1 to t2) */
		diff = time_to_us(time_sub(t2, t1));	/* current diff */

		/* This shouldn't happen */
		if (diff < 0) {
			pr_err(BANNER "time running backwards\n");
			goto out;
		}

		if (diff > sample)
			sample = diff; /* only want highest value */

	} while (total <= hwlat_data.sample_width);

	barrier(); /* finish the above in the view for NMIs */
	trace_hwlat_callback_enabled = false;
	barrier(); /* Make sure nmi_total_ts is no longer updated */

	ret = 0;

	/* If we exceed the threshold value, we have found a hardware latency */
	if (sample > thresh || outer_sample > thresh) {
		struct hwlat_sample s;
		u64 latency;

		ret = 1;

		/* We read in microseconds */
		if (nmi_total_ts)
			do_div(nmi_total_ts, NSEC_PER_USEC);

		hwlat_data.count++;
		s.seqnum = hwlat_data.count;
		s.duration = sample;
		s.outer_duration = outer_sample;
		ktime_get_real_ts64(&s.timestamp);
		s.nmi_total_ts = nmi_total_ts;
		s.nmi_count = nmi_count;
		trace_hwlat_sample(&s);

		latency = max(sample, outer_sample);

		/* Keep a running maximum ever recorded hardware latency */
		if (latency > tr->max_latency) {
			tr->max_latency = latency;
			latency_fsnotify(tr);
		}
	}

out:
	return ret;
}

static struct cpumask save_cpumask;
static bool disable_migrate;

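/*
 * move_to_next_cpu - rotate the sampling thread to the next allowed CPU
 *
 * Each sampling window the kthread hops to the next online CPU in
 * tracing_buffer_mask (wrapping back to the first one), so that over time
 * every CPU in the tracing mask gets a turn at being sampled. If the user
 * changes the thread's affinity by hand, migration is disabled for the
 * rest of the current run.
 */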
static void move_to_next_cpu(void)
{
	struct cpumask *current_mask = &save_cpumask;
	int next_cpu;

	if (disable_migrate)
		return;
	/*
	 * If for some reason the user modifies the CPU affinity
	 * of this thread, then stop migrating for the duration
	 * of the current test.
	 */
	if (!cpumask_equal(current_mask, current->cpus_ptr))
		goto disable;

	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	next_cpu = cpumask_next(smp_processor_id(), current_mask);
	put_online_cpus();

	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(current_mask);

	if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
		goto disable;

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);

	sched_setaffinity(0, current_mask);
	return;

 disable:
	disable_migrate = true;
}

/*
 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
 *
 * Used to periodically sample the CPU TSC via a call to get_sample. We
 * disable interrupts, which does (intentionally) introduce latency since we
 * need to ensure nothing else might be running (and thus preempting).
 * Obviously this should never be used in production environments.
 *
 * Executes one loop iteration on each CPU in the tracing_cpumask sysfs file.
 */
static int kthread_fn(void *data)
{
	u64 interval;

	while (!kthread_should_stop()) {

		move_to_next_cpu();

		local_irq_disable();
		get_sample();
		local_irq_enable();

		mutex_lock(&hwlat_data.lock);
		interval = hwlat_data.sample_window - hwlat_data.sample_width;
		mutex_unlock(&hwlat_data.lock);

		do_div(interval, USEC_PER_MSEC); /* modifies interval value */

		/* Always sleep for at least 1ms */
		if (interval < 1)
			interval = 1;

		if (msleep_interruptible(interval))
			break;
	}

	return 0;
}

/**
 * start_kthread - Kick off the hardware latency sampling/detector kthread
 *
 * This starts the kernel thread that will sit and sample the CPU timestamp
 * counter (TSC or similar) and look for potential hardware latencies.
 */
static int start_kthread(struct trace_array *tr)
{
	struct cpumask *current_mask = &save_cpumask;
	struct task_struct *kthread;
	int next_cpu;

	if (WARN_ON(hwlat_kthread))
		return 0;

	/* Just pick the first CPU on first iteration */
	current_mask = &save_cpumask;
	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	put_online_cpus();
	next_cpu = cpumask_first(current_mask);

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		return -ENOMEM;
	}

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);
	sched_setaffinity(kthread->pid, current_mask);

	hwlat_kthread = kthread;
	wake_up_process(kthread);

	return 0;
}

/**
 * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
 *
 * This kicks the running hardware latency sampling/detector kernel thread and
 * tells it to stop sampling now. Use this on unload and at system shutdown.
 */
static void stop_kthread(void)
{
	if (!hwlat_kthread)
		return;
	kthread_stop(hwlat_kthread);
	hwlat_kthread = NULL;
}

/*
 * hwlat_read - Wrapper read function for reading both window and width
 * @filp: The active open file structure
 * @ubuf: The userspace provided buffer to read value into
 * @cnt: The maximum number of bytes to read
 * @ppos: The current "file" position
 *
 * This function provides a generic read implementation for the global state
 * "hwlat_data" structure filesystem entries.
 */
static ssize_t hwlat_read(struct file *filp, char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	char buf[U64STR_SIZE];
	u64 *entry = filp->private_data;
	u64 val;
	int len;

	if (!entry)
		return -EFAULT;

	if (cnt > sizeof(buf))
		cnt = sizeof(buf);

	val = *entry;

	len = snprintf(buf, sizeof(buf), "%llu\n", val);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

/**
 * hwlat_width_write - Write function for "width" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "width" interface
 * to the hardware latency detector. It can be used to configure how many
 * us of the total window we will actively sample for any hardware-induced
 * latency periods. Obviously, it is not possible to sample constantly and
 * still have the system respond to a sample reader or, worse, not appear
 * to have gone out to lunch. It is enforced that width is less than the
 * total window size.
 */
static ssize_t
hwlat_width_write(struct file *filp, const char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (val < hwlat_data.sample_window)
		hwlat_data.sample_width = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

/**
 * hwlat_window_write - Write function for "window" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "window" interface
 * to the hardware latency detector. The window is the total time
 * in us that will be considered one sample period. Conceptually, windows
 * occur back-to-back and contain a sample width period during which
 * actual sampling occurs. Can be used to write a new total window size. It
 * is enforced that any value written must be greater than the sample width
 * size, or an error results.
 */
static ssize_t
hwlat_window_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (hwlat_data.sample_width < val)
		hwlat_data.sample_window = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

static const struct file_operations width_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_width_write,
};

static const struct file_operations window_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_window_write,
};

/**
 * init_tracefs - A function to initialize the tracefs interface files
 *
 * This function creates entries in tracefs for "hwlat_detector".
 * It creates the hwlat_detector directory in the tracing directory,
 * and within that directory are the width and window files used to
 * change and view those values.
 */
static int init_tracefs(void)
{
	struct dentry *d_tracer;
	struct dentry *top_dir;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return -ENOMEM;

	top_dir = tracefs_create_dir("hwlat_detector", d_tracer);
	if (!top_dir)
		return -ENOMEM;

	hwlat_sample_window = tracefs_create_file("window", 0640,
						  top_dir,
						  &hwlat_data.sample_window,
						  &window_fops);
	if (!hwlat_sample_window)
		goto err;

	hwlat_sample_width = tracefs_create_file("width", 0644,
						 top_dir,
						 &hwlat_data.sample_width,
						 &width_fops);
	if (!hwlat_sample_width)
		goto err;

	return 0;

 err:
	tracefs_remove_recursive(top_dir);
	return -ENOMEM;
}

static void hwlat_tracer_start(struct trace_array *tr)
{
	int err;

	err = start_kthread(tr);
	if (err)
		pr_err(BANNER "Cannot start hwlat kthread\n");
}

static void hwlat_tracer_stop(struct trace_array *tr)
{
	stop_kthread();
}

static bool hwlat_busy;

static int hwlat_tracer_init(struct trace_array *tr)
{
	/* Only allow one instance to enable this */
	if (hwlat_busy)
		return -EBUSY;

	hwlat_trace = tr;

	disable_migrate = false;
	hwlat_data.count = 0;
	tr->max_latency = 0;
	save_tracing_thresh = tracing_thresh;

	/* tracing_thresh is in nsecs, we speak in usecs */
	if (!tracing_thresh)
		tracing_thresh = last_tracing_thresh;

	if (tracer_tracing_is_on(tr))
		hwlat_tracer_start(tr);

	hwlat_busy = true;

	return 0;
}

static void hwlat_tracer_reset(struct trace_array *tr)
{
	stop_kthread();

	/* the tracing threshold is static between runs */
	last_tracing_thresh = tracing_thresh;

	tracing_thresh = save_tracing_thresh;
	hwlat_busy = false;
}

static struct tracer hwlat_tracer __read_mostly =
{
	.name		= "hwlat",
	.init		= hwlat_tracer_init,
	.reset		= hwlat_tracer_reset,
	.start		= hwlat_tracer_start,
	.stop		= hwlat_tracer_stop,
	.allow_instances = true,
};

__init static int init_hwlat_tracer(void)
{
	int ret;

	mutex_init(&hwlat_data.lock);

	ret = register_tracer(&hwlat_tracer);
	if (ret)
		return ret;

	init_tracefs();

	return 0;
}
late_initcall(init_hwlat_tracer);