// SPDX-License-Identifier: GPL-2.0
/*
 * trace_hwlat.c - A simple Hardware Latency detector.
 *
 * Use this tracer to detect large system latencies induced by the behavior of
 * certain underlying system hardware or firmware, independent of Linux itself.
 * The code was developed originally to detect the presence of SMIs on Intel
 * and AMD systems, although there is no dependency upon x86 herein.
 *
 * The classical example usage of this tracer is in detecting the presence of
 * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
 * somewhat special form of hardware interrupt spawned from earlier CPU debug
 * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
 * LPC (or other device) to generate a special interrupt under certain
 * circumstances, for example, upon expiration of a special SMI timer device,
 * due to certain external thermal readings, on certain I/O address accesses,
 * and other situations. An SMI hits a special CPU pin, triggers a special
 * SMI mode (complete with special memory map), and the OS is unaware.
 *
 * Although certain hardware-induced latencies are necessary (for example,
 * a modern system often requires an SMI handler for correct thermal control
 * and remote management), they can wreak havoc upon any OS-level performance
 * guarantee of low latency, especially when the OS is not even made
 * aware of the presence of these interrupts. For this reason, we need a
 * somewhat brute force mechanism to detect these interrupts. In this case,
 * we do it by hogging all of the CPU(s) for configurable timer intervals,
 * sampling the built-in CPU timer, looking for discontiguous readings.
 *
 * WARNING: This implementation necessarily introduces latencies. Therefore,
 *          you should NEVER use this tracer while running in a production
 *          environment requiring any kind of low-latency performance
 *          guarantee(s).
 *
 * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
 * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
 *
 * Includes useful feedback from Clark Williams <clark@redhat.com>
 *
 */
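/*
 * A minimal usage sketch (paths assume tracefs is mounted at
 * /sys/kernel/tracing; the values are only examples):
 *
 *	# cd /sys/kernel/tracing
 *	# echo 10      > tracing_thresh           # latency threshold, in usecs
 *	# echo 500000  > hwlat_detector/width     # active sampling time per window
 *	# echo 1000000 > hwlat_detector/window    # total window size, in usecs
 *	# echo hwlat   > current_tracer
 *	# cat trace
 */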
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/sched/clock.h>
#include "trace.h"

static struct trace_array	*hwlat_trace;

#define U64STR_SIZE		22			/* 20 digits max */

#define BANNER			"hwlat_detector: "
#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
#define DEFAULT_LAT_THRESHOLD	10			/* 10us */

/* sampling thread */
static struct task_struct	*hwlat_kthread;

static struct dentry		*hwlat_sample_width;	/* sample width us */
static struct dentry		*hwlat_sample_window;	/* sample window us */

/* Save the previous tracing_thresh value */
static unsigned long		save_tracing_thresh;

/* NMI timestamp counters */
static u64			nmi_ts_start;
static u64			nmi_total_ts;
static int			nmi_count;
static int			nmi_cpu;

/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;

/* If the user changed threshold, remember it */
static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;

/* Individual latency samples are stored here when detected. */
struct hwlat_sample {
	u64			seqnum;		/* unique sequence */
	u64			duration;	/* delta */
	u64			outer_duration;	/* delta (outer loop) */
	u64			nmi_total_ts;	/* Total time spent in NMIs */
	struct timespec64	timestamp;	/* wall time */
	int			nmi_count;	/* # NMIs during this sample */
	int			count;		/* # of iterations over thresh */
};

/* keep the global state somewhere. */
static struct hwlat_data {

	struct mutex lock;		/* protect changes */

	u64	count;			/* total since reset */

	u64	sample_window;		/* total sampling window (on+off) */
	u64	sample_width;		/* active sampling portion of window */

} hwlat_data = {
	.sample_window		= DEFAULT_SAMPLE_WINDOW,
	.sample_width		= DEFAULT_SAMPLE_WIDTH,
};

static void trace_hwlat_sample(struct hwlat_sample *sample)
{
	struct trace_array *tr = hwlat_trace;
	struct trace_event_call *call = &event_hwlat;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct hwlat_entry *entry;
	unsigned long flags;
	int pc;

	pc = preempt_count();
	local_save_flags(flags);

	event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->seqnum			= sample->seqnum;
	entry->duration			= sample->duration;
	entry->outer_duration		= sample->outer_duration;
	entry->timestamp		= sample->timestamp;
	entry->nmi_total_ts		= sample->nmi_total_ts;
	entry->nmi_count		= sample->nmi_count;
	entry->count			= sample->count;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

/* Macros to encapsulate the time capturing infrastructure */
#define time_type	u64
#define time_get()	trace_clock_local()
#define time_to_us(x)	div_u64(x, 1000)
#define time_sub(a, b)	((a) - (b))
#define init_time(a, b)	(a = b)
#define time_u64(a)	a
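/*
 * trace_hwlat_callback - hook invoked from the NMI handling code
 * @enter: true on NMI entry, false on NMI exit
 *
 * When trace_hwlat_callback_enabled is set, the NMI path calls back here so
 * that time spent in NMIs on the sampling CPU can be recorded and reported
 * alongside the sample, rather than being mistaken for a hardware latency.
 */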
void trace_hwlat_callback(bool enter)
{
	if (smp_processor_id() != nmi_cpu)
		return;

	/*
	 * Currently trace_clock_local() calls sched_clock() and the
	 * generic version is not NMI safe.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
		if (enter)
			nmi_ts_start = time_get();
		else
			nmi_total_ts += time_get() - nmi_ts_start;
	}

	if (enter)
		nmi_count++;
}

/**
 * get_sample - sample the CPU TSC and look for likely hardware latencies
 *
 * Used to repeatedly capture the CPU TSC (or similar), looking for potential
 * hardware-induced latency. Called with interrupts disabled and with
 * hwlat_data.lock held.
 */
static int get_sample(void)
{
	struct trace_array *tr = hwlat_trace;
	struct hwlat_sample s;
	time_type start, t1, t2, last_t2;
	s64 diff, outer_diff, total, last_total = 0;
	u64 sample = 0;
	u64 thresh = tracing_thresh;
	u64 outer_sample = 0;
	int ret = -1;
	unsigned int count = 0;

	do_div(thresh, NSEC_PER_USEC);	/* modifies interval value */

	nmi_cpu = smp_processor_id();
	nmi_total_ts = 0;
	nmi_count = 0;
	/* Make sure NMIs see this first */
	barrier();

	trace_hwlat_callback_enabled = true;

	init_time(last_t2, 0);
	start = time_get(); /* start timestamp */
	outer_diff = 0;
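	/*
	 * Rough sketch of the sampling loop (illustrative only):
	 *
	 *	    t1      t2            t1'     t2'
	 *	 ---|=======|--------------|=======|---> time
	 *	      inner      outer       inner
	 *	     (t2-t1)    (t1'-t2)   (t2'-t1')
	 *
	 * A latency source (an SMI, for instance) that fires between the two
	 * back-to-back reads shows up as a large inner diff; one that fires
	 * between loop iterations shows up as a large outer diff.
	 */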
	do {

		t1 = time_get();	/* we'll look for a discontinuity */
		t2 = time_get();

		if (time_u64(last_t2)) {
			/* Check the delta from outer loop (t2 to next t1) */
			outer_diff = time_to_us(time_sub(t1, last_t2));
			/* This shouldn't happen */
			if (outer_diff < 0) {
				pr_err(BANNER "time running backwards\n");
				goto out;
			}
			if (outer_diff > outer_sample)
				outer_sample = outer_diff;
		}
		last_t2 = t2;

		total = time_to_us(time_sub(t2, start)); /* sample width */

		/* Check for possible overflows */
		if (total < last_total) {
			pr_err("Time total overflowed\n");
			break;
		}
		last_total = total;

		/* This checks the inner loop (t1 to t2) */
		diff = time_to_us(time_sub(t2, t1));     /* current diff */

		if (diff > thresh || outer_diff > thresh) {
			if (!count)
				ktime_get_real_ts64(&s.timestamp);
			count++;
		}

		/* This shouldn't happen */
		if (diff < 0) {
			pr_err(BANNER "time running backwards\n");
			goto out;
		}

		if (diff > sample)
			sample = diff; /* only want highest value */

	} while (total <= hwlat_data.sample_width);

	barrier(); /* finish the above in the view for NMIs */
	trace_hwlat_callback_enabled = false;
	barrier(); /* Make sure nmi_total_ts is no longer updated */

	ret = 0;
	/* If we exceed the threshold value, we have found a hardware latency */
	if (sample > thresh || outer_sample > thresh) {
		u64 latency;

		ret = 1;

		/* We read in microseconds */
		if (nmi_total_ts)
			do_div(nmi_total_ts, NSEC_PER_USEC);

		hwlat_data.count++;
		s.seqnum = hwlat_data.count;
		s.duration = sample;
		s.outer_duration = outer_sample;
		s.nmi_total_ts = nmi_total_ts;
		s.nmi_count = nmi_count;
		s.count = count;
		trace_hwlat_sample(&s);

		latency = max(sample, outer_sample);

		/* Keep a running maximum ever recorded hardware latency */
		if (latency > tr->max_latency) {
			tr->max_latency = latency;
			latency_fsnotify(tr);
		}
	}

out:
	return ret;
}

static struct cpumask save_cpumask;
static bool disable_migrate;
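/*
 * move_to_next_cpu - migrate the sampling kthread to the next allowed CPU
 *
 * The sampling thread walks the CPUs in tracing_buffer_mask round-robin, so
 * that over successive windows every allowed CPU gets checked for hardware
 * latency. Migration is abandoned if the user changes the thread's affinity
 * by hand.
 */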
static void move_to_next_cpu(void)
{
	struct cpumask *current_mask = &save_cpumask;
	int next_cpu;

	if (disable_migrate)
		return;
	/*
	 * If for some reason the user modifies the CPU affinity
	 * of this thread, then stop migrating for the duration
	 * of the current test.
	 */
	if (!cpumask_equal(current_mask, current->cpus_ptr))
		goto disable;

	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	next_cpu = cpumask_next(smp_processor_id(), current_mask);
	put_online_cpus();

	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(current_mask);

	if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
		goto disable;

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);

	sched_setaffinity(0, current_mask);
	return;

 disable:
	disable_migrate = true;
}

/*
 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
 *
 * Used to periodically sample the CPU TSC via a call to get_sample. We
 * disable interrupts, which does (intentionally) introduce latency since we
 * need to ensure nothing else might be running (and thus preempting).
 * Obviously this should never be used in production environments.
 *
 * Executes one loop iteration on each CPU in the tracing_cpumask file.
 */
static int kthread_fn(void *data)
{
	u64 interval;

	while (!kthread_should_stop()) {

		move_to_next_cpu();

		local_irq_disable();
		get_sample();
		local_irq_enable();

		mutex_lock(&hwlat_data.lock);
		interval = hwlat_data.sample_window - hwlat_data.sample_width;
		mutex_unlock(&hwlat_data.lock);
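		/*
		 * Example, using the defaults: a window of 1000000 usecs and
		 * a width of 500000 usecs leave an interval of 500000 usecs,
		 * so the thread sleeps for roughly 500 ms between sampling
		 * periods.
		 */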
		do_div(interval, USEC_PER_MSEC); /* modifies interval value */

		/* Always sleep for at least 1ms */
		if (interval < 1)
			interval = 1;

		if (msleep_interruptible(interval))
			break;
	}

	return 0;
}

/**
 * start_kthread - Kick off the hardware latency sampling/detector kthread
 *
 * This starts the kernel thread that will sit and sample the CPU timestamp
 * counter (TSC or similar) and look for potential hardware latencies.
 */
static int start_kthread(struct trace_array *tr)
{
	struct cpumask *current_mask = &save_cpumask;
	struct task_struct *kthread;
	int next_cpu;

	if (WARN_ON(hwlat_kthread))
		return 0;

	/* Just pick the first CPU on first iteration */
	current_mask = &save_cpumask;
	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	put_online_cpus();
	next_cpu = cpumask_first(current_mask);

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		return -ENOMEM;
	}

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);
	sched_setaffinity(kthread->pid, current_mask);

	hwlat_kthread = kthread;
	wake_up_process(kthread);

	return 0;
}

/**
 * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
 *
 * This kicks the running hardware latency sampling/detector kernel thread and
 * tells it to stop sampling now. Use this on unload and at system shutdown.
 */
static void stop_kthread(void)
{
	if (!hwlat_kthread)
		return;
	kthread_stop(hwlat_kthread);
	hwlat_kthread = NULL;
}

/*
 * hwlat_read - Wrapper read function for reading both window and width
 * @filp: The active open file structure
 * @ubuf: The userspace provided buffer to read value into
 * @cnt: The maximum number of bytes to read
 * @ppos: The current "file" position
 *
 * This function provides a generic read implementation for the global state
 * "hwlat_data" structure filesystem entries.
 */
static ssize_t hwlat_read(struct file *filp, char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	char buf[U64STR_SIZE];
	u64 *entry = filp->private_data;
	u64 val;
	int len;

	if (!entry)
		return -EFAULT;

	if (cnt > sizeof(buf))
		cnt = sizeof(buf);

	val = *entry;

	len = snprintf(buf, sizeof(buf), "%llu\n", val);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

/**
 * hwlat_width_write - Write function for "width" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "width" interface
 * to the hardware latency detector. It can be used to configure how many
 * usecs out of the total window we will actively sample for any
 * hardware-induced latency periods. Obviously, it is not possible to
 * sample constantly and have the system respond to a sample reader, or,
 * worse, without having the system appear to have gone out to lunch. It
 * is enforced that width is less than the total window size.
 */
static ssize_t
hwlat_width_write(struct file *filp, const char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (val < hwlat_data.sample_window)
		hwlat_data.sample_width = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

/**
 * hwlat_window_write - Write function for "window" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "window" interface
 * to the hardware latency detector. The window is the total time
 * in us that will be considered one sample period. Conceptually, windows
 * occur back-to-back and contain a sample width period during which
 * actual sampling occurs. Can be used to write a new total window size. It
 * is enforced that any value written must be greater than the sample width
 * size, or an error results.
 */
static ssize_t
hwlat_window_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (hwlat_data.sample_width < val)
		hwlat_data.sample_window = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

static const struct file_operations width_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_width_write,
};

static const struct file_operations window_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_window_write,
};

/**
 * init_tracefs - A function to initialize the tracefs interface files
 *
 * This function creates entries in tracefs for "hwlat_detector".
 * It creates the hwlat_detector directory in the tracing directory,
 * and within that directory are the width and window files used to
 * change and view those values.
 */
static int init_tracefs(void)
{
	struct dentry *d_tracer;
	struct dentry *top_dir;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return -ENOMEM;

	top_dir = tracefs_create_dir("hwlat_detector", d_tracer);
	if (!top_dir)
		return -ENOMEM;

	hwlat_sample_window = tracefs_create_file("window", 0640,
						  top_dir,
						  &hwlat_data.sample_window,
						  &window_fops);
	if (!hwlat_sample_window)
		goto err;

	hwlat_sample_width = tracefs_create_file("width", 0644,
						 top_dir,
						 &hwlat_data.sample_width,
						 &width_fops);
	if (!hwlat_sample_width)
		goto err;

	return 0;

 err:
	tracefs_remove(top_dir);
	return -ENOMEM;
}

static void hwlat_tracer_start(struct trace_array *tr)
{
	int err;

	err = start_kthread(tr);
	if (err)
		pr_err(BANNER "Cannot start hwlat kthread\n");
}

static void hwlat_tracer_stop(struct trace_array *tr)
{
	stop_kthread();
}

static bool hwlat_busy;

static int hwlat_tracer_init(struct trace_array *tr)
{
	/* Only allow one instance to enable this */
	if (hwlat_busy)
		return -EBUSY;

	hwlat_trace = tr;

	disable_migrate = false;
	hwlat_data.count = 0;
	tr->max_latency = 0;
	save_tracing_thresh = tracing_thresh;

	/* tracing_thresh is in nsecs, we speak in usecs */
	if (!tracing_thresh)
		tracing_thresh = last_tracing_thresh;

	if (tracer_tracing_is_on(tr))
		hwlat_tracer_start(tr);

	hwlat_busy = true;

	return 0;
}

static void hwlat_tracer_reset(struct trace_array *tr)
{
	stop_kthread();

	/* the tracing threshold is static between runs */
	last_tracing_thresh = tracing_thresh;

	tracing_thresh = save_tracing_thresh;
	hwlat_busy = false;
}

static struct tracer hwlat_tracer __read_mostly =
{
	.name		= "hwlat",
	.init		= hwlat_tracer_init,
	.reset		= hwlat_tracer_reset,
	.start		= hwlat_tracer_start,
	.stop		= hwlat_tracer_stop,
	.allow_instances = true,
};

__init static int init_hwlat_tracer(void)
{
	int ret;

	mutex_init(&hwlat_data.lock);

	ret = register_tracer(&hwlat_tracer);
	if (ret)
		return ret;

	init_tracefs();

	return 0;
}
late_initcall(init_hwlat_tracer);