1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * OS Noise Tracer: computes the OS Noise suffered by a running thread.
4 * Timerlat Tracer: measures the wakeup latency of a timer triggered IRQ and thread.
5 *
6 * Based on "hwlat_detector" tracer by:
7 * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
8 * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
9 * With feedback from Clark Williams <williams@redhat.com>
10 *
11 * And also based on the rtsl tracer presented on:
12 * DE OLIVEIRA, Daniel Bristot, et al. Demystifying the real-time linux
13 * scheduling latency. In: 32nd Euromicro Conference on Real-Time Systems
14 * (ECRTS 2020). Schloss Dagstuhl-Leibniz-Zentrum fur Informatik, 2020.
15 *
16 * Copyright (C) 2021 Daniel Bristot de Oliveira, Red Hat, Inc. <bristot@redhat.com>
17 */
18
19 #include <linux/kthread.h>
20 #include <linux/tracefs.h>
21 #include <linux/uaccess.h>
22 #include <linux/cpumask.h>
23 #include <linux/delay.h>
24 #include <linux/sched/clock.h>
25 #include <uapi/linux/sched/types.h>
26 #include <linux/sched.h>
27 #include "trace.h"
28
29 #ifdef CONFIG_X86_LOCAL_APIC
30 #include <asm/trace/irq_vectors.h>
31 #undef TRACE_INCLUDE_PATH
32 #undef TRACE_INCLUDE_FILE
33 #endif /* CONFIG_X86_LOCAL_APIC */
34
35 #include <trace/events/irq.h>
36 #include <trace/events/sched.h>
37
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/osnoise.h>
40
41 /*
42 * Default values.
43 */
44 #define BANNER "osnoise: "
45 #define DEFAULT_SAMPLE_PERIOD 1000000 /* 1s */
46 #define DEFAULT_SAMPLE_RUNTIME 1000000 /* 1s */
47
48 #define DEFAULT_TIMERLAT_PERIOD 1000 /* 1ms */
49 #define DEFAULT_TIMERLAT_PRIO 95 /* FIFO 95 */
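/*
 * The period/runtime values above are expressed in microseconds, e.g.,
 * DEFAULT_SAMPLE_PERIOD = 1000000 us = 1 s, and DEFAULT_TIMERLAT_PERIOD =
 * 1000 us = 1 ms.
 */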
50
51 /*
52 * osnoise/options entries.
53 */
54 enum osnoise_options_index {
55 OSN_DEFAULTS = 0,
56 OSN_WORKLOAD,
57 OSN_PANIC_ON_STOP,
58 OSN_PREEMPT_DISABLE,
59 OSN_IRQ_DISABLE,
60 OSN_MAX
61 };
62
63 static const char * const osnoise_options_str[OSN_MAX] = {
64 "DEFAULTS",
65 "OSNOISE_WORKLOAD",
66 "PANIC_ON_STOP",
67 "OSNOISE_PREEMPT_DISABLE",
68 "OSNOISE_IRQ_DISABLE" };
69
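/*
 * The options are kept in a bitmask indexed by enum osnoise_options_index,
 * so the default of 0x2 == BIT(OSN_WORKLOAD) means that only the dispatch
 * of the tracer's own workload threads is enabled by default.
 */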
70 #define OSN_DEFAULT_OPTIONS 0x2
71 static unsigned long osnoise_options = OSN_DEFAULT_OPTIONS;
72
73 /*
74 * trace_array of the enabled osnoise/timerlat instances.
75 */
76 struct osnoise_instance {
77 struct list_head list;
78 struct trace_array *tr;
79 };
80
81 static struct list_head osnoise_instances;
82
83 static bool osnoise_has_registered_instances(void)
84 {
85 return !!list_first_or_null_rcu(&osnoise_instances,
86 struct osnoise_instance,
87 list);
88 }
89
90 /*
91 * osnoise_instance_registered - check if a tr is already registered
92 */
93 static int osnoise_instance_registered(struct trace_array *tr)
94 {
95 struct osnoise_instance *inst;
96 int found = 0;
97
98 rcu_read_lock();
99 list_for_each_entry_rcu(inst, &osnoise_instances, list) {
100 if (inst->tr == tr)
101 found = 1;
102 }
103 rcu_read_unlock();
104
105 return found;
106 }
107
108 /*
109 * osnoise_register_instance - register a new trace instance
110 *
111 * Register a trace_array *tr in the list of instances running
112 * osnoise/timerlat tracers.
113 */
114 static int osnoise_register_instance(struct trace_array *tr)
115 {
116 struct osnoise_instance *inst;
117
118 /*
119 * register/unregister serialization is provided by trace's
120 * trace_types_lock.
121 */
122 lockdep_assert_held(&trace_types_lock);
123
124 inst = kmalloc(sizeof(*inst), GFP_KERNEL);
125 if (!inst)
126 return -ENOMEM;
127
128 INIT_LIST_HEAD_RCU(&inst->list);
129 inst->tr = tr;
130 list_add_tail_rcu(&inst->list, &osnoise_instances);
131
132 return 0;
133 }
134
135 /*
136 * osnoise_unregister_instance - unregister a registered trace instance
137 *
138 * Remove the trace_array *tr from the list of instances running
139 * osnoise/timerlat tracers.
140 */
141 static void osnoise_unregister_instance(struct trace_array *tr)
142 {
143 struct osnoise_instance *inst;
144 int found = 0;
145
146 /*
147 * register/unregister serialization is provided by trace's
148 * trace_types_lock.
149 */
150 list_for_each_entry_rcu(inst, &osnoise_instances, list,
151 lockdep_is_held(&trace_types_lock)) {
152 if (inst->tr == tr) {
153 list_del_rcu(&inst->list);
154 found = 1;
155 break;
156 }
157 }
158
159 if (!found)
160 return;
161
162 kvfree_rcu_mightsleep(inst);
163 }
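/*
 * Note on the list lifetime: updates to osnoise_instances are serialized by
 * trace_types_lock, while hot-path readers (e.g., trace_osnoise_sample())
 * only take rcu_read_lock(). That is why the unregister above defers the
 * free with kvfree_rcu_mightsleep(): a reader still inside its RCU
 * read-side critical section may hold a pointer to the unlinked instance.
 */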
164
165 /*
166 * NMI runtime info.
167 */
168 struct osn_nmi {
169 u64 count;
170 u64 delta_start;
171 };
172
173 /*
174 * IRQ runtime info.
175 */
176 struct osn_irq {
177 u64 count;
178 u64 arrival_time;
179 u64 delta_start;
180 };
181
182 #define IRQ_CONTEXT 0
183 #define THREAD_CONTEXT 1
184 #define THREAD_URET 2
185 /*
186 * softirq runtime info.
187 */
188 struct osn_softirq {
189 u64 count;
190 u64 arrival_time;
191 u64 delta_start;
192 };
193
194 /*
195 * thread runtime info.
196 */
197 struct osn_thread {
198 u64 count;
199 u64 arrival_time;
200 u64 delta_start;
201 };
202
203 /*
204 * Runtime information: this structure saves the runtime information used by
205 * one sampling thread.
206 */
207 struct osnoise_variables {
208 struct task_struct *kthread;
209 bool sampling;
210 pid_t pid;
211 struct osn_nmi nmi;
212 struct osn_irq irq;
213 struct osn_softirq softirq;
214 struct osn_thread thread;
215 local_t int_counter;
216 };
217
218 /*
219 * Per-cpu runtime information.
220 */
221 static DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var);
222
223 /*
224 * this_cpu_osn_var - Return the per-cpu osnoise_variables on its relative CPU
225 */
226 static inline struct osnoise_variables *this_cpu_osn_var(void)
227 {
228 return this_cpu_ptr(&per_cpu_osnoise_var);
229 }
230
231 /*
232 * Protect the interface.
233 */
234 static struct mutex interface_lock;
235
236 #ifdef CONFIG_TIMERLAT_TRACER
237 /*
238 * Runtime information for the timer mode.
239 */
240 struct timerlat_variables {
241 struct task_struct *kthread;
242 struct hrtimer timer;
243 u64 rel_period;
244 u64 abs_period;
245 bool tracing_thread;
246 u64 count;
247 bool uthread_migrate;
248 };
249
250 static DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var);
251
252 /*
253 * this_cpu_tmr_var - Return the per-cpu timerlat_variables on its relative CPU
254 */
255 static inline struct timerlat_variables *this_cpu_tmr_var(void)
256 {
257 return this_cpu_ptr(&per_cpu_timerlat_var);
258 }
259
260 /*
261 * tlat_var_reset - Reset the values of the given timerlat_variables
262 */
263 static inline void tlat_var_reset(void)
264 {
265 struct timerlat_variables *tlat_var;
266 int cpu;
267
268 /* Synchronize with the timerlat interfaces */
269 mutex_lock(&interface_lock);
270 /*
271 * So far, all the values are initialized as 0, so
272 * zeroing the structure is perfect.
273 */
274 for_each_cpu(cpu, cpu_online_mask) {
275 tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);
276 if (tlat_var->kthread)
277 hrtimer_cancel(&tlat_var->timer);
278 memset(tlat_var, 0, sizeof(*tlat_var));
279 }
280 mutex_unlock(&interface_lock);
281 }
282 #else /* CONFIG_TIMERLAT_TRACER */
283 #define tlat_var_reset() do {} while (0)
284 #endif /* CONFIG_TIMERLAT_TRACER */
285
286 /*
287 * osn_var_reset - Reset the values of the given osnoise_variables
288 */
289 static inline void osn_var_reset(void)
290 {
291 struct osnoise_variables *osn_var;
292 int cpu;
293
294 /*
295 * So far, all the values are initialized as 0, so
296 * zeroing the structure is perfect.
297 */
298 for_each_cpu(cpu, cpu_online_mask) {
299 osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
300 memset(osn_var, 0, sizeof(*osn_var));
301 }
302 }
303
304 /*
305 * osn_var_reset_all - Reset the value of all per-cpu osnoise_variables
306 */
307 static inline void osn_var_reset_all(void)
308 {
309 osn_var_reset();
310 tlat_var_reset();
311 }
312
313 /*
314 * Tells NMIs to call back to the osnoise tracer to record timestamps.
315 */
316 bool trace_osnoise_callback_enabled;
317
318 /*
319 * osnoise sample structure definition. Used to store the statistics of a
320 * sample run.
321 */
322 struct osnoise_sample {
323 u64 runtime; /* runtime */
324 u64 noise; /* noise */
325 u64 max_sample; /* max single noise sample */
326 int hw_count; /* # HW (incl. hypervisor) interference */
327 int nmi_count; /* # NMIs during this sample */
328 int irq_count; /* # IRQs during this sample */
329 int softirq_count; /* # softirqs during this sample */
330 int thread_count; /* # threads during this sample */
331 };
332
333 #ifdef CONFIG_TIMERLAT_TRACER
334 /*
335 * timerlat sample structure definition. Used to store the statistics of
336 * a sample run.
337 */
338 struct timerlat_sample {
339 u64 timer_latency; /* timer_latency */
340 unsigned int seqnum; /* unique sequence */
341 int context; /* timer context */
342 };
343 #endif
344
345 /*
346 * Tracer data.
347 */
348 static struct osnoise_data {
349 u64 sample_period; /* total sampling period */
350 u64 sample_runtime; /* active sampling portion of period */
351 u64 stop_tracing; /* stop trace in the internal operation (loop/irq) */
352 u64 stop_tracing_total; /* stop trace in the final operation (report/thread) */
353 #ifdef CONFIG_TIMERLAT_TRACER
354 u64 timerlat_period; /* timerlat period */
355 u64 print_stack; /* print IRQ stack if total > */
356 int timerlat_tracer; /* timerlat tracer */
357 #endif
358 bool tainted; /* inform users and developers about a problem */
359 } osnoise_data = {
360 .sample_period = DEFAULT_SAMPLE_PERIOD,
361 .sample_runtime = DEFAULT_SAMPLE_RUNTIME,
362 .stop_tracing = 0,
363 .stop_tracing_total = 0,
364 #ifdef CONFIG_TIMERLAT_TRACER
365 .print_stack = 0,
366 .timerlat_period = DEFAULT_TIMERLAT_PERIOD,
367 .timerlat_tracer = 0,
368 #endif
369 };
370
371 #ifdef CONFIG_TIMERLAT_TRACER
372 static inline bool timerlat_enabled(void)
373 {
374 return osnoise_data.timerlat_tracer;
375 }
376 
377 static inline int timerlat_softirq_exit(struct osnoise_variables *osn_var)
378 {
379 struct timerlat_variables *tlat_var = this_cpu_tmr_var();
380 /*
381 * If the timerlat is enabled, but the irq handler did
382 * not run yet enabling timerlat_tracer, do not trace.
383 */
384 if (!tlat_var->tracing_thread) {
385 osn_var->softirq.arrival_time = 0;
386 osn_var->softirq.delta_start = 0;
387 return 0;
388 }
389 return 1;
390 }
391
392 static inline int timerlat_thread_exit(struct osnoise_variables *osn_var)
393 {
394 struct timerlat_variables *tlat_var = this_cpu_tmr_var();
395 /*
396 * If the timerlat is enabled, but the irq handler did
397 * not run yet enabling timerlat_tracer, do not trace.
398 */
399 if (!tlat_var->tracing_thread) {
400 osn_var->thread.delta_start = 0;
401 osn_var->thread.arrival_time = 0;
402 return 0;
403 }
404 return 1;
405 }
406 #else /* CONFIG_TIMERLAT_TRACER */
407 static inline bool timerlat_enabled(void)
408 {
409 return false;
410 }
411 
412 static inline int timerlat_softirq_exit(struct osnoise_variables *osn_var)
413 {
414 return 1;
415 }
416 static inline int timerlat_thread_exit(struct osnoise_variables *osn_var)
417 {
418 return 1;
419 }
420 #endif
421
422 #ifdef CONFIG_PREEMPT_RT
423 /*
424 * Print the osnoise header info.
425 */
426 static void print_osnoise_headers(struct seq_file *s)
427 {
428 if (osnoise_data.tainted)
429 seq_puts(s, "# osnoise is tainted!\n");
430
431 seq_puts(s, "# _-------=> irqs-off\n");
432 seq_puts(s, "# / _------=> need-resched\n");
433 seq_puts(s, "# | / _-----=> need-resched-lazy\n");
434 seq_puts(s, "# || / _----=> hardirq/softirq\n");
435 seq_puts(s, "# ||| / _---=> preempt-depth\n");
436 seq_puts(s, "# |||| / _--=> preempt-lazy-depth\n");
437 seq_puts(s, "# ||||| / _-=> migrate-disable\n");
438
439 seq_puts(s, "# |||||| / ");
440 seq_puts(s, " MAX\n");
441
442 seq_puts(s, "# ||||| / ");
443 seq_puts(s, " SINGLE Interference counters:\n");
444
445 seq_puts(s, "# ||||||| RUNTIME ");
446 seq_puts(s, " NOISE %% OF CPU NOISE +-----------------------------+\n");
447
448 seq_puts(s, "# TASK-PID CPU# ||||||| TIMESTAMP IN US ");
449 seq_puts(s, " IN US AVAILABLE IN US HW NMI IRQ SIRQ THREAD\n");
450
451 seq_puts(s, "# | | | ||||||| | | ");
452 seq_puts(s, " | | | | | | | |\n");
453 }
454 #else /* CONFIG_PREEMPT_RT */
455 static void print_osnoise_headers(struct seq_file *s)
456 {
457 if (osnoise_data.tainted)
458 seq_puts(s, "# osnoise is tainted!\n");
459
460 seq_puts(s, "# _-----=> irqs-off\n");
461 seq_puts(s, "# / _----=> need-resched\n");
462 seq_puts(s, "# | / _---=> hardirq/softirq\n");
463 seq_puts(s, "# || / _--=> preempt-depth\n");
464 seq_puts(s, "# ||| / _-=> migrate-disable ");
465 seq_puts(s, " MAX\n");
466 seq_puts(s, "# |||| / delay ");
467 seq_puts(s, " SINGLE Interference counters:\n");
468
469 seq_puts(s, "# ||||| RUNTIME ");
470 seq_puts(s, " NOISE %% OF CPU NOISE +-----------------------------+\n");
471
472 seq_puts(s, "# TASK-PID CPU# ||||| TIMESTAMP IN US ");
473 seq_puts(s, " IN US AVAILABLE IN US HW NMI IRQ SIRQ THREAD\n");
474
475 seq_puts(s, "# | | | ||||| | | ");
476 seq_puts(s, " | | | | | | | |\n");
477 }
478 #endif /* CONFIG_PREEMPT_RT */
479
480 /*
481 * osnoise_taint - report an osnoise error.
482 */
483 #define osnoise_taint(msg) ({ \
484 struct osnoise_instance *inst; \
485 struct trace_buffer *buffer; \
486 \
487 rcu_read_lock(); \
488 list_for_each_entry_rcu(inst, &osnoise_instances, list) { \
489 buffer = inst->tr->array_buffer.buffer; \
490 trace_array_printk_buf(buffer, _THIS_IP_, msg); \
491 } \
492 rcu_read_unlock(); \
493 osnoise_data.tainted = true; \
494 })
495
496 /*
497 * Record an osnoise_sample into the tracer buffer.
498 */
499 static void
500 __trace_osnoise_sample(struct osnoise_sample *sample, struct trace_buffer *buffer)
501 {
502 struct trace_event_call *call = &event_osnoise;
503 struct ring_buffer_event *event;
504 struct osnoise_entry *entry;
505
506 event = trace_buffer_lock_reserve(buffer, TRACE_OSNOISE, sizeof(*entry),
507 tracing_gen_ctx());
508 if (!event)
509 return;
510 entry = ring_buffer_event_data(event);
511 entry->runtime = sample->runtime;
512 entry->noise = sample->noise;
513 entry->max_sample = sample->max_sample;
514 entry->hw_count = sample->hw_count;
515 entry->nmi_count = sample->nmi_count;
516 entry->irq_count = sample->irq_count;
517 entry->softirq_count = sample->softirq_count;
518 entry->thread_count = sample->thread_count;
519
520 if (!call_filter_check_discard(call, entry, buffer, event))
521 trace_buffer_unlock_commit_nostack(buffer, event);
522 }
523
524 /*
525 * Record an osnoise_sample on all osnoise instances.
526 */
527 static void trace_osnoise_sample(struct osnoise_sample *sample)
528 {
529 struct osnoise_instance *inst;
530 struct trace_buffer *buffer;
531
532 rcu_read_lock();
533 list_for_each_entry_rcu(inst, &osnoise_instances, list) {
534 buffer = inst->tr->array_buffer.buffer;
535 __trace_osnoise_sample(sample, buffer);
536 }
537 rcu_read_unlock();
538 }
539
540 #ifdef CONFIG_TIMERLAT_TRACER
541 /*
542 * Print the timerlat header info.
543 */
544 #ifdef CONFIG_PREEMPT_RT
545 static void print_timerlat_headers(struct seq_file *s)
546 {
547 seq_puts(s, "# _-------=> irqs-off\n");
548 seq_puts(s, "# / _------=> need-resched\n");
549 seq_puts(s, "# | / _-----=> need-resched-lazy\n");
550 seq_puts(s, "# || / _----=> hardirq/softirq\n");
551 seq_puts(s, "# ||| / _---=> preempt-depth\n");
552 seq_puts(s, "# |||| / _--=> preempt-lazy-depth\n");
553 seq_puts(s, "# ||||| / _-=> migrate-disable\n");
554 seq_puts(s, "# |||||| /\n");
555 seq_puts(s, "# ||||||| ACTIVATION\n");
556 seq_puts(s, "# TASK-PID CPU# ||||||| TIMESTAMP ID ");
557 seq_puts(s, " CONTEXT LATENCY\n");
558 seq_puts(s, "# | | | ||||||| | | ");
559 seq_puts(s, " | |\n");
560 }
561 #else /* CONFIG_PREEMPT_RT */
562 static void print_timerlat_headers(struct seq_file *s)
563 {
564 seq_puts(s, "# _-----=> irqs-off\n");
565 seq_puts(s, "# / _----=> need-resched\n");
566 seq_puts(s, "# | / _---=> hardirq/softirq\n");
567 seq_puts(s, "# || / _--=> preempt-depth\n");
568 seq_puts(s, "# ||| / _-=> migrate-disable\n");
569 seq_puts(s, "# |||| / delay\n");
570 seq_puts(s, "# ||||| ACTIVATION\n");
571 seq_puts(s, "# TASK-PID CPU# ||||| TIMESTAMP ID ");
572 seq_puts(s, " CONTEXT LATENCY\n");
573 seq_puts(s, "# | | | ||||| | | ");
574 seq_puts(s, " | |\n");
575 }
576 #endif /* CONFIG_PREEMPT_RT */
577
578 static void
579 __trace_timerlat_sample(struct timerlat_sample *sample, struct trace_buffer *buffer)
580 {
581 struct trace_event_call *call = &event_osnoise;
582 struct ring_buffer_event *event;
583 struct timerlat_entry *entry;
584
585 event = trace_buffer_lock_reserve(buffer, TRACE_TIMERLAT, sizeof(*entry),
586 tracing_gen_ctx());
587 if (!event)
588 return;
589 entry = ring_buffer_event_data(event);
590 entry->seqnum = sample->seqnum;
591 entry->context = sample->context;
592 entry->timer_latency = sample->timer_latency;
593
594 if (!call_filter_check_discard(call, entry, buffer, event))
595 trace_buffer_unlock_commit_nostack(buffer, event);
596 }
597
598 /*
599 * Record an timerlat_sample into the tracer buffer.
600 */
601 static void trace_timerlat_sample(struct timerlat_sample *sample)
602 {
603 struct osnoise_instance *inst;
604 struct trace_buffer *buffer;
605
606 rcu_read_lock();
607 list_for_each_entry_rcu(inst, &osnoise_instances, list) {
608 buffer = inst->tr->array_buffer.buffer;
609 __trace_timerlat_sample(sample, buffer);
610 }
611 rcu_read_unlock();
612 }
613
614 #ifdef CONFIG_STACKTRACE
615
616 #define MAX_CALLS 256
617
618 /*
619 * Stack trace will take place only at IRQ level, so, no need
620 * to control nesting here.
621 */
622 struct trace_stack {
623 int stack_size;
624 int nr_entries;
625 unsigned long calls[MAX_CALLS];
626 };
627
628 static DEFINE_PER_CPU(struct trace_stack, trace_stack);
629
630 /*
631 * timerlat_save_stack - save a stack trace without printing
632 *
633 * Save the current stack trace without printing. The
634 * stack will be printed later, after the end of the measurement.
635 */
636 static void timerlat_save_stack(int skip)
637 {
638 unsigned int size, nr_entries;
639 struct trace_stack *fstack;
640
641 fstack = this_cpu_ptr(&trace_stack);
642
643 size = ARRAY_SIZE(fstack->calls);
644
645 nr_entries = stack_trace_save(fstack->calls, size, skip);
646
647 fstack->stack_size = nr_entries * sizeof(unsigned long);
648 fstack->nr_entries = nr_entries;
649
650 return;
651
652 }
653
654 static void
655 __timerlat_dump_stack(struct trace_buffer *buffer, struct trace_stack *fstack, unsigned int size)
656 {
657 struct trace_event_call *call = &event_osnoise;
658 struct ring_buffer_event *event;
659 struct stack_entry *entry;
660
661 event = trace_buffer_lock_reserve(buffer, TRACE_STACK, sizeof(*entry) + size,
662 tracing_gen_ctx());
663 if (!event)
664 return;
665
666 entry = ring_buffer_event_data(event);
667
668 memcpy(&entry->caller, fstack->calls, size);
669 entry->size = fstack->nr_entries;
670
671 if (!call_filter_check_discard(call, entry, buffer, event))
672 trace_buffer_unlock_commit_nostack(buffer, event);
673 }
674
675 /*
676 * timerlat_dump_stack - dump a stack trace previously saved
677 */
678 static void timerlat_dump_stack(u64 latency)
679 {
680 struct osnoise_instance *inst;
681 struct trace_buffer *buffer;
682 struct trace_stack *fstack;
683 unsigned int size;
684
685 /*
686 * trace only if latency > print_stack config, if enabled.
687 */
688 if (!osnoise_data.print_stack || osnoise_data.print_stack > latency)
689 return;
690
691 preempt_disable_notrace();
692 fstack = this_cpu_ptr(&trace_stack);
693 size = fstack->stack_size;
694
695 rcu_read_lock();
696 list_for_each_entry_rcu(inst, &osnoise_instances, list) {
697 buffer = inst->tr->array_buffer.buffer;
698 __timerlat_dump_stack(buffer, fstack, size);
699
700 }
701 rcu_read_unlock();
702 preempt_enable_notrace();
703 }
704 #else /* CONFIG_STACKTRACE */
705 #define timerlat_dump_stack(latency) do {} while (0)
706 #define timerlat_save_stack(a) do {} while (0)
707 #endif /* CONFIG_STACKTRACE */
708 #endif /* CONFIG_TIMERLAT_TRACER */
709
710 /*
711 * Macros to encapsulate the time capturing infrastructure.
712 */
713 #define time_get() trace_clock_local()
714 #define time_to_us(x) div_u64(x, 1000)
715 #define time_sub(a, b) ((a) - (b))
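/*
 * time_get() returns nanoseconds, so, e.g., time_to_us(5000) == 5.
 * div_u64() is used instead of a plain '/' because a 64-bit division
 * would not link on 32-bit architectures.
 */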
716
717 /*
718 * cond_move_irq_delta_start - Forward the delta_start of a running IRQ
719 *
720 * If an IRQ is preempted by an NMI, its delta_start is pushed forward
721 * to discount the NMI interference.
722 *
723 * See get_int_safe_duration().
724 */
725 static inline void
726 cond_move_irq_delta_start(struct osnoise_variables *osn_var, u64 duration)
727 {
728 if (osn_var->irq.delta_start)
729 osn_var->irq.delta_start += duration;
730 }
731
732 #ifndef CONFIG_PREEMPT_RT
733 /*
734 * cond_move_softirq_delta_start - Forward the delta_start of a running softirq.
735 *
736 * If a softirq is preempted by an IRQ or NMI, its delta_start is pushed
737 * forward to discount the interference.
738 *
739 * See get_int_safe_duration().
740 */
741 static inline void
742 cond_move_softirq_delta_start(struct osnoise_variables *osn_var, u64 duration)
743 {
744 if (osn_var->softirq.delta_start)
745 osn_var->softirq.delta_start += duration;
746 }
747 #else /* CONFIG_PREEMPT_RT */
748 #define cond_move_softirq_delta_start(osn_var, duration) do {} while (0)
749 #endif
750
751 /*
752 * cond_move_thread_delta_start - Forward the delta_start of a running thread
753 *
754 * If a noisy thread is preempted by a softirq, IRQ or NMI, its delta_start
755 * is pushed forward to discount the interference.
756 *
757 * See get_int_safe_duration().
758 */
759 static inline void
760 cond_move_thread_delta_start(struct osnoise_variables *osn_var, u64 duration)
761 {
762 if (osn_var->thread.delta_start)
763 osn_var->thread.delta_start += duration;
764 }
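/*
 * A worked example of the forwarding above: a thread window starts at
 * t = 100 us and a 10 us IRQ hits inside it. The IRQ exit moves
 * thread.delta_start from 100 us to 110 us, so a reading at t = 150 us
 * yields 40 us of thread noise, while the 10 us are reported separately
 * as IRQ noise.
 */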
765
766 /*
767 * get_int_safe_duration - Get the duration of a window
768 *
769 * The irq, softirq and thread variables need to have their duration without
770 * the interference from higher priority interrupts. Instead of keeping a
771 * variable to discount the interrupt interference from these variables, the
772 * starting time of these variables are pushed forward with the interrupt's
773 * duration. In this way, a single variable is used to:
774 *
775 * - Know if a given window is being measured.
776 * - Account its duration.
777 * - Discount the interference.
778 *
779 * To avoid getting inconsistent values, e.g.,:
780 *
781 * now = time_get()
782 * ---> interrupt!
783 * delta_start -= int duration;
784 * <---
785 * duration = now - delta_start;
786 *
787 * result: negative duration if the variable duration before the
788 * interrupt was smaller than the interrupt execution.
789 *
790 * A counter of interrupts is used. If the counter increased, try
791 * to capture an interference safe duration.
792 */
793 static inline s64
794 get_int_safe_duration(struct osnoise_variables *osn_var, u64 *delta_start)
795 {
796 u64 int_counter, now;
797 s64 duration;
798
799 do {
800 int_counter = local_read(&osn_var->int_counter);
801 /* synchronize with interrupts */
802 barrier();
803
804 now = time_get();
805 duration = (now - *delta_start);
806
807 /* synchronize with interrupts */
808 barrier();
809 } while (int_counter != local_read(&osn_var->int_counter));
810
811 /*
812 * This is evidence of race conditions that cause
813 * a value to be "discounted" too much.
814 */
815 if (duration < 0)
816 osnoise_taint("Negative duration!\n");
817
818 *delta_start = 0;
819
820 return duration;
821 }
822
823 /*
824 * set_int_safe_time - Save the current time on *time, aware of interference
826 *
827 * Get the time, taking into consideration a possible interference from
828 * higher priority interrupts.
829 *
830 * See get_int_safe_duration() for an explanation.
831 */
832 static u64
833 set_int_safe_time(struct osnoise_variables *osn_var, u64 *time)
834 {
835 u64 int_counter;
836
837 do {
838 int_counter = local_read(&osn_var->int_counter);
839 /* synchronize with interrupts */
840 barrier();
841
842 *time = time_get();
843
844 /* synchronize with interrupts */
845 barrier();
846 } while (int_counter != local_read(&osn_var->int_counter));
847
848 return int_counter;
849 }
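/*
 * The retry loops above work because every interference source (NMI, IRQ,
 * softirq and thread entry) increments osn_var->int_counter. If the
 * counter is unchanged across the two reads, no interference ran in
 * between, so the captured value is consistent.
 */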
850
851 #ifdef CONFIG_TIMERLAT_TRACER
852 /*
853 * copy_int_safe_time - Copy *src into *desc aware of interference
854 */
855 static u64
856 copy_int_safe_time(struct osnoise_variables *osn_var, u64 *dst, u64 *src)
857 {
858 u64 int_counter;
859
860 do {
861 int_counter = local_read(&osn_var->int_counter);
862 /* synchronize with interrupts */
863 barrier();
864
865 *dst = *src;
866
867 /* synchronize with interrupts */
868 barrier();
869 } while (int_counter != local_read(&osn_var->int_counter));
870
871 return int_counter;
872 }
873 #endif /* CONFIG_TIMERLAT_TRACER */
874
875 /*
876 * trace_osnoise_callback - NMI entry/exit callback
877 *
878 * This function is called at the entry and exit NMI code. The bool enter
879 * distinguishes between either case. This function is used to note an NMI
880 * occurrence, compute the noise caused by the NMI, and to remove the noise
881 * it is potentially causing on other interference variables.
882 */
883 void trace_osnoise_callback(bool enter)
884 {
885 struct osnoise_variables *osn_var = this_cpu_osn_var();
886 u64 duration;
887
888 if (!osn_var->sampling)
889 return;
890
891 /*
892 * Currently trace_clock_local() calls sched_clock() and the
893 * generic version is not NMI safe.
894 */
895 if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
896 if (enter) {
897 osn_var->nmi.delta_start = time_get();
898 local_inc(&osn_var->int_counter);
899 } else {
900 duration = time_get() - osn_var->nmi.delta_start;
901
902 trace_nmi_noise(osn_var->nmi.delta_start, duration);
903
904 cond_move_irq_delta_start(osn_var, duration);
905 cond_move_softirq_delta_start(osn_var, duration);
906 cond_move_thread_delta_start(osn_var, duration);
907 }
908 }
909
910 if (enter)
911 osn_var->nmi.count++;
912 }
913
914 /*
915 * osnoise_trace_irq_entry - Note the starting of an IRQ
916 *
917 * Save the starting time of an IRQ. As IRQs are non-preemptive to other IRQs,
918 * it is safe to use a single variable (osn_var->irq) to save the statistics.
919 * The arrival_time is used to report... the arrival time. The delta_start
920 * is used to compute the duration at the IRQ exit handler. See
921 * cond_move_irq_delta_start().
922 */
923 void osnoise_trace_irq_entry(int id)
924 {
925 struct osnoise_variables *osn_var = this_cpu_osn_var();
926
927 if (!osn_var->sampling)
928 return;
929 /*
930 * This value will be used in the report, but not to compute
931 * the execution time, so it is safe to get it unsafe.
932 */
933 osn_var->irq.arrival_time = time_get();
934 set_int_safe_time(osn_var, &osn_var->irq.delta_start);
935 osn_var->irq.count++;
936
937 local_inc(&osn_var->int_counter);
938 }
939
940 /*
941 * osnoise_trace_irq_exit - Note the end of an IRQ, save data and trace
942 *
943 * Computes the duration of the IRQ noise, and traces it. Also discounts the
944 * interference from other sources of noise that could currently be accounted.
945 */
946 void osnoise_trace_irq_exit(int id, const char *desc)
947 {
948 struct osnoise_variables *osn_var = this_cpu_osn_var();
949 s64 duration;
950
951 if (!osn_var->sampling)
952 return;
953
954 duration = get_int_safe_duration(osn_var, &osn_var->irq.delta_start);
955 trace_irq_noise(id, desc, osn_var->irq.arrival_time, duration);
956 osn_var->irq.arrival_time = 0;
957 cond_move_softirq_delta_start(osn_var, duration);
958 cond_move_thread_delta_start(osn_var, duration);
959 }
960
961 /*
962 * trace_irqentry_callback - Callback to the irq:irq_entry traceevent
963 *
964 * Used to note the starting of an IRQ occurrence.
965 */
966 static void trace_irqentry_callback(void *data, int irq,
967 struct irqaction *action)
968 {
969 osnoise_trace_irq_entry(irq);
970 }
971
972 /*
973 * trace_irqexit_callback - Callback to the irq:irq_exit traceevent
974 *
975 * Used to note the end of an IRQ occurrence.
976 */
977 static void trace_irqexit_callback(void *data, int irq,
978 struct irqaction *action, int ret)
979 {
980 osnoise_trace_irq_exit(irq, action->name);
981 }
982
983 /*
984 * arch specific register function.
985 */
986 int __weak osnoise_arch_register(void)
987 {
988 return 0;
989 }
990
991 /*
992 * arch specific unregister function.
993 */
994 void __weak osnoise_arch_unregister(void)
995 {
996 return;
997 }
998
999 /*
1000 * hook_irq_events - Hook IRQ handling events
1001 *
1002 * This function hooks the IRQ related callbacks to the respective trace
1003 * events.
1004 */
1005 static int hook_irq_events(void)
1006 {
1007 int ret;
1008
1009 ret = register_trace_irq_handler_entry(trace_irqentry_callback, NULL);
1010 if (ret)
1011 goto out_err;
1012
1013 ret = register_trace_irq_handler_exit(trace_irqexit_callback, NULL);
1014 if (ret)
1015 goto out_unregister_entry;
1016
1017 ret = osnoise_arch_register();
1018 if (ret)
1019 goto out_irq_exit;
1020
1021 return 0;
1022
1023 out_irq_exit:
1024 unregister_trace_irq_handler_exit(trace_irqexit_callback, NULL);
1025 out_unregister_entry:
1026 unregister_trace_irq_handler_entry(trace_irqentry_callback, NULL);
1027 out_err:
1028 return -EINVAL;
1029 }
1030
1031 /*
1032 * unhook_irq_events - Unhook IRQ handling events
1033 *
1034 * This function unhooks the IRQ related callbacks from the respective trace
1035 * events.
1036 */
1037 static void unhook_irq_events(void)
1038 {
1039 osnoise_arch_unregister();
1040 unregister_trace_irq_handler_exit(trace_irqexit_callback, NULL);
1041 unregister_trace_irq_handler_entry(trace_irqentry_callback, NULL);
1042 }
1043
1044 #ifndef CONFIG_PREEMPT_RT
1045 /*
1046 * trace_softirq_entry_callback - Note the starting of a softirq
1047 *
1048 * Save the starting time of a softirq. As softirqs are non-preemptive to
1049 * other softirqs, it is safe to use a single variable (osn_var->softirq)
1050 * to save the statistics. The arrival_time is used to report... the
1051 * arrival time. The delta_start is used to compute the duration at the
1052 * softirq exit handler. See cond_move_softirq_delta_start().
1053 */
1054 static void trace_softirq_entry_callback(void *data, unsigned int vec_nr)
1055 {
1056 struct osnoise_variables *osn_var = this_cpu_osn_var();
1057
1058 if (!osn_var->sampling)
1059 return;
1060 /*
1061 * This value will be used in the report, but not to compute
1062 * the execution time, so it is safe to get it unsafe.
1063 */
1064 osn_var->softirq.arrival_time = time_get();
1065 set_int_safe_time(osn_var, &osn_var->softirq.delta_start);
1066 osn_var->softirq.count++;
1067
1068 local_inc(&osn_var->int_counter);
1069 }
1070
1071 /*
1072 * trace_softirq_exit_callback - Note the end of an softirq
1073 *
1074 * Computes the duration of the softirq noise, and traces it. Also discounts the
1075 * interference from other sources of noise that could currently be accounted.
1076 */
1077 static void trace_softirq_exit_callback(void *data, unsigned int vec_nr)
1078 {
1079 struct osnoise_variables *osn_var = this_cpu_osn_var();
1080 s64 duration;
1081
1082 if (!osn_var->sampling)
1083 return;
1084
1085 if (unlikely(timerlat_enabled()))
1086 if (!timerlat_softirq_exit(osn_var))
1087 return;
1088
1089 duration = get_int_safe_duration(osn_var, &osn_var->softirq.delta_start);
1090 trace_softirq_noise(vec_nr, osn_var->softirq.arrival_time, duration);
1091 cond_move_thread_delta_start(osn_var, duration);
1092 osn_var->softirq.arrival_time = 0;
1093 }
1094
1095 /*
1096 * hook_softirq_events - Hook softirq handling events
1097 *
1098 * This function hooks the softirq related callbacks to the respective trace
1099 * events.
1100 */
1101 static int hook_softirq_events(void)
1102 {
1103 int ret;
1104
1105 ret = register_trace_softirq_entry(trace_softirq_entry_callback, NULL);
1106 if (ret)
1107 goto out_err;
1108
1109 ret = register_trace_softirq_exit(trace_softirq_exit_callback, NULL);
1110 if (ret)
1111 goto out_unreg_entry;
1112
1113 return 0;
1114
1115 out_unreg_entry:
1116 unregister_trace_softirq_entry(trace_softirq_entry_callback, NULL);
1117 out_err:
1118 return -EINVAL;
1119 }
1120
1121 /*
1122 * unhook_softirq_events - Unhook softirq handling events
1123 *
1124 * This function unhooks the softirq related callbacks from the respective trace
1125 * events.
1126 */
1127 static void unhook_softirq_events(void)
1128 {
1129 unregister_trace_softirq_entry(trace_softirq_entry_callback, NULL);
1130 unregister_trace_softirq_exit(trace_softirq_exit_callback, NULL);
1131 }
1132 #else /* CONFIG_PREEMPT_RT */
1133 /*
1134 * softirq are threads on the PREEMPT_RT mode.
1135 */
1136 static int hook_softirq_events(void)
1137 {
1138 return 0;
1139 }
1140 static void unhook_softirq_events(void)
1141 {
1142 }
1143 #endif
1144
1145 /*
1146 * thread_entry - Record the starting of a thread noise window
1147 *
1148 * It saves the context switch time for a noisy thread, and increments
1149 * the interference counters.
1150 */
1151 static void
1152 thread_entry(struct osnoise_variables *osn_var, struct task_struct *t)
1153 {
1154 if (!osn_var->sampling)
1155 return;
1156 /*
1157 * The arrival time will be used in the report, but not to compute
1158 * the execution time, so it is safe to get it unsafe.
1159 */
1160 osn_var->thread.arrival_time = time_get();
1161
1162 set_int_safe_time(osn_var, &osn_var->thread.delta_start);
1163
1164 osn_var->thread.count++;
1165 local_inc(&osn_var->int_counter);
1166 }
1167
1168 /*
1169 * thread_exit - Report the end of a thread noise window
1170 *
1171 * It computes the total noise from a thread, tracing if needed.
1172 */
1173 static void
1174 thread_exit(struct osnoise_variables *osn_var, struct task_struct *t)
1175 {
1176 s64 duration;
1177
1178 if (!osn_var->sampling)
1179 return;
1180
1181 if (unlikely(timerlat_enabled()))
1182 if (!timerlat_thread_exit(osn_var))
1183 return;
1184
1185 duration = get_int_safe_duration(osn_var, &osn_var->thread.delta_start);
1186
1187 trace_thread_noise(t, osn_var->thread.arrival_time, duration);
1188
1189 osn_var->thread.arrival_time = 0;
1190 }
1191
1192 #ifdef CONFIG_TIMERLAT_TRACER
1193 /*
1194 * osnoise_stop_exception - Stop tracing and the tracer.
1195 */
1196 static __always_inline void osnoise_stop_exception(char *msg, int cpu)
1197 {
1198 struct osnoise_instance *inst;
1199 struct trace_array *tr;
1200
1201 rcu_read_lock();
1202 list_for_each_entry_rcu(inst, &osnoise_instances, list) {
1203 tr = inst->tr;
1204 trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
1205 "stop tracing hit on cpu %d due to exception: %s\n",
1206 smp_processor_id(),
1207 msg);
1208
1209 if (test_bit(OSN_PANIC_ON_STOP, &osnoise_options))
1210 panic("tracer hit on cpu %d due to exception: %s\n",
1211 smp_processor_id(),
1212 msg);
1213
1214 tracer_tracing_off(tr);
1215 }
1216 rcu_read_unlock();
1217 }
1218
1219 /*
1220 * trace_sched_migrate_callback - sched:sched_migrate_task trace event handler
1221 *
1222 * This function is hooked to the sched:sched_migrate_task trace event, and monitors
1223 * timerlat user-space thread migration.
1224 */
1225 static void trace_sched_migrate_callback(void *data, struct task_struct *p, int dest_cpu)
1226 {
1227 struct osnoise_variables *osn_var;
1228 long cpu = task_cpu(p);
1229
1230 osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
1231 if (osn_var->pid == p->pid && dest_cpu != cpu) {
1232 per_cpu_ptr(&per_cpu_timerlat_var, cpu)->uthread_migrate = 1;
1233 osnoise_taint("timerlat user-thread migrated\n");
1234 osnoise_stop_exception("timerlat user-thread migrated", cpu);
1235 }
1236 }
1237
1238 static int register_migration_monitor(void)
1239 {
1240 int ret = 0;
1241
1242 /*
1243 * Timerlat thread migration check is only required when running timerlat in user-space.
1244 * Thus, enable callback only if timerlat is set with no workload.
1245 */
1246 if (timerlat_enabled() && !test_bit(OSN_WORKLOAD, &osnoise_options))
1247 ret = register_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
1248
1249 return ret;
1250 }
1251
1252 static void unregister_migration_monitor(void)
1253 {
1254 if (timerlat_enabled() && !test_bit(OSN_WORKLOAD, &osnoise_options))
1255 unregister_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
1256 }
1257 #else
1258 static int register_migration_monitor(void)
1259 {
1260 return 0;
1261 }
1262 static void unregister_migration_monitor(void) {}
1263 #endif
1264 /*
1265 * trace_sched_switch - sched:sched_switch trace event handler
1266 *
1267 * This function is hooked to the sched:sched_switch trace event, and it is
1268 * used to record the beginning and to report the end of a thread noise window.
1269 */
1270 static void
1271 trace_sched_switch_callback(void *data, bool preempt,
1272 struct task_struct *p,
1273 struct task_struct *n,
1274 unsigned int prev_state)
1275 {
1276 struct osnoise_variables *osn_var = this_cpu_osn_var();
1277 int workload = test_bit(OSN_WORKLOAD, &osnoise_options);
1278
1279 if ((p->pid != osn_var->pid) || !workload)
1280 thread_exit(osn_var, p);
1281
1282 if ((n->pid != osn_var->pid) || !workload)
1283 thread_entry(osn_var, n);
1284 }
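/*
 * In the callback above, the workload thread registered in osn_var->pid is
 * the only task not accounted as thread noise, and only while the
 * OSNOISE_WORKLOAD option is set. With the option cleared, every switch
 * goes through thread_entry()/thread_exit(), as the measured workload is
 * then an external (user-space) task.
 */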
1285
1286 /*
1287 * hook_thread_events - Hook the instrumentation for thread noise
1288 *
1289 * Hook the osnoise tracer callbacks to handle the noise from other
1290 * threads on the necessary kernel events.
1291 */
1292 static int hook_thread_events(void)
1293 {
1294 int ret;
1295
1296 ret = register_trace_sched_switch(trace_sched_switch_callback, NULL);
1297 if (ret)
1298 return -EINVAL;
1299
1300 ret = register_migration_monitor();
1301 if (ret)
1302 goto out_unreg;
1303
1304 return 0;
1305
1306 out_unreg:
1307 unregister_trace_sched_switch(trace_sched_switch_callback, NULL);
1308 return -EINVAL;
1309 }
1310
1311 /*
1312 * unhook_thread_events - unhook the instrumentation for thread noise
1313 *
1314 * Unhook the osnoise tracer callbacks to handle the noise from other
1315 * threads on the necessary kernel events.
1316 */
1317 static void unhook_thread_events(void)
1318 {
1319 unregister_trace_sched_switch(trace_sched_switch_callback, NULL);
1320 unregister_migration_monitor();
1321 }
1322
1323 /*
1324 * save_osn_sample_stats - Save the osnoise_sample statistics
1325 *
1326 * Save the osnoise_sample statistics before the sampling phase. These
1327 * values will be used later to compute the diff between the statistics
1328 * before and after the osnoise sampling.
1329 */
1330 static void
1331 save_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *s)
1332 {
1333 s->nmi_count = osn_var->nmi.count;
1334 s->irq_count = osn_var->irq.count;
1335 s->softirq_count = osn_var->softirq.count;
1336 s->thread_count = osn_var->thread.count;
1337 }
1338
1339 /*
1340 * diff_osn_sample_stats - Compute the osnoise_sample statistics
1341 *
1342 * After a sample period, compute the difference on the osnoise_sample
1343 * statistics. The struct osnoise_sample *s contains the statistics saved via
1344 * save_osn_sample_stats() before the osnoise sampling.
1345 */
1346 static void
1347 diff_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *s)
1348 {
1349 s->nmi_count = osn_var->nmi.count - s->nmi_count;
1350 s->irq_count = osn_var->irq.count - s->irq_count;
1351 s->softirq_count = osn_var->softirq.count - s->softirq_count;
1352 s->thread_count = osn_var->thread.count - s->thread_count;
1353 }
1354
1355 /*
1356 * osnoise_stop_tracing - Stop tracing and the tracer.
1357 */
1358 static __always_inline void osnoise_stop_tracing(void)
1359 {
1360 struct osnoise_instance *inst;
1361 struct trace_array *tr;
1362
1363 rcu_read_lock();
1364 list_for_each_entry_rcu(inst, &osnoise_instances, list) {
1365 tr = inst->tr;
1366 trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
1367 "stop tracing hit on cpu %d\n", smp_processor_id());
1368
1369 if (test_bit(OSN_PANIC_ON_STOP, &osnoise_options))
1370 panic("tracer hit stop condition on CPU %d\n", smp_processor_id());
1371
1372 tracer_tracing_off(tr);
1373 }
1374 rcu_read_unlock();
1375 }
1376
1377 /*
1378 * osnoise_has_tracing_on - Check if there is at least one instance on
1379 */
1380 static __always_inline int osnoise_has_tracing_on(void)
1381 {
1382 struct osnoise_instance *inst;
1383 int trace_is_on = 0;
1384
1385 rcu_read_lock();
1386 list_for_each_entry_rcu(inst, &osnoise_instances, list)
1387 trace_is_on += tracer_tracing_is_on(inst->tr);
1388 rcu_read_unlock();
1389
1390 return trace_is_on;
1391 }
1392
1393 /*
1394 * notify_new_max_latency - Notify a new max latency via fsnotify interface.
1395 */
1396 static void notify_new_max_latency(u64 latency)
1397 {
1398 struct osnoise_instance *inst;
1399 struct trace_array *tr;
1400
1401 rcu_read_lock();
1402 list_for_each_entry_rcu(inst, &osnoise_instances, list) {
1403 tr = inst->tr;
1404 if (tracer_tracing_is_on(tr) && tr->max_latency < latency) {
1405 tr->max_latency = latency;
1406 latency_fsnotify(tr);
1407 }
1408 }
1409 rcu_read_unlock();
1410 }
1411
1412 /*
1413 * run_osnoise - Sample the time and look for osnoise
1414 *
1415 * Used to capture the time, looking for potential osnoise latency repeatedly.
1416 * Different from hwlat_detector, it is called with preemption and interrupts
1417 * enabled. This allows irqs, softirqs and threads to run, interfering on the
1418 * osnoise sampling thread, as they would do with a regular thread.
1419 */
1420 static int run_osnoise(void)
1421 {
1422 bool disable_irq = test_bit(OSN_IRQ_DISABLE, &osnoise_options);
1423 struct osnoise_variables *osn_var = this_cpu_osn_var();
1424 u64 start, sample, last_sample;
1425 u64 last_int_count, int_count;
1426 s64 noise = 0, max_noise = 0;
1427 s64 total, last_total = 0;
1428 struct osnoise_sample s;
1429 bool disable_preemption;
1430 unsigned int threshold;
1431 u64 runtime, stop_in;
1432 u64 sum_noise = 0;
1433 int hw_count = 0;
1434 int ret = -1;
1435
1436 /*
1437 * Disabling preemption is only required if IRQs are enabled,
1438 * and the option is set on.
1439 */
1440 disable_preemption = !disable_irq && test_bit(OSN_PREEMPT_DISABLE, &osnoise_options);
1441
1442 /*
1443 * Considers the current thread as the workload.
1444 */
1445 osn_var->pid = current->pid;
1446
1447 /*
1448 * Save the current stats for the diff
1449 */
1450 save_osn_sample_stats(osn_var, &s);
1451
1452 /*
1453 * if threshold is 0, use the default value of 5 us.
1454 */
1455 threshold = tracing_thresh ? : 5000;
1456
1457 /*
1458 * Apply PREEMPT and IRQ disabled options.
1459 */
1460 if (disable_irq)
1461 local_irq_disable();
1462
1463 if (disable_preemption)
1464 preempt_disable();
1465
1466 /*
1467 * Make sure NMIs see sampling first
1468 */
1469 osn_var->sampling = true;
1470 barrier();
1471
1472 /*
1473 * Transform the *_us config to nanoseconds to avoid the
1474 * division on the main loop.
1475 */
1476 runtime = osnoise_data.sample_runtime * NSEC_PER_USEC;
1477 stop_in = osnoise_data.stop_tracing * NSEC_PER_USEC;
1478
1479 /*
1480 * Start timestamp
1481 */
1482 start = time_get();
1483
1484 /*
1485 * "previous" loop.
1486 */
1487 last_int_count = set_int_safe_time(osn_var, &last_sample);
1488
1489 do {
1490 /*
1491 * Get sample!
1492 */
1493 int_count = set_int_safe_time(osn_var, &sample);
1494
1495 noise = time_sub(sample, last_sample);
1496
1497 /*
1498 * This shouldn't happen.
1499 */
1500 if (noise < 0) {
1501 osnoise_taint("negative noise!");
1502 goto out;
1503 }
1504
1505 /*
1506 * Sample runtime.
1507 */
1508 total = time_sub(sample, start);
1509
1510 /*
1511 * Check for possible overflows.
1512 */
1513 if (total < last_total) {
1514 osnoise_taint("total overflow!");
1515 break;
1516 }
1517
1518 last_total = total;
1519
1520 if (noise >= threshold) {
1521 int interference = int_count - last_int_count;
1522
1523 if (noise > max_noise)
1524 max_noise = noise;
1525
1526 if (!interference)
1527 hw_count++;
1528
1529 sum_noise += noise;
1530
1531 trace_sample_threshold(last_sample, noise, interference);
1532
1533 if (osnoise_data.stop_tracing)
1534 if (noise > stop_in)
1535 osnoise_stop_tracing();
1536 }
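/*
 * If a noise above the threshold arrived while int_counter did not move,
 * no NMI, IRQ, softirq or thread was seen by the kernel: the gap is then
 * attributed to hardware or hypervisor interference, hence hw_count++
 * above.
 */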
1537
1538 /*
1539 * In some cases, notably when running on a nohz_full CPU with
1540 * a stopped tick, PREEMPT_RCU has no way to account for QSs.
1541 * This will eventually cause unwarranted noise as PREEMPT_RCU
1542 * will force preemption as the means of ending the current
1543 * grace period. We avoid this problem by calling
1544 * rcu_momentary_dyntick_idle(), which performs a zero duration
1545 * EQS allowing PREEMPT_RCU to end the current grace period.
1546 * This call shouldn't be wrapped inside an RCU critical
1547 * section.
1548 *
1549 * Note that in non PREEMPT_RCU kernels QSs are handled through
1550 * cond_resched()
1551 */
1552 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
1553 if (!disable_irq)
1554 local_irq_disable();
1555
1556 rcu_momentary_dyntick_idle();
1557
1558 if (!disable_irq)
1559 local_irq_enable();
1560 }
1561
1562 /*
1563 * For the non-preemptive kernel config: let threads run, if
1564 * they so wish, unless set not to do so.
1565 */
1566 if (!disable_irq && !disable_preemption)
1567 cond_resched();
1568
1569 last_sample = sample;
1570 last_int_count = int_count;
1571
1572 } while (total < runtime && !kthread_should_stop());
1573
1574 /*
1575 * Finish the above in the view for interrupts.
1576 */
1577 barrier();
1578
1579 osn_var->sampling = false;
1580
1581 /*
1582 * Make sure sampling data is no longer updated.
1583 */
1584 barrier();
1585
1586 /*
1587 * Return to the preemptive state.
1588 */
1589 if (disable_preemption)
1590 preempt_enable();
1591
1592 if (disable_irq)
1593 local_irq_enable();
1594
1595 /*
1596 * Save noise info.
1597 */
1598 s.noise = time_to_us(sum_noise);
1599 s.runtime = time_to_us(total);
1600 s.max_sample = time_to_us(max_noise);
1601 s.hw_count = hw_count;
1602
1603 /* Save interference stats info */
1604 diff_osn_sample_stats(osn_var, &s);
1605
1606 trace_osnoise_sample(&s);
1607
1608 notify_new_max_latency(max_noise);
1609
1610 if (osnoise_data.stop_tracing_total)
1611 if (s.noise > osnoise_data.stop_tracing_total)
1612 osnoise_stop_tracing();
1613
1614 return 0;
1615 out:
1616 return ret;
1617 }
1618
1619 static struct cpumask osnoise_cpumask;
1620 static struct cpumask save_cpumask;
1621 static struct cpumask kthread_cpumask;
1622
1623 /*
1624 * osnoise_sleep - sleep until the next period
1625 */
1626 static void osnoise_sleep(bool skip_period)
1627 {
1628 u64 interval;
1629 ktime_t wake_time;
1630
1631 mutex_lock(&interface_lock);
1632 if (skip_period)
1633 interval = osnoise_data.sample_period;
1634 else
1635 interval = osnoise_data.sample_period - osnoise_data.sample_runtime;
1636 mutex_unlock(&interface_lock);
1637
1638 /*
1639 * differently from hwlat_detector, the osnoise tracer can run
1640 * without a pause because preemption is on.
1641 */
1642 if (!interval) {
1643 /* Let synchronize_rcu_tasks() make progress */
1644 cond_resched_tasks_rcu_qs();
1645 return;
1646 }
1647
1648 wake_time = ktime_add_us(ktime_get(), interval);
1649 __set_current_state(TASK_INTERRUPTIBLE);
1650
1651 while (schedule_hrtimeout(&wake_time, HRTIMER_MODE_ABS)) {
1652 if (kthread_should_stop())
1653 break;
1654 }
1655 }
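/*
 * Note that with the default configuration (sample_period ==
 * sample_runtime == 1s), the computed interval is 0: the thread then only
 * pays the cond_resched_tasks_rcu_qs() above and samples back-to-back.
 */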
1656
1657 /*
1658 * osnoise_migration_pending - checks if the task needs to migrate
1659 *
1660 * osnoise/timerlat threads are per-cpu. If there is a pending request to
1661 * migrate the thread away from the current CPU, something bad has happened.
1662 * Play the good citizen and leave.
1663 *
1664 * Returns 0 if it is safe to continue, 1 otherwise.
1665 */
1666 static inline int osnoise_migration_pending(void)
1667 {
1668 if (!current->migration_pending)
1669 return 0;
1670
1671 /*
1672 * If migration is pending, there is a task waiting for the
1673 * tracer to enable migration. The tracer does not allow migration,
1674 * thus: taint and leave to unblock the blocked thread.
1675 */
1676 osnoise_taint("migration requested to osnoise threads, leaving.");
1677
1678 /*
1679 * Unset this thread from the threads managed by the interface.
1680 * The tracers are responsible for cleaning their env before
1681 * exiting.
1682 */
1683 mutex_lock(&interface_lock);
1684 this_cpu_osn_var()->kthread = NULL;
1685 cpumask_clear_cpu(smp_processor_id(), &kthread_cpumask);
1686 mutex_unlock(&interface_lock);
1687
1688 return 1;
1689 }
1690
1691 /*
1692 * osnoise_main - The osnoise detection kernel thread
1693 *
1694 * Calls run_osnoise() function to measure the osnoise for the configured runtime,
1695 * every period.
1696 */
1697 static int osnoise_main(void *data)
1698 {
1699 unsigned long flags;
1700
1701 /*
1702 * This thread was created pinned to the CPU using PF_NO_SETAFFINITY.
1703 * The problem is that cgroup does not allow PF_NO_SETAFFINITY thread.
1704 *
1705 * To work around this limitation, disable migration and remove the
1706 * flag.
1707 */
1708 migrate_disable();
1709 raw_spin_lock_irqsave(&current->pi_lock, flags);
1710 current->flags &= ~(PF_NO_SETAFFINITY);
1711 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1712
1713 while (!kthread_should_stop()) {
1714 if (osnoise_migration_pending())
1715 break;
1716
1717 /* skip a period if tracing is off on all instances */
1718 if (!osnoise_has_tracing_on()) {
1719 osnoise_sleep(true);
1720 continue;
1721 }
1722
1723 run_osnoise();
1724 osnoise_sleep(false);
1725 }
1726
1727 migrate_enable();
1728 return 0;
1729 }
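/*
 * Typical use of this kthread workload via tracefs, e.g.:
 *
 *	# cd /sys/kernel/tracing
 *	# echo osnoise > current_tracer
 *	# cat trace
 *
 * which dispatches one osnoise_main() thread per CPU of osnoise_cpumask.
 */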
1730
1731 #ifdef CONFIG_TIMERLAT_TRACER
1732 /*
1733 * timerlat_irq - hrtimer handler for timerlat.
1734 */
1735 static enum hrtimer_restart timerlat_irq(struct hrtimer *timer)
1736 {
1737 struct osnoise_variables *osn_var = this_cpu_osn_var();
1738 struct timerlat_variables *tlat;
1739 struct timerlat_sample s;
1740 u64 now;
1741 u64 diff;
1742
1743 /*
1744 * I am not sure if the timer was armed for this CPU. So, get
1745 * the timerlat struct from the timer itself, not from this
1746 * CPU.
1747 */
1748 tlat = container_of(timer, struct timerlat_variables, timer);
1749
1750 now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
1751
1752 /*
1753 * Enable the osnoise events for thread and softirq.
1754 */
1755 tlat->tracing_thread = true;
1756
1757 osn_var->thread.arrival_time = time_get();
1758
1759 /*
1760 * A hardirq is running: the timer IRQ. It is for sure preempting
1761 * a thread, and potentially preempting a softirq.
1762 *
1763 * At this point, it is not interesting to know the duration of the
1764 * preempted thread (and maybe softirq), but how much time they will
1765 * delay the beginning of the execution of the timer thread.
1766 *
1767 * To get the correct (net) delay added by the softirq, its delta_start
1768 * is set as the IRQ one. In this way, at the return of the IRQ, the delta
1769 * start of the softirq will be zeroed, accounting then only the time
1770 * after that.
1771 *
1772 * The thread follows the same principle. However, if a softirq is
1773 * running, the thread needs to receive the softirq delta_start. The
1774 * reason is that the softirq will be the last to be unfolded,
1775 * resetting the thread delay to zero.
1776 *
1777 * The PREEMPT_RT is a special case, though. As softirqs run as threads
1778 * on RT, moving the thread is enough.
1779 */
1780 if (!IS_ENABLED(CONFIG_PREEMPT_RT) && osn_var->softirq.delta_start) {
1781 copy_int_safe_time(osn_var, &osn_var->thread.delta_start,
1782 &osn_var->softirq.delta_start);
1783
1784 copy_int_safe_time(osn_var, &osn_var->softirq.delta_start,
1785 &osn_var->irq.delta_start);
1786 } else {
1787 copy_int_safe_time(osn_var, &osn_var->thread.delta_start,
1788 &osn_var->irq.delta_start);
1789 }
1790
1791 /*
1792 * Compute the difference between the current and the expected time.
1793 */
1794 diff = now - tlat->abs_period;
1795
1796 tlat->count++;
1797 s.seqnum = tlat->count;
1798 s.timer_latency = diff;
1799 s.context = IRQ_CONTEXT;
1800
1801 trace_timerlat_sample(&s);
1802
1803 if (osnoise_data.stop_tracing) {
1804 if (time_to_us(diff) >= osnoise_data.stop_tracing) {
1805
1806 /*
1807 * At this point, if stop_tracing is set and <= print_stack,
1808 * print_stack is set and would be printed in the thread handler.
1809 *
1810 * Thus, print the stack trace as it is helpful to define the
1811 * root cause of an IRQ latency.
1812 */
1813 if (osnoise_data.stop_tracing <= osnoise_data.print_stack) {
1814 timerlat_save_stack(0);
1815 timerlat_dump_stack(time_to_us(diff));
1816 }
1817
1818 osnoise_stop_tracing();
1819 notify_new_max_latency(diff);
1820
1821 wake_up_process(tlat->kthread);
1822
1823 return HRTIMER_NORESTART;
1824 }
1825 }
1826
1827 wake_up_process(tlat->kthread);
1828
1829 if (osnoise_data.print_stack)
1830 timerlat_save_stack(0);
1831
1832 return HRTIMER_NORESTART;
1833 }
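/*
 * A single activation can thus produce two samples sharing the same
 * seqnum: the IRQ_CONTEXT one emitted above, and a THREAD_CONTEXT one
 * emitted by timerlat_main() after the wakeup, which also includes the
 * scheduling delay of the thread.
 */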
1834
1835 /*
1836 * wait_next_period - Wait for the next period for timerlat
1837 */
1838 static int wait_next_period(struct timerlat_variables *tlat)
1839 {
1840 ktime_t next_abs_period, now;
1841 u64 rel_period = osnoise_data.timerlat_period * 1000;
1842
1843 now = hrtimer_cb_get_time(&tlat->timer);
1844 next_abs_period = ns_to_ktime(tlat->abs_period + rel_period);
1845
1846 /*
1847 * Save the next abs_period.
1848 */
1849 tlat->abs_period = (u64) ktime_to_ns(next_abs_period);
1850
1851 /*
1852 * If the new abs_period is in the past, skip the activation.
1853 */
1854 while (ktime_compare(now, next_abs_period) > 0) {
1855 next_abs_period = ns_to_ktime(tlat->abs_period + rel_period);
1856 tlat->abs_period = (u64) ktime_to_ns(next_abs_period);
1857 }
1858
1859 set_current_state(TASK_INTERRUPTIBLE);
1860
1861 hrtimer_start(&tlat->timer, next_abs_period, HRTIMER_MODE_ABS_PINNED_HARD);
1862 schedule();
1863 return 1;
1864 }
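/*
 * If the thread oversleeps by more than a full period, the while loop
 * above advances abs_period until the next activation lies in the future,
 * skipping the missed activations instead of firing them back-to-back.
 */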
1865
1866 /*
1867 * timerlat_main- Timerlat main
1868 */
1869 static int timerlat_main(void *data)
1870 {
1871 struct osnoise_variables *osn_var = this_cpu_osn_var();
1872 struct timerlat_variables *tlat = this_cpu_tmr_var();
1873 struct timerlat_sample s;
1874 struct sched_param sp;
1875 unsigned long flags;
1876 u64 now, diff;
1877
1878 /*
1879 * Make the thread RT, that is how cyclictest is usually used.
1880 */
1881 sp.sched_priority = DEFAULT_TIMERLAT_PRIO;
1882 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1883
1884 /*
1885 * This thread was created pinned to the CPU using PF_NO_SETAFFINITY.
1886 * The problem is that cgroup does not allow PF_NO_SETAFFINITY thread.
1887 *
1888 * To work around this limitation, disable migration and remove the
1889 * flag.
1890 */
1891 migrate_disable();
1892 raw_spin_lock_irqsave(¤t->pi_lock, flags);
1893 current->flags &= ~(PF_NO_SETAFFINITY);
1894 raw_spin_unlock_irqrestore(¤t->pi_lock, flags);
1895
1896 tlat->count = 0;
1897 tlat->tracing_thread = false;
1898
1899 hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
1900 tlat->timer.function = timerlat_irq;
1901 tlat->kthread = current;
1902 osn_var->pid = current->pid;
1903 /*
1904 * Anotate the arrival time.
1905 */
1906 tlat->abs_period = hrtimer_cb_get_time(&tlat->timer);
1907
1908 wait_next_period(tlat);
1909
1910 osn_var->sampling = 1;
1911
1912 while (!kthread_should_stop()) {
1913
1914 now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
1915 diff = now - tlat->abs_period;
1916
1917 s.seqnum = tlat->count;
1918 s.timer_latency = diff;
1919 s.context = THREAD_CONTEXT;
1920
1921 trace_timerlat_sample(&s);
1922
1923 notify_new_max_latency(diff);
1924
1925 timerlat_dump_stack(time_to_us(diff));
1926
1927 tlat->tracing_thread = false;
1928 if (osnoise_data.stop_tracing_total)
1929 if (time_to_us(diff) >= osnoise_data.stop_tracing_total)
1930 osnoise_stop_tracing();
1931
1932 if (osnoise_migration_pending())
1933 break;
1934
1935 wait_next_period(tlat);
1936 }
1937
1938 hrtimer_cancel(&tlat->timer);
1939 migrate_enable();
1940 return 0;
1941 }
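
/*
 * Typical use of the kernel-thread mode implemented above, assuming the
 * default tracefs mount point:
 *
 *   # cd /sys/kernel/tracing
 *   # echo timerlat > current_tracer
 *   # echo 1000 > osnoise/timerlat_period_us
 *   # cat trace
 */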
#else /* CONFIG_TIMERLAT_TRACER */
static int timerlat_main(void *data)
{
	return 0;
}
#endif /* CONFIG_TIMERLAT_TRACER */

/*
 * stop_kthread - stop a workload thread
 */
static void stop_kthread(unsigned int cpu)
{
	struct task_struct *kthread;

	mutex_lock(&interface_lock);
	kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread;
	if (kthread) {
		per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
		mutex_unlock(&interface_lock);

		if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask) &&
		    !WARN_ON(!test_bit(OSN_WORKLOAD, &osnoise_options))) {
			kthread_stop(kthread);
		} else if (!WARN_ON(test_bit(OSN_WORKLOAD, &osnoise_options))) {
			/*
			 * This is a user thread waiting on the timerlat_fd. We need
			 * to close all users, and the best way to guarantee this is
			 * by killing the thread. NOTE: this is a purpose-specific file.
			 */
			kill_pid(kthread->thread_pid, SIGKILL, 1);
			put_task_struct(kthread);
		}
	} else {
		mutex_unlock(&interface_lock);
		/* if no workload, just return */
		if (!test_bit(OSN_WORKLOAD, &osnoise_options)) {
			/*
			 * This is set in the osnoise tracer case.
			 */
			per_cpu(per_cpu_osnoise_var, cpu).sampling = false;
			barrier();
		}
	}
}

/*
 * stop_per_cpu_kthreads - Stop per-cpu threads
 *
 * Stop the osnoise sampling threads. Use this on unload and at system
 * shutdown.
 */
static void stop_per_cpu_kthreads(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		stop_kthread(cpu);
}

/*
 * start_kthread - Start a workload thread
 */
static int start_kthread(unsigned int cpu)
{
	struct task_struct *kthread;
	void *main = osnoise_main;
	char comm[24];

	if (timerlat_enabled()) {
		snprintf(comm, 24, "timerlat/%d", cpu);
		main = timerlat_main;
	} else {
		/* if no workload, just return */
		if (!test_bit(OSN_WORKLOAD, &osnoise_options)) {
			per_cpu(per_cpu_osnoise_var, cpu).sampling = true;
			barrier();
			return 0;
		}
		snprintf(comm, 24, "osnoise/%d", cpu);
	}

	kthread = kthread_run_on_cpu(main, NULL, cpu, comm);

	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		stop_per_cpu_kthreads();
		return -ENOMEM;
	}

	per_cpu(per_cpu_osnoise_var, cpu).kthread = kthread;
	cpumask_set_cpu(cpu, &kthread_cpumask);

	return 0;
}

/*
 * start_per_cpu_kthreads - Kick off per-cpu osnoise sampling kthreads
 *
 * This starts a kernel thread to look for osnoise on each allowed CPU.
 */
static int start_per_cpu_kthreads(void)
{
	struct cpumask *current_mask = &save_cpumask;
	int retval = 0;
	int cpu;

	if (!test_bit(OSN_WORKLOAD, &osnoise_options)) {
		if (timerlat_enabled())
			return 0;
	}

	cpus_read_lock();
	/*
	 * Run only on online CPUs in which osnoise is allowed to run.
	 */
	cpumask_and(current_mask, cpu_online_mask, &osnoise_cpumask);

	for_each_possible_cpu(cpu) {
		if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask)) {
			struct task_struct *kthread;

			kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread;
			if (!WARN_ON(!kthread))
				kthread_stop(kthread);
		}
		per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
	}

	for_each_cpu(cpu, current_mask) {
		retval = start_kthread(cpu);
		if (retval) {
			cpus_read_unlock();
			stop_per_cpu_kthreads();
			return retval;
		}
	}

	cpus_read_unlock();

	return retval;
}

#ifdef CONFIG_HOTPLUG_CPU
static void osnoise_hotplug_workfn(struct work_struct *dummy)
{
	unsigned int cpu = smp_processor_id();

	mutex_lock(&trace_types_lock);

	if (!osnoise_has_registered_instances())
		goto out_unlock_trace;

	mutex_lock(&interface_lock);
	cpus_read_lock();

	if (!cpumask_test_cpu(cpu, &osnoise_cpumask))
		goto out_unlock;

	start_kthread(cpu);

out_unlock:
	cpus_read_unlock();
	mutex_unlock(&interface_lock);
out_unlock_trace:
	mutex_unlock(&trace_types_lock);
}

static DECLARE_WORK(osnoise_hotplug_work, osnoise_hotplug_workfn);

/*
 * osnoise_cpu_init - CPU hotplug online callback function
 */
static int osnoise_cpu_init(unsigned int cpu)
{
	schedule_work_on(cpu, &osnoise_hotplug_work);
	return 0;
}

/*
 * osnoise_cpu_die - CPU hotplug offline callback function
 */
static int osnoise_cpu_die(unsigned int cpu)
{
	stop_kthread(cpu);
	return 0;
}

static void osnoise_init_hotplug_support(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "trace/osnoise:online",
				osnoise_cpu_init, osnoise_cpu_die);
	if (ret < 0)
		pr_warn(BANNER "Error initializing CPU hotplug support\n");

	return;
}
#else /* CONFIG_HOTPLUG_CPU */
static void osnoise_init_hotplug_support(void)
{
	return;
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * seq file functions for the osnoise/options file.
 */
static void *s_options_start(struct seq_file *s, loff_t *pos)
{
	int option = *pos;

	mutex_lock(&interface_lock);

	if (option >= OSN_MAX)
		return NULL;

	return pos;
}

static void *s_options_next(struct seq_file *s, void *v, loff_t *pos)
{
	int option = ++(*pos);

	if (option >= OSN_MAX)
		return NULL;

	return pos;
}

static int s_options_show(struct seq_file *s, void *v)
{
	loff_t *pos = v;
	int option = *pos;

	if (option == OSN_DEFAULTS) {
		if (osnoise_options == OSN_DEFAULT_OPTIONS)
			seq_printf(s, "%s", osnoise_options_str[option]);
		else
			seq_printf(s, "NO_%s", osnoise_options_str[option]);
		goto out;
	}

	if (test_bit(option, &osnoise_options))
		seq_printf(s, "%s", osnoise_options_str[option]);
	else
		seq_printf(s, "NO_%s", osnoise_options_str[option]);

out:
	if (option != OSN_MAX)
		seq_puts(s, " ");

	return 0;
}

static void s_options_stop(struct seq_file *s, void *v)
{
	seq_puts(s, "\n");
	mutex_unlock(&interface_lock);
}

static const struct seq_operations osnoise_options_seq_ops = {
	.start = s_options_start,
	.next = s_options_next,
	.show = s_options_show,
	.stop = s_options_stop
};

static int osnoise_options_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &osnoise_options_seq_ops);
};

/**
 * osnoise_options_write - Write function for "options" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * Writing the option name sets the option, writing the "NO_"
 * prefix in front of the option name disables it.
 *
 * Writing "DEFAULTS" resets the option values to the default ones.
 */
static ssize_t osnoise_options_write(struct file *filp, const char __user *ubuf,
				     size_t cnt, loff_t *ppos)
{
	int running, option, enable, retval;
	char buf[256], *option_str;

	if (cnt >= 256)
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "NO_", 3)) {
		option_str = strstrip(buf);
		enable = true;
	} else {
		option_str = strstrip(&buf[3]);
		enable = false;
	}

	option = match_string(osnoise_options_str, OSN_MAX, option_str);
	if (option < 0)
		return -EINVAL;

	/*
	 * trace_types_lock is taken to avoid concurrency on start/stop.
	 */
	mutex_lock(&trace_types_lock);
	running = osnoise_has_registered_instances();
	if (running)
		stop_per_cpu_kthreads();

	mutex_lock(&interface_lock);
	/*
	 * Avoid CPU hotplug operations that might read options.
	 */
	cpus_read_lock();

	retval = cnt;

	if (enable) {
		if (option == OSN_DEFAULTS)
			osnoise_options = OSN_DEFAULT_OPTIONS;
		else
			set_bit(option, &osnoise_options);
	} else {
		if (option == OSN_DEFAULTS)
			retval = -EINVAL;
		else
			clear_bit(option, &osnoise_options);
	}

	cpus_read_unlock();
	mutex_unlock(&interface_lock);

	if (running)
		start_per_cpu_kthreads();
	mutex_unlock(&trace_types_lock);

	return retval;
}
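
/*
 * For example, to run timerlat with a user-space workload (the option
 * names are those in osnoise_options_str):
 *
 *   # echo NO_OSNOISE_WORKLOAD > /sys/kernel/tracing/osnoise/options
 *
 * and to return to the default options:
 *
 *   # echo DEFAULTS > /sys/kernel/tracing/osnoise/options
 */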

/*
 * osnoise_cpus_read - Read function for reading the "cpus" file
 * @filp: The active open file structure
 * @ubuf: The userspace provided buffer to read value into
 * @count: The maximum number of bytes to read
 * @ppos: The current "file" position
 *
 * Prints the "cpus" output into the user-provided buffer.
 */
static ssize_t
osnoise_cpus_read(struct file *filp, char __user *ubuf, size_t count,
		  loff_t *ppos)
{
	char *mask_str;
	int len;

	mutex_lock(&interface_lock);

	len = snprintf(NULL, 0, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)) + 1;
	mask_str = kmalloc(len, GFP_KERNEL);
	if (!mask_str) {
		count = -ENOMEM;
		goto out_unlock;
	}

	len = snprintf(mask_str, len, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_free;
	}

	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_free:
	kfree(mask_str);
out_unlock:
	mutex_unlock(&interface_lock);

	return count;
}

/*
 * osnoise_cpus_write - Write function for "cpus" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @count: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "cpus"
 * interface to the osnoise tracer. By default, the mask lists all
 * CPUs, allowing osnoise threads to run on any online CPU of the
 * system. Writing to this interface restricts the execution of
 * osnoise to the given set of CPUs. Why not use "tracing_cpumask"?
 * Because the user might be interested in tracing what is running on
 * other CPUs. For instance, one might run osnoise on one HT CPU
 * while observing what is running on the sibling HT CPU.
 */
static ssize_t
osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count,
		   loff_t *ppos)
{
	cpumask_var_t osnoise_cpumask_new;
	int running, err;
	char buf[256];

	if (count >= 256)
		return -EINVAL;

	if (copy_from_user(buf, ubuf, count))
		return -EFAULT;

	if (!zalloc_cpumask_var(&osnoise_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpulist_parse(buf, osnoise_cpumask_new);
	if (err)
		goto err_free;

	/*
	 * trace_types_lock is taken to avoid concurrency on start/stop.
	 */
	mutex_lock(&trace_types_lock);
	running = osnoise_has_registered_instances();
	if (running)
		stop_per_cpu_kthreads();

	mutex_lock(&interface_lock);
	/*
	 * osnoise_cpumask is read by CPU hotplug operations.
	 */
	cpus_read_lock();

	cpumask_copy(&osnoise_cpumask, osnoise_cpumask_new);

	cpus_read_unlock();
	mutex_unlock(&interface_lock);

	if (running)
		start_per_cpu_kthreads();
	mutex_unlock(&trace_types_lock);

	free_cpumask_var(osnoise_cpumask_new);
	return count;

err_free:
	free_cpumask_var(osnoise_cpumask_new);

	return err;
}
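
/*
 * For example, to restrict the workload to CPUs 0-3, using the cpulist
 * format accepted by cpulist_parse():
 *
 *   # echo 0-3 > /sys/kernel/tracing/osnoise/cpus
 *   # cat /sys/kernel/tracing/osnoise/cpus
 *   0-3
 */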

#ifdef CONFIG_TIMERLAT_TRACER
static int timerlat_fd_open(struct inode *inode, struct file *file)
{
	struct osnoise_variables *osn_var;
	struct timerlat_variables *tlat;
	long cpu = (long) inode->i_cdev;

	mutex_lock(&interface_lock);

	/*
	 * This file is accessible only if timerlat is enabled, and
	 * NO_OSNOISE_WORKLOAD is set.
	 */
	if (!timerlat_enabled() || test_bit(OSN_WORKLOAD, &osnoise_options)) {
		mutex_unlock(&interface_lock);
		return -EINVAL;
	}

	migrate_disable();

	osn_var = this_cpu_osn_var();

	/*
	 * The osn_var->pid holds the single access to this file.
	 */
	if (osn_var->pid) {
		mutex_unlock(&interface_lock);
		migrate_enable();
		return -EBUSY;
	}

	/*
	 * timerlat is a per-cpu tracer, so check that the user-space task,
	 * too, is pinned to a single CPU. The tracer later monitors whether
	 * the task migrates, and disables the tracer if it does. Still, it
	 * is worth doing this basic acceptance test here to reject an
	 * obviously wrong setup.
	 */
	if (current->nr_cpus_allowed > 1 || cpu != smp_processor_id()) {
		mutex_unlock(&interface_lock);
		migrate_enable();
		return -EPERM;
	}

	/*
	 * From now on, it is good to go.
	 */
	file->private_data = inode->i_cdev;

	get_task_struct(current);

	osn_var->kthread = current;
	osn_var->pid = current->pid;

	/*
	 * Setup is done.
	 */
	mutex_unlock(&interface_lock);

	tlat = this_cpu_tmr_var();
	tlat->count = 0;

	hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
	tlat->timer.function = timerlat_irq;

	migrate_enable();
	return 0;
};
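
/*
 * A minimal sketch of the expected user-space usage, assuming the
 * default tracefs mount point (illustrative only, error handling
 * omitted):
 *
 *	cpu_set_t set;
 *	char c;
 *	int fd;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	sched_setaffinity(0, sizeof(set), &set);	// pin to CPU 0
 *
 *	fd = open("/sys/kernel/tracing/osnoise/per_cpu/cpu0/timerlat_fd",
 *		  O_RDONLY);
 *	while (read(fd, &c, 1) >= 0) {
 *		// each read() returns after the next timer activation
 *	}
 *	close(fd);	// releases the per-cpu timer and the pid slot
 */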

/*
 * timerlat_fd_read - Read function for "timerlat_fd" file
 * @file: The active open file structure
 * @ubuf: The userspace provided buffer to read value into
 * @count: The maximum number of bytes to read
 * @ppos: The current "file" position
 *
 * Prints 1 on timerlat, the number of interferences on osnoise, -1 on error.
 */
static ssize_t
timerlat_fd_read(struct file *file, char __user *ubuf, size_t count,
		 loff_t *ppos)
{
	long cpu = (long) file->private_data;
	struct osnoise_variables *osn_var;
	struct timerlat_variables *tlat;
	struct timerlat_sample s;
	s64 diff;
	u64 now;

	migrate_disable();

	tlat = this_cpu_tmr_var();

	/*
	 * While in user-space, the thread is migratable. There is nothing
	 * we can do about it.
	 * So, if the thread is running on another CPU, stop the machinery.
	 */
	if (cpu == smp_processor_id()) {
		if (tlat->uthread_migrate) {
			migrate_enable();
			return -EINVAL;
		}
	} else {
		per_cpu_ptr(&per_cpu_timerlat_var, cpu)->uthread_migrate = 1;
		osnoise_taint("timerlat user thread migrate\n");
		osnoise_stop_tracing();
		migrate_enable();
		return -EINVAL;
	}

	osn_var = this_cpu_osn_var();

	/*
	 * The user-space timerlat runs in a different order: read() starts
	 * by accounting the previous activation, then sleeps waiting for
	 * the next one.
	 *
	 * So, skip the accounting if we enter read() before the first
	 * wakeup from the timerlat IRQ:
	 */
	if (likely(osn_var->sampling)) {
		now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
		diff = now - tlat->abs_period;

		/*
		 * It was not a timer firing, but some other signal?
		 */
		if (diff < 0)
			goto out;

		s.seqnum = tlat->count;
		s.timer_latency = diff;
		s.context = THREAD_URET;

		trace_timerlat_sample(&s);

		notify_new_max_latency(diff);

		tlat->tracing_thread = false;
		if (osnoise_data.stop_tracing_total)
			if (time_to_us(diff) >= osnoise_data.stop_tracing_total)
				osnoise_stop_tracing();
	} else {
		tlat->tracing_thread = false;
		tlat->kthread = current;

		/* Annotate now to drift new period */
		tlat->abs_period = hrtimer_cb_get_time(&tlat->timer);

		osn_var->sampling = 1;
	}

	/* wait for the next period */
	wait_next_period(tlat);

	/* This is the wakeup from this cycle */
	now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
	diff = now - tlat->abs_period;

	/*
	 * It was not a timer firing, but some other signal?
	 */
	if (diff < 0)
		goto out;

	s.seqnum = tlat->count;
	s.timer_latency = diff;
	s.context = THREAD_CONTEXT;

	trace_timerlat_sample(&s);

	if (osnoise_data.stop_tracing_total) {
		if (time_to_us(diff) >= osnoise_data.stop_tracing_total) {
			timerlat_dump_stack(time_to_us(diff));
			notify_new_max_latency(diff);
			osnoise_stop_tracing();
		}
	}

out:
	migrate_enable();
	return 0;
}

static int timerlat_fd_release(struct inode *inode, struct file *file)
{
	struct osnoise_variables *osn_var;
	struct timerlat_variables *tlat_var;
	long cpu = (long) file->private_data;

	migrate_disable();
	mutex_lock(&interface_lock);

	osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
	tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);

	if (tlat_var->kthread)
		hrtimer_cancel(&tlat_var->timer);
	memset(tlat_var, 0, sizeof(*tlat_var));

	osn_var->sampling = 0;
	osn_var->pid = 0;

	/*
	 * We are leaving, not being stopped... see stop_kthread();
	 */
	if (osn_var->kthread) {
		put_task_struct(osn_var->kthread);
		osn_var->kthread = NULL;
	}

	mutex_unlock(&interface_lock);
	migrate_enable();
	return 0;
}
#endif

/*
 * osnoise/runtime_us: cannot be greater than the period.
 */
static struct trace_min_max_param osnoise_runtime = {
	.lock = &interface_lock,
	.val = &osnoise_data.sample_runtime,
	.max = &osnoise_data.sample_period,
	.min = NULL,
};

/*
 * osnoise/period_us: cannot be smaller than the runtime.
 */
static struct trace_min_max_param osnoise_period = {
	.lock = &interface_lock,
	.val = &osnoise_data.sample_period,
	.max = NULL,
	.min = &osnoise_data.sample_runtime,
};

/*
 * osnoise/stop_tracing_us: no limit.
 */
static struct trace_min_max_param osnoise_stop_tracing_in = {
	.lock = &interface_lock,
	.val = &osnoise_data.stop_tracing,
	.max = NULL,
	.min = NULL,
};

/*
 * osnoise/stop_tracing_total_us: no limit.
 */
static struct trace_min_max_param osnoise_stop_tracing_total = {
	.lock = &interface_lock,
	.val = &osnoise_data.stop_tracing_total,
	.max = NULL,
	.min = NULL,
};
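
/*
 * The runtime/period pair above defines the duty cycle of the osnoise
 * workload. For example, to sample 250 ms out of every second and stop
 * tracing on any single noise sample above 50 us:
 *
 *   # echo 1000000 > /sys/kernel/tracing/osnoise/period_us
 *   # echo 250000 > /sys/kernel/tracing/osnoise/runtime_us
 *   # echo 50 > /sys/kernel/tracing/osnoise/stop_tracing_us
 */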

#ifdef CONFIG_TIMERLAT_TRACER
/*
 * osnoise/print_stack: print the stacktrace of the IRQ handler if the total
 * latency is higher than val.
 */
static struct trace_min_max_param osnoise_print_stack = {
	.lock = &interface_lock,
	.val = &osnoise_data.print_stack,
	.max = NULL,
	.min = NULL,
};

/*
 * osnoise/timerlat_period: min 100 us, max 1 s
 */
static u64 timerlat_min_period = 100;
static u64 timerlat_max_period = 1000000;
static struct trace_min_max_param timerlat_period = {
	.lock = &interface_lock,
	.val = &osnoise_data.timerlat_period,
	.max = &timerlat_max_period,
	.min = &timerlat_min_period,
};

static const struct file_operations timerlat_fd_fops = {
	.open = timerlat_fd_open,
	.read = timerlat_fd_read,
	.release = timerlat_fd_release,
	.llseek = generic_file_llseek,
};
#endif

static const struct file_operations cpus_fops = {
	.open = tracing_open_generic,
	.read = osnoise_cpus_read,
	.write = osnoise_cpus_write,
	.llseek = generic_file_llseek,
};

static const struct file_operations osnoise_options_fops = {
	.open = osnoise_options_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
	.write = osnoise_options_write
};

#ifdef CONFIG_TIMERLAT_TRACER
#ifdef CONFIG_STACKTRACE
static int init_timerlat_stack_tracefs(struct dentry *top_dir)
{
	struct dentry *tmp;

	tmp = tracefs_create_file("print_stack", TRACE_MODE_WRITE, top_dir,
				  &osnoise_print_stack, &trace_min_max_fops);
	if (!tmp)
		return -ENOMEM;

	return 0;
}
#else /* CONFIG_STACKTRACE */
static int init_timerlat_stack_tracefs(struct dentry *top_dir)
{
	return 0;
}
#endif /* CONFIG_STACKTRACE */

static int osnoise_create_cpu_timerlat_fd(struct dentry *top_dir)
{
	struct dentry *timerlat_fd;
	struct dentry *per_cpu;
	struct dentry *cpu_dir;
	char cpu_str[30]; /* see trace.c: tracing_init_tracefs_percpu() */
	long cpu;

	/*
	 * Why not use the tracing instance per_cpu/ dir?
	 *
	 * Because osnoise/timerlat have a single workload, having
	 * multiple files like these would be a waste of memory.
	 */
	per_cpu = tracefs_create_dir("per_cpu", top_dir);
	if (!per_cpu)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		snprintf(cpu_str, 30, "cpu%ld", cpu);
		cpu_dir = tracefs_create_dir(cpu_str, per_cpu);
		if (!cpu_dir)
			goto out_clean;

		timerlat_fd = trace_create_file("timerlat_fd", TRACE_MODE_READ,
						cpu_dir, NULL, &timerlat_fd_fops);
		if (!timerlat_fd)
			goto out_clean;

		/* Record the CPU */
		d_inode(timerlat_fd)->i_cdev = (void *)(cpu);
	}

	return 0;

out_clean:
	tracefs_remove(per_cpu);
	return -ENOMEM;
}

/*
 * init_timerlat_tracefs - A function to initialize the timerlat interface files
 */
static int init_timerlat_tracefs(struct dentry *top_dir)
{
	struct dentry *tmp;
	int retval;

	tmp = tracefs_create_file("timerlat_period_us", TRACE_MODE_WRITE, top_dir,
				  &timerlat_period, &trace_min_max_fops);
	if (!tmp)
		return -ENOMEM;

	retval = osnoise_create_cpu_timerlat_fd(top_dir);
	if (retval)
		return retval;

	return init_timerlat_stack_tracefs(top_dir);
}
#else /* CONFIG_TIMERLAT_TRACER */
static int init_timerlat_tracefs(struct dentry *top_dir)
{
	return 0;
}
#endif /* CONFIG_TIMERLAT_TRACER */

/*
 * init_tracefs - A function to initialize the tracefs interface files
 *
 * This function creates entries in tracefs for "osnoise" and "timerlat".
 * It creates these directories in the tracing directory, and within that
 * directory the user can change and view the configs.
 */
static int init_tracefs(void)
{
	struct dentry *top_dir;
	struct dentry *tmp;
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return -ENOMEM;

	top_dir = tracefs_create_dir("osnoise", NULL);
	if (!top_dir)
		return 0;

	tmp = tracefs_create_file("period_us", TRACE_MODE_WRITE, top_dir,
				  &osnoise_period, &trace_min_max_fops);
	if (!tmp)
		goto err;

	tmp = tracefs_create_file("runtime_us", TRACE_MODE_WRITE, top_dir,
				  &osnoise_runtime, &trace_min_max_fops);
	if (!tmp)
		goto err;

	tmp = tracefs_create_file("stop_tracing_us", TRACE_MODE_WRITE, top_dir,
				  &osnoise_stop_tracing_in, &trace_min_max_fops);
	if (!tmp)
		goto err;

	tmp = tracefs_create_file("stop_tracing_total_us", TRACE_MODE_WRITE, top_dir,
				  &osnoise_stop_tracing_total, &trace_min_max_fops);
	if (!tmp)
		goto err;

	tmp = trace_create_file("cpus", TRACE_MODE_WRITE, top_dir, NULL, &cpus_fops);
	if (!tmp)
		goto err;

	tmp = trace_create_file("options", TRACE_MODE_WRITE, top_dir, NULL,
				&osnoise_options_fops);
	if (!tmp)
		goto err;

	ret = init_timerlat_tracefs(top_dir);
	if (ret)
		goto err;

	return 0;

err:
	tracefs_remove(top_dir);
	return -ENOMEM;
}
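
/*
 * The resulting layout under the tracefs mount point looks like this
 * (print_stack, timerlat_period_us and per_cpu/ depend on
 * CONFIG_TIMERLAT_TRACER and CONFIG_STACKTRACE):
 *
 *   osnoise/
 *     cpus
 *     options
 *     period_us
 *     runtime_us
 *     stop_tracing_us
 *     stop_tracing_total_us
 *     print_stack
 *     timerlat_period_us
 *     per_cpu/cpu0/timerlat_fd (one directory per possible CPU)
 */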

static int osnoise_hook_events(void)
{
	int retval;

	/*
	 * Trace is already hooked, we are re-enabling from
	 * a stop_tracing_*.
	 */
	if (trace_osnoise_callback_enabled)
		return 0;

	retval = hook_irq_events();
	if (retval)
		return -EINVAL;

	retval = hook_softirq_events();
	if (retval)
		goto out_unhook_irq;

	retval = hook_thread_events();
	/*
	 * All fine!
	 */
	if (!retval)
		return 0;

	unhook_softirq_events();
out_unhook_irq:
	unhook_irq_events();
	return -EINVAL;
}

static void osnoise_unhook_events(void)
{
	unhook_thread_events();
	unhook_softirq_events();
	unhook_irq_events();
}

/*
 * osnoise_workload_start - start the workload and hook to events
 */
static int osnoise_workload_start(void)
{
	int retval;

	/*
	 * Instances need to be registered after calling workload
	 * start. Hence, if there is already an instance, the
	 * workload was already registered. Otherwise, this
	 * code is on the way to register the first instance,
	 * and the workload will start.
	 */
	if (osnoise_has_registered_instances())
		return 0;

	osn_var_reset_all();

	retval = osnoise_hook_events();
	if (retval)
		return retval;

	/*
	 * Make sure that ftrace_nmi_enter/exit() see reset values
	 * before enabling trace_osnoise_callback_enabled.
	 */
	barrier();
	trace_osnoise_callback_enabled = true;

	retval = start_per_cpu_kthreads();
	if (retval) {
		trace_osnoise_callback_enabled = false;
		/*
		 * Make sure that ftrace_nmi_enter/exit() see
		 * trace_osnoise_callback_enabled as false before continuing.
		 */
		barrier();

		osnoise_unhook_events();
		return retval;
	}

	return 0;
}

/*
 * osnoise_workload_stop - stop the workload and unhook the events
 */
static void osnoise_workload_stop(void)
{
	/*
	 * Instances need to be unregistered before calling
	 * stop. Hence, if there is a registered instance, more
	 * than one instance is running, and the workload will not
	 * yet stop. Otherwise, this code is on the way to disable
	 * the last instance, and the workload can stop.
	 */
	if (osnoise_has_registered_instances())
		return;

	/*
	 * If callbacks were already disabled in a previous stop
	 * call, there is no need to disable them again.
	 *
	 * For instance, this happens when tracing is stopped via:
	 * echo 0 > tracing_on
	 * echo nop > current_tracer.
	 */
	if (!trace_osnoise_callback_enabled)
		return;

	trace_osnoise_callback_enabled = false;
	/*
	 * Make sure that ftrace_nmi_enter/exit() see
	 * trace_osnoise_callback_enabled as false before continuing.
	 */
	barrier();

	stop_per_cpu_kthreads();

	osnoise_unhook_events();
}

static void osnoise_tracer_start(struct trace_array *tr)
{
	int retval;

	/*
	 * If the instance is already registered, there is no need to
	 * register it again.
	 */
	if (osnoise_instance_registered(tr))
		return;

	retval = osnoise_workload_start();
	if (retval)
		pr_err(BANNER "Error starting osnoise tracer\n");

	osnoise_register_instance(tr);
}

static void osnoise_tracer_stop(struct trace_array *tr)
{
	osnoise_unregister_instance(tr);
	osnoise_workload_stop();
}

static int osnoise_tracer_init(struct trace_array *tr)
{
	/*
	 * Only allow osnoise tracer if timerlat tracer is not running
	 * already.
	 */
	if (timerlat_enabled())
		return -EBUSY;

	tr->max_latency = 0;

	osnoise_tracer_start(tr);
	return 0;
}

static void osnoise_tracer_reset(struct trace_array *tr)
{
	osnoise_tracer_stop(tr);
}

static struct tracer osnoise_tracer __read_mostly = {
	.name = "osnoise",
	.init = osnoise_tracer_init,
	.reset = osnoise_tracer_reset,
	.start = osnoise_tracer_start,
	.stop = osnoise_tracer_stop,
	.print_header = print_osnoise_headers,
	.allow_instances = true,
};
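
/*
 * Typical osnoise use (see Documentation/trace/osnoise-tracer.rst for
 * the full interface description):
 *
 *   # cd /sys/kernel/tracing
 *   # echo osnoise > current_tracer
 *   # cat trace
 */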

#ifdef CONFIG_TIMERLAT_TRACER
static void timerlat_tracer_start(struct trace_array *tr)
{
	int retval;

	/*
	 * If the instance is already registered, there is no need to
	 * register it again.
	 */
	if (osnoise_instance_registered(tr))
		return;

	retval = osnoise_workload_start();
	if (retval)
		pr_err(BANNER "Error starting timerlat tracer\n");

	osnoise_register_instance(tr);

	return;
}

static void timerlat_tracer_stop(struct trace_array *tr)
{
	int cpu;

	osnoise_unregister_instance(tr);

	/*
	 * Instruct the threads to stop only if this is the last instance.
	 */
	if (!osnoise_has_registered_instances()) {
		for_each_online_cpu(cpu)
			per_cpu(per_cpu_osnoise_var, cpu).sampling = 0;
	}

	osnoise_workload_stop();
}

static int timerlat_tracer_init(struct trace_array *tr)
{
	/*
	 * Only allow timerlat tracer if osnoise tracer is not running already.
	 */
	if (osnoise_has_registered_instances() && !osnoise_data.timerlat_tracer)
		return -EBUSY;

	/*
	 * If this is the first instance, set timerlat_tracer to block
	 * osnoise tracer start.
	 */
	if (!osnoise_has_registered_instances())
		osnoise_data.timerlat_tracer = 1;

	tr->max_latency = 0;
	timerlat_tracer_start(tr);

	return 0;
}

static void timerlat_tracer_reset(struct trace_array *tr)
{
	timerlat_tracer_stop(tr);

	/*
	 * If this is the last instance, reset timerlat_tracer allowing
	 * osnoise to be started.
	 */
	if (!osnoise_has_registered_instances())
		osnoise_data.timerlat_tracer = 0;
}

static struct tracer timerlat_tracer __read_mostly = {
	.name = "timerlat",
	.init = timerlat_tracer_init,
	.reset = timerlat_tracer_reset,
	.start = timerlat_tracer_start,
	.stop = timerlat_tracer_stop,
	.print_header = print_timerlat_headers,
	.allow_instances = true,
};

__init static int init_timerlat_tracer(void)
{
	return register_tracer(&timerlat_tracer);
}
#else /* CONFIG_TIMERLAT_TRACER */
__init static int init_timerlat_tracer(void)
{
	return 0;
}
#endif /* CONFIG_TIMERLAT_TRACER */

__init static int init_osnoise_tracer(void)
{
	int ret;

	mutex_init(&interface_lock);

	cpumask_copy(&osnoise_cpumask, cpu_all_mask);

	ret = register_tracer(&osnoise_tracer);
	if (ret) {
		pr_err(BANNER "Error registering osnoise!\n");
		return ret;
	}

	ret = init_timerlat_tracer();
	if (ret) {
		pr_err(BANNER "Error registering timerlat!\n");
		return ret;
	}

	osnoise_init_hotplug_support();

	INIT_LIST_HEAD_RCU(&osnoise_instances);

	init_tracefs();

	return 0;
}
late_initcall(init_osnoise_tracer);