xref: /openbmc/linux/kernel/trace/trace_osnoise.c (revision 8b8dcc37)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * OS Noise Tracer: computes the OS Noise suffered by a running thread.
4  * Timerlat Tracer: measures the wakeup latency of a timer triggered IRQ and thread.
5  *
6  * Based on "hwlat_detector" tracer by:
7  *   Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
8  *   Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
9  *   With feedback from Clark Williams <williams@redhat.com>
10  *
11  * And also based on the rtsl tracer presented on:
12  *  DE OLIVEIRA, Daniel Bristot, et al. Demystifying the real-time linux
13  *  scheduling latency. In: 32nd Euromicro Conference on Real-Time Systems
14  *  (ECRTS 2020). Schloss Dagstuhl-Leibniz-Zentrum fur Informatik, 2020.
15  *
16  * Copyright (C) 2021 Daniel Bristot de Oliveira, Red Hat, Inc. <bristot@redhat.com>
17  */
18 
19 #include <linux/kthread.h>
20 #include <linux/tracefs.h>
21 #include <linux/uaccess.h>
22 #include <linux/cpumask.h>
23 #include <linux/delay.h>
24 #include <linux/sched/clock.h>
25 #include <uapi/linux/sched/types.h>
26 #include <linux/sched.h>
27 #include "trace.h"
28 
29 #ifdef CONFIG_X86_LOCAL_APIC
30 #include <asm/trace/irq_vectors.h>
31 #undef TRACE_INCLUDE_PATH
32 #undef TRACE_INCLUDE_FILE
33 #endif /* CONFIG_X86_LOCAL_APIC */
34 
35 #include <trace/events/irq.h>
36 #include <trace/events/sched.h>
37 
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/osnoise.h>
40 
41 /*
42  * Default values.
43  */
44 #define BANNER			"osnoise: "
45 #define DEFAULT_SAMPLE_PERIOD	1000000			/* 1s */
46 #define DEFAULT_SAMPLE_RUNTIME	1000000			/* 1s */
47 
48 #define DEFAULT_TIMERLAT_PERIOD	1000			/* 1ms */
49 #define DEFAULT_TIMERLAT_PRIO	95			/* FIFO 95 */
50 
51 /*
52  * trace_array of the enabled osnoise/timerlat instances.
53  */
54 struct osnoise_instance {
55 	struct list_head	list;
56 	struct trace_array	*tr;
57 };
58 
59 static struct list_head osnoise_instances;
60 
61 static bool osnoise_has_registered_instances(void)
62 {
63 	return !!list_first_or_null_rcu(&osnoise_instances,
64 					struct osnoise_instance,
65 					list);
66 }
67 
68 /*
69  * osnoise_instance_registered - check if a tr is already registered
70  */
71 static int osnoise_instance_registered(struct trace_array *tr)
72 {
73 	struct osnoise_instance *inst;
74 	int found = 0;
75 
76 	rcu_read_lock();
77 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
78 		if (inst->tr == tr)
79 			found = 1;
80 	}
81 	rcu_read_unlock();
82 
83 	return found;
84 }
85 
86 /*
87  * osnoise_register_instance - register a new trace instance
88  *
89  * Register a trace_array *tr in the list of instances running
90  * osnoise/timerlat tracers.
91  */
92 static int osnoise_register_instance(struct trace_array *tr)
93 {
94 	struct osnoise_instance *inst;
95 
96 	/*
97 	 * register/unregister serialization is provided by trace's
98 	 * trace_types_lock.
99 	 */
100 	lockdep_assert_held(&trace_types_lock);
101 
102 	inst = kmalloc(sizeof(*inst), GFP_KERNEL);
103 	if (!inst)
104 		return -ENOMEM;
105 
106 	INIT_LIST_HEAD_RCU(&inst->list);
107 	inst->tr = tr;
108 	list_add_tail_rcu(&inst->list, &osnoise_instances);
109 
110 	return 0;
111 }
112 
113 /*
114  * osnoise_unregister_instance - unregister a registered trace instance
115  *
116  * Remove the trace_array *tr from the list of instances running
117  * osnoise/timerlat tracers.
118  */
119 static void osnoise_unregister_instance(struct trace_array *tr)
120 {
121 	struct osnoise_instance *inst;
122 	int found = 0;
123 
124 	/*
125 	 * register/unregister serialization is provided by trace's
126 	 * trace_types_lock.
127 	 */
128 	lockdep_assert_held(&trace_types_lock);
129 
130 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
131 		if (inst->tr == tr) {
132 			list_del_rcu(&inst->list);
133 			found = 1;
134 			break;
135 		}
136 	}
137 
138 	if (!found)
139 		return;
140 
141 	synchronize_rcu();
142 	kfree(inst);
143 }
144 
145 /*
146  * NMI runtime info.
147  */
148 struct osn_nmi {
149 	u64	count;
150 	u64	delta_start;
151 };
152 
153 /*
154  * IRQ runtime info.
155  */
156 struct osn_irq {
157 	u64	count;
158 	u64	arrival_time;
159 	u64	delta_start;
160 };
161 
162 #define IRQ_CONTEXT	0
163 #define THREAD_CONTEXT	1
164 /*
165  * softirq runtime info.
166  */
167 struct osn_softirq {
168 	u64	count;
169 	u64	arrival_time;
170 	u64	delta_start;
171 };
172 
173 /*
174  * thread runtime info.
175  */
176 struct osn_thread {
177 	u64	count;
178 	u64	arrival_time;
179 	u64	delta_start;
180 };
181 
182 /*
183  * Runtime information: this structure saves the runtime information used by
184  * one sampling thread.
185  */
186 struct osnoise_variables {
187 	struct task_struct	*kthread;
188 	bool			sampling;
189 	pid_t			pid;
190 	struct osn_nmi		nmi;
191 	struct osn_irq		irq;
192 	struct osn_softirq	softirq;
193 	struct osn_thread	thread;
194 	local_t			int_counter;
195 };
196 
197 /*
198  * Per-cpu runtime information.
199  */
200 DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var);
201 
202 /*
203  * this_cpu_osn_var - Return the per-cpu osnoise_variables for the current CPU
204  */
205 static inline struct osnoise_variables *this_cpu_osn_var(void)
206 {
207 	return this_cpu_ptr(&per_cpu_osnoise_var);
208 }
209 
210 #ifdef CONFIG_TIMERLAT_TRACER
211 /*
212  * Runtime information for the timer mode.
213  */
214 struct timerlat_variables {
215 	struct task_struct	*kthread;
216 	struct hrtimer		timer;
217 	u64			rel_period;
218 	u64			abs_period;
219 	bool			tracing_thread;
220 	u64			count;
221 };
222 
223 DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var);
224 
225 /*
226  * this_cpu_tmr_var - Return the per-cpu timerlat_variables for the current CPU
227  */
228 static inline struct timerlat_variables *this_cpu_tmr_var(void)
229 {
230 	return this_cpu_ptr(&per_cpu_timerlat_var);
231 }
232 
233 /*
234  * tlat_var_reset - Reset the values of the per-cpu timerlat_variables
235  */
236 static inline void tlat_var_reset(void)
237 {
238 	struct timerlat_variables *tlat_var;
239 	int cpu;
240 	/*
241 	 * So far, all the values are initialized as 0, so
242 	 * zeroing the structure is perfect.
243 	 */
244 	for_each_cpu(cpu, cpu_online_mask) {
245 		tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);
246 		memset(tlat_var, 0, sizeof(*tlat_var));
247 	}
248 }
249 #else /* CONFIG_TIMERLAT_TRACER */
250 #define tlat_var_reset()	do {} while (0)
251 #endif /* CONFIG_TIMERLAT_TRACER */
252 
253 /*
254  * osn_var_reset - Reset the values of the per-cpu osnoise_variables
255  */
256 static inline void osn_var_reset(void)
257 {
258 	struct osnoise_variables *osn_var;
259 	int cpu;
260 
261 	/*
262 	 * So far, all the values are initialized as 0, so
263 	 * zeroing the structure is perfect.
264 	 */
265 	for_each_cpu(cpu, cpu_online_mask) {
266 		osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
267 		memset(osn_var, 0, sizeof(*osn_var));
268 	}
269 }
270 
271 /*
272  * osn_var_reset_all - Reset the values of all per-cpu osnoise and timerlat variables
273  */
274 static inline void osn_var_reset_all(void)
275 {
276 	osn_var_reset();
277 	tlat_var_reset();
278 }
279 
280 /*
281  * Tells NMIs to call back to the osnoise tracer to record timestamps.
282  */
283 bool trace_osnoise_callback_enabled;
284 
285 /*
286  * osnoise sample structure definition. Used to store the statistics of a
287  * sample run.
288  */
289 struct osnoise_sample {
290 	u64			runtime;	/* runtime */
291 	u64			noise;		/* noise */
292 	u64			max_sample;	/* max single noise sample */
293 	int			hw_count;	/* # HW (incl. hypervisor) interference */
294 	int			nmi_count;	/* # NMIs during this sample */
295 	int			irq_count;	/* # IRQs during this sample */
296 	int			softirq_count;	/* # softirqs during this sample */
297 	int			thread_count;	/* # threads during this sample */
298 };
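
/*
 * Illustrative example of how these fields relate (values hypothetical):
 * with the default 1s runtime, runtime = 1000000 us; if the sum of the
 * noise observed in that window is 5000 us, roughly 99.5% of the CPU was
 * available to the workload, i.e.:
 *
 *	available = 100 - (noise * 100) / runtime
 */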
299 
300 #ifdef CONFIG_TIMERLAT_TRACER
301 /*
302  * timerlat sample structure definition. Used to store the statistics of
303  * a sample run.
304  */
305 struct timerlat_sample {
306 	u64			timer_latency;	/* timer_latency */
307 	unsigned int		seqnum;		/* unique sequence */
308 	int			context;	/* timer context */
309 };
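
/*
 * Note: each timer activation normally yields two samples sharing the
 * same seqnum: one emitted from the timer IRQ handler (context ==
 * IRQ_CONTEXT) and one from the woken timerlat thread (context ==
 * THREAD_CONTEXT), so the thread latency of an activation can be matched
 * with its IRQ latency.
 */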
310 #endif
311 
312 /*
313  * Protect the interface.
314  */
315 struct mutex interface_lock;
316 
317 /*
318  * Tracer data.
319  */
320 static struct osnoise_data {
321 	u64	sample_period;		/* total sampling period */
322 	u64	sample_runtime;		/* active sampling portion of period */
323 	u64	stop_tracing;		/* stop trace in the internal operation (loop/irq) */
324 	u64	stop_tracing_total;	/* stop trace in the final operation (report/thread) */
325 #ifdef CONFIG_TIMERLAT_TRACER
326 	u64	timerlat_period;	/* timerlat period */
327 	u64	print_stack;		/* print IRQ stack if total > */
328 	int	timerlat_tracer;	/* timerlat tracer */
329 #endif
330 	bool	tainted;		/* inform users and developers about a problem */
331 } osnoise_data = {
332 	.sample_period			= DEFAULT_SAMPLE_PERIOD,
333 	.sample_runtime			= DEFAULT_SAMPLE_RUNTIME,
334 	.stop_tracing			= 0,
335 	.stop_tracing_total		= 0,
336 #ifdef CONFIG_TIMERLAT_TRACER
337 	.print_stack			= 0,
338 	.timerlat_period		= DEFAULT_TIMERLAT_PERIOD,
339 	.timerlat_tracer		= 0,
340 #endif
341 };
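
/*
 * The stop_tracing* fields are in microseconds and are disabled when 0
 * (the default). For example (illustrative values): stop_tracing = 100
 * stops the trace on the first single noise occurrence above 100 us,
 * while stop_tracing_total = 500 stops it when the noise accumulated
 * over a sample period exceeds 500 us. See run_osnoise().
 */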
342 
343 #ifdef CONFIG_TIMERLAT_TRACER
344 static inline bool timerlat_enabled(void)
345 {
346 	return osnoise_data.timerlat_tracer;
347 }
348 
349 static inline int timerlat_softirq_exit(struct osnoise_variables *osn_var)
350 {
351 	struct timerlat_variables *tlat_var = this_cpu_tmr_var();
352 	/*
353 	 * If the timerlat is enabled, but the irq handler did
354 	 * not run yet enabling timerlat_tracer, do not trace.
355 	 */
356 	if (!tlat_var->tracing_thread) {
357 		osn_var->softirq.arrival_time = 0;
358 		osn_var->softirq.delta_start = 0;
359 		return 0;
360 	}
361 	return 1;
362 }
363 
364 static inline int timerlat_thread_exit(struct osnoise_variables *osn_var)
365 {
366 	struct timerlat_variables *tlat_var = this_cpu_tmr_var();
367 	/*
368 	 * If the timerlat is enabled, but the irq handler did
369 	 * not run yet enabling timerlat_tracer, do not trace.
370 	 */
371 	if (!tlat_var->tracing_thread) {
372 		osn_var->thread.delta_start = 0;
373 		osn_var->thread.arrival_time = 0;
374 		return 0;
375 	}
376 	return 1;
377 }
378 #else /* CONFIG_TIMERLAT_TRACER */
379 static inline bool timerlat_enabled(void)
380 {
381 	return false;
382 }
383 
384 static inline int timerlat_softirq_exit(struct osnoise_variables *osn_var)
385 {
386 	return 1;
387 }
388 static inline int timerlat_thread_exit(struct osnoise_variables *osn_var)
389 {
390 	return 1;
391 }
392 #endif
393 
394 #ifdef CONFIG_PREEMPT_RT
395 /*
396  * Print the osnoise header info.
397  */
398 static void print_osnoise_headers(struct seq_file *s)
399 {
400 	if (osnoise_data.tainted)
401 		seq_puts(s, "# osnoise is tainted!\n");
402 
403 	seq_puts(s, "#                                _-------=> irqs-off\n");
404 	seq_puts(s, "#                               / _------=> need-resched\n");
405 	seq_puts(s, "#                              | / _-----=> need-resched-lazy\n");
406 	seq_puts(s, "#                              || / _----=> hardirq/softirq\n");
407 	seq_puts(s, "#                              ||| / _---=> preempt-depth\n");
408 	seq_puts(s, "#                              |||| / _--=> preempt-lazy-depth\n");
409 	seq_puts(s, "#                              ||||| / _-=> migrate-disable\n");
410 
411 	seq_puts(s, "#                              |||||| /          ");
412 	seq_puts(s, "                                     MAX\n");
413 
414 	seq_puts(s, "#                              ||||| /                         ");
415 	seq_puts(s, "                    SINGLE      Interference counters:\n");
416 
417 	seq_puts(s, "#                              |||||||               RUNTIME   ");
418 	seq_puts(s, "   NOISE  %% OF CPU  NOISE    +-----------------------------+\n");
419 
420 	seq_puts(s, "#           TASK-PID      CPU# |||||||   TIMESTAMP    IN US    ");
421 	seq_puts(s, "   IN US  AVAILABLE  IN US     HW    NMI    IRQ   SIRQ THREAD\n");
422 
423 	seq_puts(s, "#              | |         |   |||||||      |           |      ");
424 	seq_puts(s, "       |    |            |      |      |      |      |      |\n");
425 }
426 #else /* CONFIG_PREEMPT_RT */
427 static void print_osnoise_headers(struct seq_file *s)
428 {
429 	if (osnoise_data.tainted)
430 		seq_puts(s, "# osnoise is tainted!\n");
431 
432 	seq_puts(s, "#                                _-----=> irqs-off\n");
433 	seq_puts(s, "#                               / _----=> need-resched\n");
434 	seq_puts(s, "#                              | / _---=> hardirq/softirq\n");
435 	seq_puts(s, "#                              || / _--=> preempt-depth\n");
436 	seq_puts(s, "#                              ||| / _-=> migrate-disable     ");
437 	seq_puts(s, "                    MAX\n");
438 	seq_puts(s, "#                              |||| /     delay               ");
439 	seq_puts(s, "                    SINGLE      Interference counters:\n");
440 
441 	seq_puts(s, "#                              |||||               RUNTIME   ");
442 	seq_puts(s, "   NOISE  %% OF CPU  NOISE    +-----------------------------+\n");
443 
444 	seq_puts(s, "#           TASK-PID      CPU# |||||   TIMESTAMP    IN US    ");
445 	seq_puts(s, "   IN US  AVAILABLE  IN US     HW    NMI    IRQ   SIRQ THREAD\n");
446 
447 	seq_puts(s, "#              | |         |   |||||      |           |      ");
448 	seq_puts(s, "       |    |            |      |      |      |      |      |\n");
449 }
450 #endif /* CONFIG_PREEMPT_RT */
451 
452 /*
453  * osnoise_taint - report an osnoise error.
454  */
455 #define osnoise_taint(msg) ({							\
456 	struct osnoise_instance *inst;						\
457 	struct trace_buffer *buffer;						\
458 										\
459 	rcu_read_lock();							\
460 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {		\
461 		buffer = inst->tr->array_buffer.buffer;				\
462 		trace_array_printk_buf(buffer, _THIS_IP_, msg);			\
463 	}									\
464 	rcu_read_unlock();							\
465 	osnoise_data.tainted = true;						\
466 })
467 
468 /*
469  * Record an osnoise_sample into the tracer buffer.
470  */
471 static void
472 __trace_osnoise_sample(struct osnoise_sample *sample, struct trace_buffer *buffer)
473 {
474 	struct trace_event_call *call = &event_osnoise;
475 	struct ring_buffer_event *event;
476 	struct osnoise_entry *entry;
477 
478 	event = trace_buffer_lock_reserve(buffer, TRACE_OSNOISE, sizeof(*entry),
479 					  tracing_gen_ctx());
480 	if (!event)
481 		return;
482 	entry	= ring_buffer_event_data(event);
483 	entry->runtime		= sample->runtime;
484 	entry->noise		= sample->noise;
485 	entry->max_sample	= sample->max_sample;
486 	entry->hw_count		= sample->hw_count;
487 	entry->nmi_count	= sample->nmi_count;
488 	entry->irq_count	= sample->irq_count;
489 	entry->softirq_count	= sample->softirq_count;
490 	entry->thread_count	= sample->thread_count;
491 
492 	if (!call_filter_check_discard(call, entry, buffer, event))
493 		trace_buffer_unlock_commit_nostack(buffer, event);
494 }
495 
496 /*
497  * Record an osnoise_sample on all osnoise instances.
498  */
499 static void trace_osnoise_sample(struct osnoise_sample *sample)
500 {
501 	struct osnoise_instance *inst;
502 	struct trace_buffer *buffer;
503 
504 	rcu_read_lock();
505 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
506 		buffer = inst->tr->array_buffer.buffer;
507 		__trace_osnoise_sample(sample, buffer);
508 	}
509 	rcu_read_unlock();
510 }
511 
512 #ifdef CONFIG_TIMERLAT_TRACER
513 /*
514  * Print the timerlat header info.
515  */
516 #ifdef CONFIG_PREEMPT_RT
517 static void print_timerlat_headers(struct seq_file *s)
518 {
519 	seq_puts(s, "#                                _-------=> irqs-off\n");
520 	seq_puts(s, "#                               / _------=> need-resched\n");
521 	seq_puts(s, "#                              | / _-----=> need-resched-lazy\n");
522 	seq_puts(s, "#                              || / _----=> hardirq/softirq\n");
523 	seq_puts(s, "#                              ||| / _---=> preempt-depth\n");
524 	seq_puts(s, "#                              |||| / _--=> preempt-lazy-depth\n");
525 	seq_puts(s, "#                              ||||| / _-=> migrate-disable\n");
526 	seq_puts(s, "#                              |||||| /\n");
527 	seq_puts(s, "#                              |||||||             ACTIVATION\n");
528 	seq_puts(s, "#           TASK-PID      CPU# |||||||   TIMESTAMP    ID     ");
529 	seq_puts(s, "       CONTEXT                LATENCY\n");
530 	seq_puts(s, "#              | |         |   |||||||      |         |      ");
531 	seq_puts(s, "            |                       |\n");
532 }
533 #else /* CONFIG_PREEMPT_RT */
534 static void print_timerlat_headers(struct seq_file *s)
535 {
536 	seq_puts(s, "#                                _-----=> irqs-off\n");
537 	seq_puts(s, "#                               / _----=> need-resched\n");
538 	seq_puts(s, "#                              | / _---=> hardirq/softirq\n");
539 	seq_puts(s, "#                              || / _--=> preempt-depth\n");
540 	seq_puts(s, "#                              ||| / _-=> migrate-disable\n");
541 	seq_puts(s, "#                              |||| /     delay\n");
542 	seq_puts(s, "#                              |||||            ACTIVATION\n");
543 	seq_puts(s, "#           TASK-PID      CPU# |||||   TIMESTAMP   ID      ");
544 	seq_puts(s, "      CONTEXT                 LATENCY\n");
545 	seq_puts(s, "#              | |         |   |||||      |         |      ");
546 	seq_puts(s, "            |                       |\n");
547 }
548 #endif /* CONFIG_PREEMPT_RT */
549 
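/*
 * Record a timerlat_sample into the tracer buffer.
 */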
550 static void
551 __trace_timerlat_sample(struct timerlat_sample *sample, struct trace_buffer *buffer)
552 {
553 	struct trace_event_call *call = &event_osnoise;
554 	struct ring_buffer_event *event;
555 	struct timerlat_entry *entry;
556 
557 	event = trace_buffer_lock_reserve(buffer, TRACE_TIMERLAT, sizeof(*entry),
558 					  tracing_gen_ctx());
559 	if (!event)
560 		return;
561 	entry	= ring_buffer_event_data(event);
562 	entry->seqnum			= sample->seqnum;
563 	entry->context			= sample->context;
564 	entry->timer_latency		= sample->timer_latency;
565 
566 	if (!call_filter_check_discard(call, entry, buffer, event))
567 		trace_buffer_unlock_commit_nostack(buffer, event);
568 }
569 
570 /*
571  * Record a timerlat_sample on all osnoise instances.
572  */
573 static void trace_timerlat_sample(struct timerlat_sample *sample)
574 {
575 	struct osnoise_instance *inst;
576 	struct trace_buffer *buffer;
577 
578 	rcu_read_lock();
579 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
580 		buffer = inst->tr->array_buffer.buffer;
581 		__trace_timerlat_sample(sample, buffer);
582 	}
583 	rcu_read_unlock();
584 }
585 
586 #ifdef CONFIG_STACKTRACE
587 
588 #define	MAX_CALLS	256
589 
590 /*
591  * Stack trace will take place only at IRQ level, so there is no need
592  * to control nesting here.
593  */
594 struct trace_stack {
595 	int		stack_size;
596 	int		nr_entries;
597 	unsigned long	calls[MAX_CALLS];
598 };
599 
600 static DEFINE_PER_CPU(struct trace_stack, trace_stack);
601 
602 /*
603  * timerlat_save_stack - save a stack trace without printing
604  *
605  * Save the current stack trace without printing. The
606  * stack will be printed later, after the end of the measurement.
607  */
608 static void timerlat_save_stack(int skip)
609 {
610 	unsigned int size, nr_entries;
611 	struct trace_stack *fstack;
612 
613 	fstack = this_cpu_ptr(&trace_stack);
614 
615 	size = ARRAY_SIZE(fstack->calls);
616 
617 	nr_entries = stack_trace_save(fstack->calls, size, skip);
618 
619 	fstack->stack_size = nr_entries * sizeof(unsigned long);
620 	fstack->nr_entries = nr_entries;
621 
624 }
625 
626 static void
627 __timerlat_dump_stack(struct trace_buffer *buffer, struct trace_stack *fstack, unsigned int size)
628 {
629 	struct trace_event_call *call = &event_osnoise;
630 	struct ring_buffer_event *event;
631 	struct stack_entry *entry;
632 
633 	event = trace_buffer_lock_reserve(buffer, TRACE_STACK, sizeof(*entry) + size,
634 					  tracing_gen_ctx());
635 	if (!event)
636 		return;
637 
638 	entry = ring_buffer_event_data(event);
639 
640 	memcpy(&entry->caller, fstack->calls, size);
641 	entry->size = fstack->nr_entries;
642 
643 	if (!call_filter_check_discard(call, entry, buffer, event))
644 		trace_buffer_unlock_commit_nostack(buffer, event);
645 }
646 
647 /*
648  * timerlat_dump_stack - dump a stack trace previously saved
649  */
650 static void timerlat_dump_stack(u64 latency)
651 {
652 	struct osnoise_instance *inst;
653 	struct trace_buffer *buffer;
654 	struct trace_stack *fstack;
655 	unsigned int size;
656 
657 	/*
658 	 * trace only if latency > print_stack config, if enabled.
659 	 */
660 	if (!osnoise_data.print_stack || osnoise_data.print_stack > latency)
661 		return;
662 
663 	preempt_disable_notrace();
664 	fstack = this_cpu_ptr(&trace_stack);
665 	size = fstack->stack_size;
666 
667 	rcu_read_lock();
668 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
669 		buffer = inst->tr->array_buffer.buffer;
670 		__timerlat_dump_stack(buffer, fstack, size);
671 
673 	rcu_read_unlock();
674 	preempt_enable_notrace();
675 }
676 #else /* CONFIG_STACKTRACE */
677 #define timerlat_dump_stack(latency) do {} while (0)
678 #define timerlat_save_stack(a) do {} while (0)
679 #endif /* CONFIG_STACKTRACE */
680 #endif /* CONFIG_TIMERLAT_TRACER */
681 
682 /*
683  * Macros to encapsulate the time capturing infrastructure.
684  */
685 #define time_get()	trace_clock_local()
686 #define time_to_us(x)	div_u64(x, 1000)
687 #define time_sub(a, b)	((a) - (b))
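
/*
 * For example, time_get() returns nanoseconds, so time_sub(2000500, 2000000)
 * yields 500 ns and time_to_us(1500000) yields 1500 us. div_u64() is used
 * to avoid an expensive 64-bit division on 32-bit architectures.
 */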
688 
689 /*
690  * cond_move_irq_delta_start - Forward the delta_start of a running IRQ
691  *
692  * If an IRQ is preempted by an NMI, its delta_start is pushed forward
693  * to discount the NMI interference.
694  *
695  * See get_int_safe_duration().
696  */
697 static inline void
698 cond_move_irq_delta_start(struct osnoise_variables *osn_var, u64 duration)
699 {
700 	if (osn_var->irq.delta_start)
701 		osn_var->irq.delta_start += duration;
702 }
703 
704 #ifndef CONFIG_PREEMPT_RT
705 /*
706  * cond_move_softirq_delta_start - Forward the delta_start of a running softirq.
707  *
708  * If a softirq is preempted by an IRQ or NMI, its delta_start is pushed
709  * forward to discount the interference.
710  *
711  * See get_int_safe_duration().
712  */
713 static inline void
714 cond_move_softirq_delta_start(struct osnoise_variables *osn_var, u64 duration)
715 {
716 	if (osn_var->softirq.delta_start)
717 		osn_var->softirq.delta_start += duration;
718 }
719 #else /* CONFIG_PREEMPT_RT */
720 #define cond_move_softirq_delta_start(osn_var, duration) do {} while (0)
721 #endif
722 
723 /*
724  * cond_move_thread_delta_start - Forward the delta_start of a running thread
725  *
726  * If a noisy thread is preempted by an softirq, IRQ or NMI, its delta_start
727  * is pushed forward to discount the interference.
728  *
729  * See get_int_safe_duration().
730  */
731 static inline void
732 cond_move_thread_delta_start(struct osnoise_variables *osn_var, u64 duration)
733 {
734 	if (osn_var->thread.delta_start)
735 		osn_var->thread.delta_start += duration;
736 }
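
/*
 * Worked example of the delta_start forwarding above (timestamps are
 * illustrative): a thread noise window opens at t = 100 (delta_start =
 * 100) and an IRQ runs from t = 200 to t = 230. At the IRQ exit, the
 * thread delta_start is moved forward to 130, so a duration computed at
 * t = 300 yields 300 - 130 = 170, with the 30 units of IRQ noise
 * discounted.
 */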
737 
738 /*
739  * get_int_safe_duration - Get the duration of a window
740  *
741  * The irq, softirq and thread variables need to have their duration without
742  * the interference from higher priority interrupts. Instead of keeping a
743  * variable to discount the interrupt interference from these variables, the
744  * starting time of these variables is pushed forward with the interrupt's
745  * duration. In this way, a single variable is used to:
746  *
747  *   - Know if a given window is being measured.
748  *   - Account its duration.
749  *   - Discount the interference.
750  *
751  * To avoid getting inconsistent values, e.g.,:
752  *
753  *	now = time_get()
754  *		--->	interrupt!
755  *			delta_start -= int duration;
756  *		<---
757  *	duration = now - delta_start;
758  *
759  *	result: negative duration if the variable duration before the
760  *	interrupt was smaller than the interrupt execution.
761  *
762  * A counter of interrupts is used. If the counter increased, try
763  * to capture an interference safe duration.
764  */
765 static inline s64
766 get_int_safe_duration(struct osnoise_variables *osn_var, u64 *delta_start)
767 {
768 	u64 int_counter, now;
769 	s64 duration;
770 
771 	do {
772 		int_counter = local_read(&osn_var->int_counter);
773 		/* synchronize with interrupts */
774 		barrier();
775 
776 		now = time_get();
777 		duration = (now - *delta_start);
778 
779 		/* synchronize with interrupts */
780 		barrier();
781 	} while (int_counter != local_read(&osn_var->int_counter));
782 
783 	/*
784 	 * This is evidence of a race condition that causes
785 	 * a value to be "discounted" too much.
786 	 */
787 	if (duration < 0)
788 		osnoise_taint("Negative duration!\n");
789 
790 	*delta_start = 0;
791 
792 	return duration;
793 }
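
/*
 * A minimal usage sketch of these helpers (assuming a window was opened
 * with set_int_safe_time()):
 *
 *	set_int_safe_time(osn_var, &osn_var->irq.delta_start);
 *	... interrupts may hit here, moving delta_start forward ...
 *	duration = get_int_safe_duration(osn_var, &osn_var->irq.delta_start);
 *
 * The retry loop works like the read side of a sequence counter: if
 * int_counter did not change between the two reads, (now - *delta_start)
 * is consistent, and *delta_start is zeroed to mark the window as closed.
 */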
794 
795 /*
797  * set_int_safe_time - Save the current time on *time, aware of interference
798  *
799  * Get the time, taking into consideration a possible interference from
800  * higher priority interrupts.
801  *
802  * See get_int_safe_duration() for an explanation.
803  */
804 static u64
805 set_int_safe_time(struct osnoise_variables *osn_var, u64 *time)
806 {
807 	u64 int_counter;
808 
809 	do {
810 		int_counter = local_read(&osn_var->int_counter);
811 		/* synchronize with interrupts */
812 		barrier();
813 
814 		*time = time_get();
815 
816 		/* synchronize with interrupts */
817 		barrier();
818 	} while (int_counter != local_read(&osn_var->int_counter));
819 
820 	return int_counter;
821 }
822 
823 #ifdef CONFIG_TIMERLAT_TRACER
824 /*
825  * copy_int_safe_time - Copy *src into *dst aware of interference
826  */
827 static u64
828 copy_int_safe_time(struct osnoise_variables *osn_var, u64 *dst, u64 *src)
829 {
830 	u64 int_counter;
831 
832 	do {
833 		int_counter = local_read(&osn_var->int_counter);
834 		/* synchronize with interrupts */
835 		barrier();
836 
837 		*dst = *src;
838 
839 		/* synchronize with interrupts */
840 		barrier();
841 	} while (int_counter != local_read(&osn_var->int_counter));
842 
843 	return int_counter;
844 }
845 #endif /* CONFIG_TIMERLAT_TRACER */
846 
847 /*
848  * trace_osnoise_callback - NMI entry/exit callback
849  *
850  * This function is called at the NMI entry and exit code. The bool enter
851  * distinguishes between either case. This function is used to note an NMI
852  * occurrence, compute the noise caused by the NMI, and to remove the noise
853  * it is potentially causing on other interference variables.
854  */
855 void trace_osnoise_callback(bool enter)
856 {
857 	struct osnoise_variables *osn_var = this_cpu_osn_var();
858 	u64 duration;
859 
860 	if (!osn_var->sampling)
861 		return;
862 
863 	/*
864 	 * Currently trace_clock_local() calls sched_clock() and the
865 	 * generic version is not NMI safe.
866 	 */
867 	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
868 		if (enter) {
869 			osn_var->nmi.delta_start = time_get();
870 			local_inc(&osn_var->int_counter);
871 		} else {
872 			duration = time_get() - osn_var->nmi.delta_start;
873 
874 			trace_nmi_noise(osn_var->nmi.delta_start, duration);
875 
876 			cond_move_irq_delta_start(osn_var, duration);
877 			cond_move_softirq_delta_start(osn_var, duration);
878 			cond_move_thread_delta_start(osn_var, duration);
879 		}
880 	}
881 
882 	if (enter)
883 		osn_var->nmi.count++;
884 }
885 
886 /*
887  * osnoise_trace_irq_entry - Note the starting of an IRQ
888  *
889  * Save the starting time of an IRQ. As IRQs are non-preemptive to other IRQs,
890  * it is safe to use a single variable (osn_var->irq) to save the statistics.
891  * The arrival_time is used to report... the arrival time. The delta_start
892  * is used to compute the duration at the IRQ exit handler. See
893  * cond_move_irq_delta_start().
894  */
895 void osnoise_trace_irq_entry(int id)
896 {
897 	struct osnoise_variables *osn_var = this_cpu_osn_var();
898 
899 	if (!osn_var->sampling)
900 		return;
901 	/*
902 	 * This value will be used in the report, but not to compute
903 	 * the execution time, so it is safe to get it unsafe.
904 	 */
905 	osn_var->irq.arrival_time = time_get();
906 	set_int_safe_time(osn_var, &osn_var->irq.delta_start);
907 	osn_var->irq.count++;
908 
909 	local_inc(&osn_var->int_counter);
910 }
911 
912 /*
913  * osnoise_trace_irq_exit - Note the end of an IRQ, save data and trace
914  *
915  * Computes the duration of the IRQ noise and traces it. Also discounts the
916  * interference from other sources of noise that may currently be accounted.
917  */
918 void osnoise_trace_irq_exit(int id, const char *desc)
919 {
920 	struct osnoise_variables *osn_var = this_cpu_osn_var();
921 	int duration;
922 
923 	if (!osn_var->sampling)
924 		return;
925 
926 	duration = get_int_safe_duration(osn_var, &osn_var->irq.delta_start);
927 	trace_irq_noise(id, desc, osn_var->irq.arrival_time, duration);
928 	osn_var->irq.arrival_time = 0;
929 	cond_move_softirq_delta_start(osn_var, duration);
930 	cond_move_thread_delta_start(osn_var, duration);
931 }
932 
933 /*
934  * trace_irqentry_callback - Callback to the irq:irq_entry traceevent
935  *
936  * Used to note the starting of an IRQ occurrence.
937  */
938 static void trace_irqentry_callback(void *data, int irq,
939 				    struct irqaction *action)
940 {
941 	osnoise_trace_irq_entry(irq);
942 }
943 
944 /*
945  * trace_irqexit_callback - Callback to the irq:irq_exit traceevent
946  *
947  * Used to note the end of an IRQ occurrence.
948  */
949 static void trace_irqexit_callback(void *data, int irq,
950 				   struct irqaction *action, int ret)
951 {
952 	osnoise_trace_irq_exit(irq, action->name);
953 }
954 
955 /*
956  * arch specific register function.
957  */
958 int __weak osnoise_arch_register(void)
959 {
960 	return 0;
961 }
962 
963 /*
964  * arch specific unregister function.
965  */
966 void __weak osnoise_arch_unregister(void)
967 {
968 	return;
969 }
970 
971 /*
972  * hook_irq_events - Hook IRQ handling events
973  *
974  * This function hooks the IRQ related callbacks to the respective trace
975  * events.
976  */
977 static int hook_irq_events(void)
978 {
979 	int ret;
980 
981 	ret = register_trace_irq_handler_entry(trace_irqentry_callback, NULL);
982 	if (ret)
983 		goto out_err;
984 
985 	ret = register_trace_irq_handler_exit(trace_irqexit_callback, NULL);
986 	if (ret)
987 		goto out_unregister_entry;
988 
989 	ret = osnoise_arch_register();
990 	if (ret)
991 		goto out_irq_exit;
992 
993 	return 0;
994 
995 out_irq_exit:
996 	unregister_trace_irq_handler_exit(trace_irqexit_callback, NULL);
997 out_unregister_entry:
998 	unregister_trace_irq_handler_entry(trace_irqentry_callback, NULL);
999 out_err:
1000 	return -EINVAL;
1001 }
1002 
1003 /*
1004  * unhook_irq_events - Unhook IRQ handling events
1005  *
1006  * This function unhooks the IRQ related callbacks from the respective trace
1007  * events.
1008  */
1009 static void unhook_irq_events(void)
1010 {
1011 	osnoise_arch_unregister();
1012 	unregister_trace_irq_handler_exit(trace_irqexit_callback, NULL);
1013 	unregister_trace_irq_handler_entry(trace_irqentry_callback, NULL);
1014 }
1015 
1016 #ifndef CONFIG_PREEMPT_RT
1017 /*
1018  * trace_softirq_entry_callback - Note the starting of a softirq
1019  *
1020  * Save the starting time of a softirq. As softirqs are non-preemptive to
1021  * other softirqs, it is safe to use a single variable (osn_var->softirq)
1022  * to save the statistics. The arrival_time is used to report... the
1023  * arrival time. The delta_start is used to compute the duration at the
1024  * softirq exit handler. See cond_move_softirq_delta_start().
1025  */
1026 static void trace_softirq_entry_callback(void *data, unsigned int vec_nr)
1027 {
1028 	struct osnoise_variables *osn_var = this_cpu_osn_var();
1029 
1030 	if (!osn_var->sampling)
1031 		return;
1032 	/*
1033 	 * This value will be used in the report, but not to compute
1034 	 * the execution time, so it is safe to get it unsafe.
1035 	 */
1036 	osn_var->softirq.arrival_time = time_get();
1037 	set_int_safe_time(osn_var, &osn_var->softirq.delta_start);
1038 	osn_var->softirq.count++;
1039 
1040 	local_inc(&osn_var->int_counter);
1041 }
1042 
1043 /*
1044  * trace_softirq_exit_callback - Note the end of a softirq
1045  *
1046  * Computes the duration of the softirq noise and traces it. Also discounts the
1047  * interference from other sources of noise that may currently be accounted.
1048  */
1049 static void trace_softirq_exit_callback(void *data, unsigned int vec_nr)
1050 {
1051 	struct osnoise_variables *osn_var = this_cpu_osn_var();
1052 	int duration;
1053 
1054 	if (!osn_var->sampling)
1055 		return;
1056 
1057 	if (unlikely(timerlat_enabled()))
1058 		if (!timerlat_softirq_exit(osn_var))
1059 			return;
1060 
1061 	duration = get_int_safe_duration(osn_var, &osn_var->softirq.delta_start);
1062 	trace_softirq_noise(vec_nr, osn_var->softirq.arrival_time, duration);
1063 	cond_move_thread_delta_start(osn_var, duration);
1064 	osn_var->softirq.arrival_time = 0;
1065 }
1066 
1067 /*
1068  * hook_softirq_events - Hook softirq handling events
1069  *
1070  * This function hooks the softirq related callbacks to the respective trace
1071  * events.
1072  */
1073 static int hook_softirq_events(void)
1074 {
1075 	int ret;
1076 
1077 	ret = register_trace_softirq_entry(trace_softirq_entry_callback, NULL);
1078 	if (ret)
1079 		goto out_err;
1080 
1081 	ret = register_trace_softirq_exit(trace_softirq_exit_callback, NULL);
1082 	if (ret)
1083 		goto out_unreg_entry;
1084 
1085 	return 0;
1086 
1087 out_unreg_entry:
1088 	unregister_trace_softirq_entry(trace_softirq_entry_callback, NULL);
1089 out_err:
1090 	return -EINVAL;
1091 }
1092 
1093 /*
1094  * unhook_softirq_events - Unhook softirq handling events
1095  *
1096  * This function unhooks the softirq related callbacks from the respective trace
1097  * events.
1098  */
1099 static void unhook_softirq_events(void)
1100 {
1101 	unregister_trace_softirq_entry(trace_softirq_entry_callback, NULL);
1102 	unregister_trace_softirq_exit(trace_softirq_exit_callback, NULL);
1103 }
1104 #else /* CONFIG_PREEMPT_RT */
1105 /*
1106  * softirqs are threads on PREEMPT_RT.
1107  */
1108 static int hook_softirq_events(void)
1109 {
1110 	return 0;
1111 }
1112 static void unhook_softirq_events(void)
1113 {
1114 }
1115 #endif
1116 
1117 /*
1118  * thread_entry - Record the starting of a thread noise window
1119  *
1120  * It saves the context switch time for a noisy thread, and increments
1121  * the interference counters.
1122  */
1123 static void
1124 thread_entry(struct osnoise_variables *osn_var, struct task_struct *t)
1125 {
1126 	if (!osn_var->sampling)
1127 		return;
1128 	/*
1129 	 * The arrival time will be used in the report, but not to compute
1130 	 * the execution time, so it is safe to get it unsafe.
1131 	 */
1132 	osn_var->thread.arrival_time = time_get();
1133 
1134 	set_int_safe_time(osn_var, &osn_var->thread.delta_start);
1135 
1136 	osn_var->thread.count++;
1137 	local_inc(&osn_var->int_counter);
1138 }
1139 
1140 /*
1141  * thread_exit - Report the end of a thread noise window
1142  *
1143  * It computes the total noise from a thread, tracing if needed.
1144  */
1145 static void
1146 thread_exit(struct osnoise_variables *osn_var, struct task_struct *t)
1147 {
1148 	int duration;
1149 
1150 	if (!osn_var->sampling)
1151 		return;
1152 
1153 	if (unlikely(timerlat_enabled()))
1154 		if (!timerlat_thread_exit(osn_var))
1155 			return;
1156 
1157 	duration = get_int_safe_duration(osn_var, &osn_var->thread.delta_start);
1158 
1159 	trace_thread_noise(t, osn_var->thread.arrival_time, duration);
1160 
1161 	osn_var->thread.arrival_time = 0;
1162 }
1163 
1164 /*
1165  * trace_sched_switch - sched:sched_switch trace event handler
1166  *
1167  * This function is hooked to the sched:sched_switch trace event, and it is
1168  * used to record the beginning and to report the end of a thread noise window.
1169  */
1170 static void
1171 trace_sched_switch_callback(void *data, bool preempt, struct task_struct *p,
1172 			    struct task_struct *n)
1173 {
1174 	struct osnoise_variables *osn_var = this_cpu_osn_var();
1175 
1176 	if (p->pid != osn_var->pid)
1177 		thread_exit(osn_var, p);
1178 
1179 	if (n->pid != osn_var->pid)
1180 		thread_entry(osn_var, n);
1181 }
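
/*
 * Note that the workload thread itself (osn_var->pid) is ignored in both
 * directions: scheduling the sampling thread in or out is not noise, only
 * the other threads that preempt it are.
 */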
1182 
1183 /*
1184  * hook_thread_events - Hook the instrumentation for thread noise
1185  *
1186  * Hook the osnoise tracer callbacks to handle the noise from other
1187  * threads on the necessary kernel events.
1188  */
1189 static int hook_thread_events(void)
1190 {
1191 	int ret;
1192 
1193 	ret = register_trace_sched_switch(trace_sched_switch_callback, NULL);
1194 	if (ret)
1195 		return -EINVAL;
1196 
1197 	return 0;
1198 }
1199 
1200 /*
1201  * unhook_thread_events - Unhook the instrumentation for thread noise
1202  *
1203  * Unhook the osnoise tracer callbacks that handle the noise from other
1204  * threads on the necessary kernel events.
1205  */
1206 static void unhook_thread_events(void)
1207 {
1208 	unregister_trace_sched_switch(trace_sched_switch_callback, NULL);
1209 }
1210 
1211 /*
1212  * save_osn_sample_stats - Save the osnoise_sample statistics
1213  *
1214  * Save the osnoise_sample statistics before the sampling phase. These
1215  * values will be used later to compute the diff between the statistics
1216  * before and after the osnoise sampling.
1217  */
1218 static void
1219 save_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *s)
1220 {
1221 	s->nmi_count = osn_var->nmi.count;
1222 	s->irq_count = osn_var->irq.count;
1223 	s->softirq_count = osn_var->softirq.count;
1224 	s->thread_count = osn_var->thread.count;
1225 }
1226 
1227 /*
1228  * diff_osn_sample_stats - Compute the osnoise_sample statistics
1229  *
1230  * After a sample period, compute the difference on the osnoise_sample
1231  * statistics. The struct osnoise_sample *s contains the statistics saved via
1232  * save_osn_sample_stats() before the osnoise sampling.
1233  */
1234 static void
1235 diff_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *s)
1236 {
1237 	s->nmi_count = osn_var->nmi.count - s->nmi_count;
1238 	s->irq_count = osn_var->irq.count - s->irq_count;
1239 	s->softirq_count = osn_var->softirq.count - s->softirq_count;
1240 	s->thread_count = osn_var->thread.count - s->thread_count;
1241 }
1242 
1243 /*
1244  * osnoise_stop_tracing - Stop tracing and the tracer.
1245  */
1246 static __always_inline void osnoise_stop_tracing(void)
1247 {
1248 	struct osnoise_instance *inst;
1249 	struct trace_array *tr;
1250 
1251 	rcu_read_lock();
1252 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
1253 		tr = inst->tr;
1254 		trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
1255 				"stop tracing hit on cpu %d\n", smp_processor_id());
1256 
1257 		tracer_tracing_off(tr);
1258 	}
1259 	rcu_read_unlock();
1260 }
1261 
1262 /*
1263  * notify_new_max_latency - Notify a new max latency via fsnotify interface.
1264  */
1265 static void notify_new_max_latency(u64 latency)
1266 {
1267 	struct osnoise_instance *inst;
1268 	struct trace_array *tr;
1269 
1270 	rcu_read_lock();
1271 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
1272 		tr = inst->tr;
1273 		if (tr->max_latency < latency) {
1274 			tr->max_latency = latency;
1275 			latency_fsnotify(tr);
1276 		}
1277 	}
1278 	rcu_read_unlock();
1279 }
1280 
1281 /*
1282  * run_osnoise - Sample the time and look for osnoise
1283  *
1284  * Used to capture the time, looking for potential osnoise latency repeatedly.
1285  * Different from hwlat_detector, it is called with preemption and interrupts
1286  * enabled. This allows irqs, softirqs and threads to run, interfering on the
1287  * osnoise sampling thread, as they would do with a regular thread.
1288  */
1289 static int run_osnoise(void)
1290 {
1291 	struct osnoise_variables *osn_var = this_cpu_osn_var();
1292 	u64 start, sample, last_sample;
1293 	u64 last_int_count, int_count;
1294 	s64 noise = 0, max_noise = 0;
1295 	s64 total, last_total = 0;
1296 	struct osnoise_sample s;
1297 	unsigned int threshold;
1298 	u64 runtime, stop_in;
1299 	u64 sum_noise = 0;
1300 	int hw_count = 0;
1301 	int ret = -1;
1302 
1303 	/*
1304 	 * Considers the current thread as the workload.
1305 	 */
1306 	osn_var->pid = current->pid;
1307 
1308 	/*
1309 	 * Save the current stats for the diff
1310 	 */
1311 	save_osn_sample_stats(osn_var, &s);
1312 
1313 	/*
1314 	 * if threshold is 0, use the default value of 5 us.
1315 	 */
1316 	threshold = tracing_thresh ? : 5000;
1317 
1318 	/*
1319 	 * Make sure NMIs see sampling first
1320 	 */
1321 	osn_var->sampling = true;
1322 	barrier();
1323 
1324 	/*
1325 	 * Transform the *_us config to nanoseconds to avoid the
1326 	 * division on the main loop.
1327 	 */
1328 	runtime = osnoise_data.sample_runtime * NSEC_PER_USEC;
1329 	stop_in = osnoise_data.stop_tracing * NSEC_PER_USEC;
1330 
1331 	/*
1332 	 * Start timestamp
1333 	 */
1334 	start = time_get();
1335 
1336 	/*
1337 	 * "previous" loop.
1338 	 */
1339 	last_int_count = set_int_safe_time(osn_var, &last_sample);
1340 
1341 	do {
1342 		/*
1343 		 * Get sample!
1344 		 */
1345 		int_count = set_int_safe_time(osn_var, &sample);
1346 
1347 		noise = time_sub(sample, last_sample);
1348 
1349 		/*
1350 		 * This shouldn't happen.
1351 		 */
1352 		if (noise < 0) {
1353 			osnoise_taint("negative noise!");
1354 			goto out;
1355 		}
1356 
1357 		/*
1358 		 * Sample runtime.
1359 		 */
1360 		total = time_sub(sample, start);
1361 
1362 		/*
1363 		 * Check for possible overflows.
1364 		 */
1365 		if (total < last_total) {
1366 			osnoise_taint("total overflow!");
1367 			break;
1368 		}
1369 
1370 		last_total = total;
1371 
1372 		if (noise >= threshold) {
1373 			int interference = int_count - last_int_count;
1374 
1375 			if (noise > max_noise)
1376 				max_noise = noise;
1377 
1378 			if (!interference)
1379 				hw_count++;
1380 
1381 			sum_noise += noise;
1382 
1383 			trace_sample_threshold(last_sample, noise, interference);
1384 
1385 			if (osnoise_data.stop_tracing)
1386 				if (noise > stop_in)
1387 					osnoise_stop_tracing();
1388 		}
1389 
1390 		/*
1391 		 * For the non-preemptive kernel config: let threads run, if
1392 		 * they so wish.
1393 		 */
1394 		cond_resched();
1395 
1396 		last_sample = sample;
1397 		last_int_count = int_count;
1398 
1399 	} while (total < runtime && !kthread_should_stop());
1400 
1401 	/*
1402 	 * Ensure the loop above is finished from the interrupts' point of view.
1403 	 */
1404 	barrier();
1405 
1406 	osn_var->sampling = false;
1407 
1408 	/*
1409 	 * Make sure sampling data is no longer updated.
1410 	 */
1411 	barrier();
1412 
1413 	/*
1414 	 * Save noise info.
1415 	 */
1416 	s.noise = time_to_us(sum_noise);
1417 	s.runtime = time_to_us(total);
1418 	s.max_sample = time_to_us(max_noise);
1419 	s.hw_count = hw_count;
1420 
1421 	/* Save interference stats info */
1422 	diff_osn_sample_stats(osn_var, &s);
1423 
1424 	trace_osnoise_sample(&s);
1425 
1426 	notify_new_max_latency(max_noise);
1427 
1428 	if (osnoise_data.stop_tracing_total)
1429 		if (s.noise > osnoise_data.stop_tracing_total)
1430 			osnoise_stop_tracing();
1431 
1432 	return 0;
1433 out:
1434 	return ret;
1435 }
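
/*
 * Example of a single detection in the loop above (illustrative values):
 * with the default 5 us threshold, two consecutive time reads 12000 ns
 * apart yield a 12 us noise sample. If int_counter did not move between
 * the reads (interference == 0), no NMI, IRQ, softirq or thread was seen
 * by the kernel, so the gap is attributed to hardware or hypervisor
 * interference and hw_count is incremented.
 */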
1436 
1437 static struct cpumask osnoise_cpumask;
1438 static struct cpumask save_cpumask;
1439 
1440 /*
1441  * osnoise_main - The osnoise detection kernel thread
1442  *
1443  * Calls run_osnoise() function to measure the osnoise for the configured runtime,
1444  * every period.
1445  */
1446 static int osnoise_main(void *data)
1447 {
1448 	u64 interval;
1449 
1450 	while (!kthread_should_stop()) {
1451 
1452 		run_osnoise();
1453 
1454 		mutex_lock(&interface_lock);
1455 		interval = osnoise_data.sample_period - osnoise_data.sample_runtime;
1456 		mutex_unlock(&interface_lock);
1457 
1458 		do_div(interval, USEC_PER_MSEC);
1459 
1460 		/*
1461 		 * differently from hwlat_detector, the osnoise tracer can run
1462 		 * without a pause because preemption is on.
1463 		 */
1464 		if (interval < 1) {
1465 			/* Let synchronize_rcu_tasks() make progress */
1466 			cond_resched_tasks_rcu_qs();
1467 			continue;
1468 		}
1469 
1470 		if (msleep_interruptible(interval))
1471 			break;
1472 	}
1473 
1474 	return 0;
1475 }
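
/*
 * Example of the interval math above (illustrative values): with
 * sample_period = 1200000 us and sample_runtime = 1000000 us, interval =
 * 200000 us, and do_div(interval, USEC_PER_MSEC) turns it into 200 ms for
 * msleep_interruptible(). With the default values, period == runtime, so
 * the interval is 0 and the thread only yields via
 * cond_resched_tasks_rcu_qs().
 */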
1476 
1477 #ifdef CONFIG_TIMERLAT_TRACER
1478 /*
1479  * timerlat_irq - hrtimer handler for timerlat.
1480  */
1481 static enum hrtimer_restart timerlat_irq(struct hrtimer *timer)
1482 {
1483 	struct osnoise_variables *osn_var = this_cpu_osn_var();
1484 	struct timerlat_variables *tlat;
1485 	struct timerlat_sample s;
1486 	u64 now;
1487 	u64 diff;
1488 
1489 	/*
1490 	 * I am not sure if the timer was armed for this CPU. So, get
1491 	 * the timerlat struct from the timer itself, not from this
1492 	 * CPU.
1493 	 */
1494 	tlat = container_of(timer, struct timerlat_variables, timer);
1495 
1496 	now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
1497 
1498 	/*
1499 	 * Enable the osnoise: events for thread and softirq.
1500 	 */
1501 	tlat->tracing_thread = true;
1502 
1503 	osn_var->thread.arrival_time = time_get();
1504 
1505 	/*
1506 	 * A hardirq is running: the timer IRQ. It is for sure preempting
1507 	 * a thread, and potentially preempting a softirq.
1508 	 *
1509 	 * At this point, it is not interesting to know the duration of the
1510 	 * preempted thread (and maybe softirq), but how much time they will
1511 	 * delay the beginning of the execution of the timer thread.
1512 	 *
1513 	 * To get the correct (net) delay added by the softirq, its delta_start
1514 	 * is set as the IRQ one. In this way, at the return of the IRQ, the delta
1515 	 * start of the softirq will be zeroed, accounting then only the time
1516 	 * after that.
1517 	 *
1518 	 * The thread follows the same principle. However, if a softirq is
1519 	 * running, the thread needs to receive the softirq delta_start. The
1520 	 * reason is that the softirq will be the last to be unfolded,
1521 	 * resetting the thread delay to zero.
1522 	 *
1523 	 * The PREEMPT_RT is a special case, though. As softirqs run as threads
1524 	 * on RT, moving the thread is enough.
1525 	 */
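	/*
	 * Illustrative timeline for the !PREEMPT_RT case (all values
	 * hypothetical): a thread window opened at t = 10 and a softirq
	 * window at t = 50 when the timer IRQ fires at t = 100. After the
	 * copies below, thread.delta_start = 50 (the softirq start) and
	 * softirq.delta_start = irq.delta_start, so each context is
	 * charged only with the delay it adds to the timer thread wakeup.
	 */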
1526 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && osn_var->softirq.delta_start) {
1527 		copy_int_safe_time(osn_var, &osn_var->thread.delta_start,
1528 				   &osn_var->softirq.delta_start);
1529 
1530 		copy_int_safe_time(osn_var, &osn_var->softirq.delta_start,
1531 				    &osn_var->irq.delta_start);
1532 	} else {
1533 		copy_int_safe_time(osn_var, &osn_var->thread.delta_start,
1534 				    &osn_var->irq.delta_start);
1535 	}
1536 
1537 	/*
1538 	 * Compare the current time with the expected time.
1539 	 */
1540 	diff = now - tlat->abs_period;
1541 
1542 	tlat->count++;
1543 	s.seqnum = tlat->count;
1544 	s.timer_latency = diff;
1545 	s.context = IRQ_CONTEXT;
1546 
1547 	trace_timerlat_sample(&s);
1548 
1549 	notify_new_max_latency(diff);
1550 
1551 	if (osnoise_data.stop_tracing)
1552 		if (time_to_us(diff) >= osnoise_data.stop_tracing)
1553 			osnoise_stop_tracing();
1554 
1555 	wake_up_process(tlat->kthread);
1556 
1557 	if (osnoise_data.print_stack)
1558 		timerlat_save_stack(0);
1559 
1560 	return HRTIMER_NORESTART;
1561 }
1562 
1563 /*
1564  * wait_next_period - Wait for the next period for timerlat
1565  */
1566 static int wait_next_period(struct timerlat_variables *tlat)
1567 {
1568 	ktime_t next_abs_period, now;
1569 	u64 rel_period = osnoise_data.timerlat_period * 1000;
1570 
1571 	now = hrtimer_cb_get_time(&tlat->timer);
1572 	next_abs_period = ns_to_ktime(tlat->abs_period + rel_period);
1573 
1574 	/*
1575 	 * Save the next abs_period.
1576 	 */
1577 	tlat->abs_period = (u64) ktime_to_ns(next_abs_period);
1578 
1579 	/*
1580 	 * If the new abs_period is in the past, skip the activation.
1581 	 */
1582 	while (ktime_compare(now, next_abs_period) > 0) {
1583 		next_abs_period = ns_to_ktime(tlat->abs_period + rel_period);
1584 		tlat->abs_period = (u64) ktime_to_ns(next_abs_period);
1585 	}
1586 
1587 	set_current_state(TASK_INTERRUPTIBLE);
1588 
1589 	hrtimer_start(&tlat->timer, next_abs_period, HRTIMER_MODE_ABS_PINNED_HARD);
1590 	schedule();
1591 	return 1;
1592 }
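
/*
 * Example of the catch-up loop above (illustrative values): with a
 * 1000 us period (rel_period = 1000000 ns), if abs_period = 10000000 ns
 * but now = 12500000 ns because the thread was delayed, next_abs_period
 * advances in 1000000 ns steps until it passes now, landing at
 * 13000000 ns; the skipped activations are simply not accounted.
 */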
1593 
1594 /*
1595  * timerlat_main - Timerlat main
1596  */
1597 static int timerlat_main(void *data)
1598 {
1599 	struct osnoise_variables *osn_var = this_cpu_osn_var();
1600 	struct timerlat_variables *tlat = this_cpu_tmr_var();
1601 	struct timerlat_sample s;
1602 	struct sched_param sp;
1603 	u64 now, diff;
1604 
1605 	/*
1606 	 * Make the thread RT, that is how cyclictest is usually used.
1607 	 */
1608 	sp.sched_priority = DEFAULT_TIMERLAT_PRIO;
1609 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1610 
1611 	tlat->count = 0;
1612 	tlat->tracing_thread = false;
1613 
1614 	hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
1615 	tlat->timer.function = timerlat_irq;
1616 	tlat->kthread = current;
1617 	osn_var->pid = current->pid;
1618 	/*
1619 	 * Annotate the arrival time.
1620 	 */
1621 	tlat->abs_period = hrtimer_cb_get_time(&tlat->timer);
1622 
1623 	wait_next_period(tlat);
1624 
1625 	osn_var->sampling = 1;
1626 
1627 	while (!kthread_should_stop()) {
1628 		now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
1629 		diff = now - tlat->abs_period;
1630 
1631 		s.seqnum = tlat->count;
1632 		s.timer_latency = diff;
1633 		s.context = THREAD_CONTEXT;
1634 
1635 		trace_timerlat_sample(&s);
1636 
1637 		timerlat_dump_stack(time_to_us(diff));
1638 
1639 		tlat->tracing_thread = false;
1640 		if (osnoise_data.stop_tracing_total)
1641 			if (time_to_us(diff) >= osnoise_data.stop_tracing_total)
1642 				osnoise_stop_tracing();
1643 
1644 		wait_next_period(tlat);
1645 	}
1646 
1647 	hrtimer_cancel(&tlat->timer);
1648 	return 0;
1649 }
1650 #else /* CONFIG_TIMERLAT_TRACER */
1651 static int timerlat_main(void *data)
1652 {
1653 	return 0;
1654 }
1655 #endif /* CONFIG_TIMERLAT_TRACER */
1656 
1657 /*
1658  * stop_kthread - stop a workload thread
1659  */
1660 static void stop_kthread(unsigned int cpu)
1661 {
1662 	struct task_struct *kthread;
1663 
1664 	kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread;
1665 	if (kthread)
1666 		kthread_stop(kthread);
1667 	per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
1668 }
1669 
1670 /*
1671  * stop_per_cpu_kthreads - Stop per-cpu threads
1672  *
1673  * Stop the osnoise sampling threads. Use this on unload and at system
1674  * shutdown.
1675  */
1676 static void stop_per_cpu_kthreads(void)
1677 {
1678 	int cpu;
1679 
1680 	cpus_read_lock();
1681 
1682 	for_each_online_cpu(cpu)
1683 		stop_kthread(cpu);
1684 
1685 	cpus_read_unlock();
1686 }
1687 
1688 /*
1689  * start_kthread - Start a workload thread
1690  */
1691 static int start_kthread(unsigned int cpu)
1692 {
1693 	struct task_struct *kthread;
1694 	void *main = osnoise_main;
1695 	char comm[24];
1696 
1697 	if (timerlat_enabled()) {
1698 		snprintf(comm, 24, "timerlat/%d", cpu);
1699 		main = timerlat_main;
1700 	} else {
1701 		snprintf(comm, 24, "osnoise/%d", cpu);
1702 	}
1703 
1704 	kthread = kthread_create_on_cpu(main, NULL, cpu, comm);
1705 
1706 	if (IS_ERR(kthread)) {
1707 		pr_err(BANNER "could not start sampling thread\n");
1708 		stop_per_cpu_kthreads();
1709 		return -ENOMEM;
1710 	}
1711 
1712 	per_cpu(per_cpu_osnoise_var, cpu).kthread = kthread;
1713 	wake_up_process(kthread);
1714 
1715 	return 0;
1716 }
1717 
1718 /*
1719  * start_per_cpu_kthreads - Kick off the per-cpu osnoise sampling kthreads
1720  *
1721  * This starts the kernel threads that will look for osnoise on the
1722  * allowed cpus.
1723  */
1724 static int start_per_cpu_kthreads(void)
1725 {
1726 	struct cpumask *current_mask = &save_cpumask;
1727 	int retval = 0;
1728 	int cpu;
1729 
1730 	cpus_read_lock();
1731 	/*
1732 	 * Run only on online CPUs in which osnoise is allowed to run.
1733 	 */
1734 	cpumask_and(current_mask, cpu_online_mask, &osnoise_cpumask);
1735 
1736 	for_each_possible_cpu(cpu)
1737 		per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
1738 
1739 	for_each_cpu(cpu, current_mask) {
1740 		retval = start_kthread(cpu);
1741 		if (retval) {
1742 			stop_per_cpu_kthreads();
1743 			break;
1744 		}
1745 	}
1746 
1747 	cpus_read_unlock();
1748 
1749 	return retval;
1750 }
1751 
1752 #ifdef CONFIG_HOTPLUG_CPU
1753 static void osnoise_hotplug_workfn(struct work_struct *dummy)
1754 {
1755 	unsigned int cpu = smp_processor_id();
1756 
1757 	mutex_lock(&trace_types_lock);
1758 
1759 	if (!osnoise_has_registered_instances())
1760 		goto out_unlock_trace;
1761 
1762 	mutex_lock(&interface_lock);
1763 	cpus_read_lock();
1764 
1765 	if (!cpumask_test_cpu(cpu, &osnoise_cpumask))
1766 		goto out_unlock;
1767 
1768 	start_kthread(cpu);
1769 
1770 out_unlock:
1771 	cpus_read_unlock();
1772 	mutex_unlock(&interface_lock);
1773 out_unlock_trace:
1774 	mutex_unlock(&trace_types_lock);
1775 }
1776 
1777 static DECLARE_WORK(osnoise_hotplug_work, osnoise_hotplug_workfn);
1778 
1779 /*
1780  * osnoise_cpu_init - CPU hotplug online callback function
1781  */
1782 static int osnoise_cpu_init(unsigned int cpu)
1783 {
1784 	schedule_work_on(cpu, &osnoise_hotplug_work);
1785 	return 0;
1786 }
1787 
1788 /*
1789  * osnoise_cpu_die - CPU hotplug offline callback function
1790  */
1791 static int osnoise_cpu_die(unsigned int cpu)
1792 {
1793 	stop_kthread(cpu);
1794 	return 0;
1795 }
1796 
1797 static void osnoise_init_hotplug_support(void)
1798 {
1799 	int ret;
1800 
1801 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "trace/osnoise:online",
1802 				osnoise_cpu_init, osnoise_cpu_die);
1803 	if (ret < 0)
1804 		pr_warn(BANNER "Error initializing cpu hotplug support\n");
1805 
1806 	return;
1807 }
1808 #else /* CONFIG_HOTPLUG_CPU */
1809 static void osnoise_init_hotplug_support(void)
1810 {
1811 	return;
1812 }
1813 #endif /* CONFIG_HOTPLUG_CPU */
1814 
1815 /*
1816  * osnoise_cpus_read - Read function for reading the "cpus" file
1817  * @filp: The active open file structure
1818  * @ubuf: The userspace provided buffer to read value into
1819  * @count: The maximum number of bytes to read
1820  * @ppos: The current "file" position
1821  *
1822  * Prints the "cpus" output into the user-provided buffer.
1823  */
1824 static ssize_t
1825 osnoise_cpus_read(struct file *filp, char __user *ubuf, size_t count,
1826 		  loff_t *ppos)
1827 {
1828 	char *mask_str;
1829 	int len;
1830 
1831 	mutex_lock(&interface_lock);
1832 
1833 	len = snprintf(NULL, 0, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)) + 1;
1834 	mask_str = kmalloc(len, GFP_KERNEL);
1835 	if (!mask_str) {
1836 		count = -ENOMEM;
1837 		goto out_unlock;
1838 	}
1839 
1840 	len = snprintf(mask_str, len, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask));
1841 	if (len >= count) {
1842 		count = -EINVAL;
1843 		goto out_free;
1844 	}
1845 
1846 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
1847 
1848 out_free:
1849 	kfree(mask_str);
1850 out_unlock:
1851 	mutex_unlock(&interface_lock);
1852 
1853 	return count;
1854 }
1855 
1856 /*
1857  * osnoise_cpus_write - Write function for "cpus" entry
1858  * @filp: The active open file structure
1859  * @ubuf: The user buffer that contains the value to write
1860  * @count: The maximum number of bytes to write to "file"
1861  * @ppos: The current position in @file
1862  *
1863  * This function provides a write implementation for the "cpus"
1864  * interface to the osnoise tracer. By default, it lists all CPUs,
1865  * allowing osnoise threads to run on any online CPU of the system. It
1866  * serves to restrict the execution of osnoise to the set of CPUs written
1867  * via this interface. Why not use "tracing_cpumask"?
1868  * Because the user might be interested in tracing what is running on
1869  * other CPUs. For instance, one might run osnoise in one HT CPU
1870  * while observing what is running on the sibling HT CPU.
1871  */
1872 static ssize_t
1873 osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count,
1874 		   loff_t *ppos)
1875 {
1876 	cpumask_var_t osnoise_cpumask_new;
1877 	int running, err;
1878 	char buf[256];
1879 
1880 	if (count >= 256)
1881 		return -EINVAL;
1882 
1883 	if (copy_from_user(buf, ubuf, count))
1884 		return -EFAULT;
1885 
1886 	if (!zalloc_cpumask_var(&osnoise_cpumask_new, GFP_KERNEL))
1887 		return -ENOMEM;
1888 
1889 	err = cpulist_parse(buf, osnoise_cpumask_new);
1890 	if (err)
1891 		goto err_free;
1892 
1893 	/*
1894 	 * trace_types_lock is taken to avoid concurrency on start/stop.
1895 	 */
1896 	mutex_lock(&trace_types_lock);
1897 	running = osnoise_has_registered_instances();
1898 	if (running)
1899 		stop_per_cpu_kthreads();
1900 
1901 	mutex_lock(&interface_lock);
1902 	/*
1903 	 * osnoise_cpumask is read by CPU hotplug operations.
1904 	 */
1905 	cpus_read_lock();
1906 
1907 	cpumask_copy(&osnoise_cpumask, osnoise_cpumask_new);
1908 
1909 	cpus_read_unlock();
1910 	mutex_unlock(&interface_lock);
1911 
1912 	if (running)
1913 		start_per_cpu_kthreads();
1914 	mutex_unlock(&trace_types_lock);
1915 
1916 	free_cpumask_var(osnoise_cpumask_new);
1917 	return count;
1918 
1919 err_free:
1920 	free_cpumask_var(osnoise_cpumask_new);
1921 
1922 	return err;
1923 }
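
/*
 * For example (assuming tracefs is mounted at /sys/kernel/tracing),
 * the workload can be restricted to CPUs 0-3 with:
 *
 *   # echo 0-3 > /sys/kernel/tracing/osnoise/cpus
 *   # cat /sys/kernel/tracing/osnoise/cpus
 *   0-3
 */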
1924 
1925 /*
1926  * osnoise/runtime_us: cannot be greater than the period.
1927  */
1928 static struct trace_min_max_param osnoise_runtime = {
1929 	.lock	= &interface_lock,
1930 	.val	= &osnoise_data.sample_runtime,
1931 	.max	= &osnoise_data.sample_period,
1932 	.min	= NULL,
1933 };
1934 
1935 /*
1936  * osnoise/period_us: cannot be smaller than the runtime.
1937  */
1938 static struct trace_min_max_param osnoise_period = {
1939 	.lock	= &interface_lock,
1940 	.val	= &osnoise_data.sample_period,
1941 	.max	= NULL,
1942 	.min	= &osnoise_data.sample_runtime,
1943 };
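
/*
 * Note that, because runtime_us's .max points to sample_period and
 * period_us's .min points to sample_runtime, the trace_min_max_fops
 * handler keeps the pair consistent: a write that would make the
 * runtime exceed the period (or the period drop below the runtime)
 * is rejected. For example (values in microseconds):
 *
 *   # echo 1000000 > /sys/kernel/tracing/osnoise/period_us
 *   # echo 500000 > /sys/kernel/tracing/osnoise/runtime_us
 */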
1944 
1945 /*
1946  * osnoise/stop_tracing_us: no limit.
1947  */
1948 static struct trace_min_max_param osnoise_stop_tracing_in = {
1949 	.lock	= &interface_lock,
1950 	.val	= &osnoise_data.stop_tracing,
1951 	.max	= NULL,
1952 	.min	= NULL,
1953 };
1954 
1955 /*
1956  * osnoise/stop_tracing_total_us: no limit.
1957  */
1958 static struct trace_min_max_param osnoise_stop_tracing_total = {
1959 	.lock	= &interface_lock,
1960 	.val	= &osnoise_data.stop_tracing_total,
1961 	.max	= NULL,
1962 	.min	= NULL,
1963 };
1964 
1965 #ifdef CONFIG_TIMERLAT_TRACER
1966 /*
1967  * osnoise/print_stack: print the stacktrace of the IRQ handler if the total
1968  * latency is higher than val.
1969  */
1970 static struct trace_min_max_param osnoise_print_stack = {
1971 	.lock	= &interface_lock,
1972 	.val	= &osnoise_data.print_stack,
1973 	.max	= NULL,
1974 	.min	= NULL,
1975 };
1976 
1977 /*
1978  * osnoise/timerlat_period: min 100 us, max 1 s
1979  */
1980 u64 timerlat_min_period = 100;
1981 u64 timerlat_max_period = 1000000;
1982 static struct trace_min_max_param timerlat_period = {
1983 	.lock	= &interface_lock,
1984 	.val	= &osnoise_data.timerlat_period,
1985 	.max	= &timerlat_max_period,
1986 	.min	= &timerlat_min_period,
1987 };
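
/*
 * For example, a 500 us timer period (within the 100 us - 1 s bounds
 * enforced above):
 *
 *   # echo 500 > /sys/kernel/tracing/osnoise/timerlat_period_us
 */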
1988 #endif
1989 
1990 static const struct file_operations cpus_fops = {
1991 	.open		= tracing_open_generic,
1992 	.read		= osnoise_cpus_read,
1993 	.write		= osnoise_cpus_write,
1994 	.llseek		= generic_file_llseek,
1995 };
1996 
1997 #ifdef CONFIG_TIMERLAT_TRACER
1998 #ifdef CONFIG_STACKTRACE
1999 static int init_timerlat_stack_tracefs(struct dentry *top_dir)
2000 {
2001 	struct dentry *tmp;
2002 
2003 	tmp = tracefs_create_file("print_stack", TRACE_MODE_WRITE, top_dir,
2004 				  &osnoise_print_stack, &trace_min_max_fops);
2005 	if (!tmp)
2006 		return -ENOMEM;
2007 
2008 	return 0;
2009 }
2010 #else /* CONFIG_STACKTRACE */
2011 static int init_timerlat_stack_tracefs(struct dentry *top_dir)
2012 {
2013 	return 0;
2014 }
2015 #endif /* CONFIG_STACKTRACE */
2016 
2017 /*
2018  * init_timerlat_tracefs - A function to initialize the timerlat interface files
2019  */
2020 static int init_timerlat_tracefs(struct dentry *top_dir)
2021 {
2022 	struct dentry *tmp;
2023 
2024 	tmp = tracefs_create_file("timerlat_period_us", TRACE_MODE_WRITE, top_dir,
2025 				  &timerlat_period, &trace_min_max_fops);
2026 	if (!tmp)
2027 		return -ENOMEM;
2028 
2029 	return init_timerlat_stack_tracefs(top_dir);
2030 }
2031 #else /* CONFIG_TIMERLAT_TRACER */
2032 static int init_timerlat_tracefs(struct dentry *top_dir)
2033 {
2034 	return 0;
2035 }
2036 #endif /* CONFIG_TIMERLAT_TRACER */
2037 
2038 /*
2039  * init_tracefs - A function to initialize the tracefs interface files
2040  *
2041  * This function creates entries in tracefs for "osnoise" and "timerlat".
2042  * It creates the "osnoise" directory in the tracing directory, and
2043  * within that directory the user can view and change the configs.
2044  */
2045 static int init_tracefs(void)
2046 {
2047 	struct dentry *top_dir;
2048 	struct dentry *tmp;
2049 	int ret;
2050 
2051 	ret = tracing_init_dentry();
2052 	if (ret)
2053 		return -ENOMEM;
2054 
2055 	top_dir = tracefs_create_dir("osnoise", NULL);
2056 	if (!top_dir)
2057 		return 0;
2058 
2059 	tmp = tracefs_create_file("period_us", TRACE_MODE_WRITE, top_dir,
2060 				  &osnoise_period, &trace_min_max_fops);
2061 	if (!tmp)
2062 		goto err;
2063 
2064 	tmp = tracefs_create_file("runtime_us", TRACE_MODE_WRITE, top_dir,
2065 				  &osnoise_runtime, &trace_min_max_fops);
2066 	if (!tmp)
2067 		goto err;
2068 
2069 	tmp = tracefs_create_file("stop_tracing_us", TRACE_MODE_WRITE, top_dir,
2070 				  &osnoise_stop_tracing_in, &trace_min_max_fops);
2071 	if (!tmp)
2072 		goto err;
2073 
2074 	tmp = tracefs_create_file("stop_tracing_total_us", TRACE_MODE_WRITE, top_dir,
2075 				  &osnoise_stop_tracing_total, &trace_min_max_fops);
2076 	if (!tmp)
2077 		goto err;
2078 
2079 	tmp = trace_create_file("cpus", TRACE_MODE_WRITE, top_dir, NULL, &cpus_fops);
2080 	if (!tmp)
2081 		goto err;
2082 
2083 	ret = init_timerlat_tracefs(top_dir);
2084 	if (ret)
2085 		goto err;
2086 
2087 	return 0;
2088 
2089 err:
2090 	tracefs_remove(top_dir);
2091 	return -ENOMEM;
2092 }
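
/*
 * The resulting layout under the tracing directory (the timerlat
 * entries depend on CONFIG_TIMERLAT_TRACER and CONFIG_STACKTRACE):
 *
 *   osnoise/
 *   |-- cpus
 *   |-- period_us
 *   |-- print_stack
 *   |-- runtime_us
 *   |-- stop_tracing_total_us
 *   |-- stop_tracing_us
 *   `-- timerlat_period_us
 */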
2093 
2094 static int osnoise_hook_events(void)
2095 {
2096 	int retval;
2097 
2098 	/*
2099 	 * Trace is already hooked, we are re-enabling from
2100 	 * a stop_tracing_*.
2101 	 */
2102 	if (trace_osnoise_callback_enabled)
2103 		return 0;
2104 
2105 	retval = hook_irq_events();
2106 	if (retval)
2107 		return -EINVAL;
2108 
2109 	retval = hook_softirq_events();
2110 	if (retval)
2111 		goto out_unhook_irq;
2112 
2113 	retval = hook_thread_events();
2114 	/*
2115 	 * All fine!
2116 	 */
2117 	if (!retval)
2118 		return 0;
2119 
2120 	unhook_softirq_events();
2121 out_unhook_irq:
2122 	unhook_irq_events();
2123 	return -EINVAL;
2124 }
2125 
2126 /*
2127  * osnoise_workload_start - start the workload and hook to events
2128  */
2129 static int osnoise_workload_start(void)
2130 {
2131 	int retval;
2132 
2133 	/*
2134 	 * Instances are registered after the workload starts. Hence,
2135 	 * if there is already a registered instance, the workload has
2136 	 * already been started. Otherwise, this code is on the way to
2137 	 * register the first instance, and the workload needs to
2138 	 * start.
2139 	 */
2140 	if (osnoise_has_registered_instances())
2141 		return 0;
2142 
2143 	osn_var_reset_all();
2144 
2145 	retval = osnoise_hook_events();
2146 	if (retval)
2147 		return retval;
2148 
2149 	/*
2150 	 * Make sure that ftrace_nmi_enter/exit() see reset values
2151 	 * before enabling trace_osnoise_callback_enabled.
2152 	 */
2153 	barrier();
2154 	trace_osnoise_callback_enabled = true;
2155 
2156 	retval = start_per_cpu_kthreads();
2157 	if (retval) {
		trace_osnoise_callback_enabled = false;
		/*
		 * Make sure that ftrace_nmi_enter/exit() see
		 * trace_osnoise_callback_enabled as false before continuing.
		 */
		barrier();

2158 		unhook_irq_events();
		unhook_softirq_events();
		unhook_thread_events();
2159 		return retval;
2160 	}
2161 
2162 	return 0;
2163 }
2164 
2165 /*
2166  * osnoise_workload_stop - stop the workload and unhook the events
2167  */
2168 static void osnoise_workload_stop(void)
2169 {
2170 	/*
2171 	 * Instances are unregistered before calling stop. Hence,
2172 	 * if there is still a registered instance, at least one
2173 	 * other instance is running, and the workload must not
2174 	 * stop yet. Otherwise, this code is on the way to disable
2175 	 * the last instance, and the workload can stop.
2176 	 */
2177 	if (osnoise_has_registered_instances())
2178 		return;
2179 
2180 	trace_osnoise_callback_enabled = false;
2181 	/*
2182 	 * Make sure that ftrace_nmi_enter/exit() see
2183 	 * trace_osnoise_callback_enabled as false before continuing.
2184 	 */
2185 	barrier();
2186 
2187 	stop_per_cpu_kthreads();
2188 
2189 	unhook_irq_events();
2190 	unhook_softirq_events();
2191 	unhook_thread_events();
2192 }
2193 
2194 static void osnoise_tracer_start(struct trace_array *tr)
2195 {
2196 	int retval;
2197 
2198 	/*
2199 	 * If the instance is already registered, there is no need to
2200 	 * register it again.
2201 	 */
2202 	if (osnoise_instance_registered(tr))
2203 		return;
2204 
2205 	retval = osnoise_workload_start();
2206 	if (retval)
2207 		pr_err(BANNER "Error starting osnoise tracer\n");
2208 
2209 	osnoise_register_instance(tr);
2210 }
2211 
2212 static void osnoise_tracer_stop(struct trace_array *tr)
2213 {
2214 	osnoise_unregister_instance(tr);
2215 	osnoise_workload_stop();
2216 }
2217 
2218 static int osnoise_tracer_init(struct trace_array *tr)
2219 {
2220 	/*
2221 	 * Only allow osnoise tracer if timerlat tracer is not running
2222 	 * already.
2223 	 */
2224 	if (timerlat_enabled())
2225 		return -EBUSY;
2226 
2227 	tr->max_latency = 0;
2228 
2229 	osnoise_tracer_start(tr);
2230 	return 0;
2231 }
2232 
2233 static void osnoise_tracer_reset(struct trace_array *tr)
2234 {
2235 	osnoise_tracer_stop(tr);
2236 }
2237 
2238 static struct tracer osnoise_tracer __read_mostly = {
2239 	.name		= "osnoise",
2240 	.init		= osnoise_tracer_init,
2241 	.reset		= osnoise_tracer_reset,
2242 	.start		= osnoise_tracer_start,
2243 	.stop		= osnoise_tracer_stop,
2244 	.print_header	= print_osnoise_headers,
2245 	.allow_instances = true,
2246 };
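
/*
 * Once registered, the tracer is engaged through the usual tracefs
 * interface, e.g.:
 *
 *   # echo osnoise > /sys/kernel/tracing/current_tracer
 */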
2247 
2248 #ifdef CONFIG_TIMERLAT_TRACER
2249 static void timerlat_tracer_start(struct trace_array *tr)
2250 {
2251 	int retval;
2252 
2253 	/*
2254 	 * If the instance is already registered, there is no need to
2255 	 * register it again.
2256 	 */
2257 	if (osnoise_instance_registered(tr))
2258 		return;
2259 
2260 	retval = osnoise_workload_start();
2261 	if (retval)
2262 		pr_err(BANNER "Error starting timerlat tracer\n");
2263 
2264 	osnoise_register_instance(tr);
2267 }
2268 
2269 static void timerlat_tracer_stop(struct trace_array *tr)
2270 {
2271 	int cpu;
2272 
2273 	osnoise_unregister_instance(tr);
2274 
2275 	/*
2276 	 * Instruct the threads to stop only if this is the last instance.
2277 	 */
2278 	if (!osnoise_has_registered_instances()) {
2279 		for_each_online_cpu(cpu)
2280 			per_cpu(per_cpu_osnoise_var, cpu).sampling = 0;
2281 	}
2282 
2283 	osnoise_workload_stop();
2284 }
2285 
2286 static int timerlat_tracer_init(struct trace_array *tr)
2287 {
2288 	/*
2289 	 * Only allow timerlat tracer if osnoise tracer is not running already.
2290 	 */
2291 	if (osnoise_has_registered_instances() && !osnoise_data.timerlat_tracer)
2292 		return -EBUSY;
2293 
2294 	/*
2295 	 * If this is the first instance, set timerlat_tracer to block
2296 	 * osnoise tracer start.
2297 	 */
2298 	if (!osnoise_has_registered_instances())
2299 		osnoise_data.timerlat_tracer = 1;
2300 
2301 	tr->max_latency = 0;
2302 	timerlat_tracer_start(tr);
2303 
2304 	return 0;
2305 }
2306 
2307 static void timerlat_tracer_reset(struct trace_array *tr)
2308 {
2309 	timerlat_tracer_stop(tr);
2310 
2311 	/*
2312 	 * If this is the last instance, reset timerlat_tracer allowing
2313 	 * osnoise to be started.
2314 	 */
2315 	if (!osnoise_has_registered_instances())
2316 		osnoise_data.timerlat_tracer = 0;
2317 }
2318 
2319 static struct tracer timerlat_tracer __read_mostly = {
2320 	.name		= "timerlat",
2321 	.init		= timerlat_tracer_init,
2322 	.reset		= timerlat_tracer_reset,
2323 	.start		= timerlat_tracer_start,
2324 	.stop		= timerlat_tracer_stop,
2325 	.print_header	= print_timerlat_headers,
2326 	.allow_instances = true,
2327 };
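
/*
 * Likewise for timerlat. Since both tracers share the osnoise
 * workload, the init callbacks above prevent them from running
 * simultaneously:
 *
 *   # echo timerlat > /sys/kernel/tracing/current_tracer
 */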
2328 
2329 __init static int init_timerlat_tracer(void)
2330 {
2331 	return register_tracer(&timerlat_tracer);
2332 }
2333 #else /* CONFIG_TIMERLAT_TRACER */
2334 __init static int init_timerlat_tracer(void)
2335 {
2336 	return 0;
2337 }
2338 #endif /* CONFIG_TIMERLAT_TRACER */
2339 
2340 __init static int init_osnoise_tracer(void)
2341 {
2342 	int ret;
2343 
2344 	mutex_init(&interface_lock);
2345 
2346 	cpumask_copy(&osnoise_cpumask, cpu_all_mask);
2347 
2348 	ret = register_tracer(&osnoise_tracer);
2349 	if (ret) {
2350 		pr_err(BANNER "Error registering osnoise!\n");
2351 		return ret;
2352 	}
2353 
2354 	ret = init_timerlat_tracer();
2355 	if (ret) {
2356 		pr_err(BANNER "Error registering timerlat!\n");
2357 		return ret;
2358 	}
2359 
2360 	osnoise_init_hotplug_support();
2361 
2362 	INIT_LIST_HEAD_RCU(&osnoise_instances);
2363 
2364 	init_tracefs();
2365 
2366 	return 0;
2367 }
2368 late_initcall(init_osnoise_tracer);
2369