1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * OS Noise Tracer: computes the OS Noise suffered by a running thread.
4  * Timerlat Tracer: measures the wakeup latency of a timer triggered IRQ and thread.
5  *
6  * Based on "hwlat_detector" tracer by:
7  *   Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
8  *   Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
9  *   With feedback from Clark Williams <williams@redhat.com>
10  *
11  * And also based on the rtsl tracer presented on:
12  *  DE OLIVEIRA, Daniel Bristot, et al. Demystifying the real-time linux
13  *  scheduling latency. In: 32nd Euromicro Conference on Real-Time Systems
14  *  (ECRTS 2020). Schloss Dagstuhl-Leibniz-Zentrum fur Informatik, 2020.
15  *
16  * Copyright (C) 2021 Daniel Bristot de Oliveira, Red Hat, Inc. <bristot@redhat.com>
17  */
18 
19 #include <linux/kthread.h>
20 #include <linux/tracefs.h>
21 #include <linux/uaccess.h>
22 #include <linux/cpumask.h>
23 #include <linux/delay.h>
24 #include <linux/sched/clock.h>
25 #include <uapi/linux/sched/types.h>
26 #include <linux/sched.h>
27 #include "trace.h"
28 
29 #ifdef CONFIG_X86_LOCAL_APIC
30 #include <asm/trace/irq_vectors.h>
31 #undef TRACE_INCLUDE_PATH
32 #undef TRACE_INCLUDE_FILE
33 #endif /* CONFIG_X86_LOCAL_APIC */
34 
35 #include <trace/events/irq.h>
36 #include <trace/events/sched.h>
37 
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/osnoise.h>
40 
41 /*
42  * Default values.
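 *
 * The sample period/runtime and the timerlat period below are expressed
 * in microseconds.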
43  */
44 #define BANNER			"osnoise: "
45 #define DEFAULT_SAMPLE_PERIOD	1000000			/* 1s */
46 #define DEFAULT_SAMPLE_RUNTIME	1000000			/* 1s */
47 
48 #define DEFAULT_TIMERLAT_PERIOD	1000			/* 1ms */
49 #define DEFAULT_TIMERLAT_PRIO	95			/* FIFO 95 */
50 
51 /*
52  * osnoise/options entries.
53  */
54 enum osnoise_options_index {
55 	OSN_DEFAULTS = 0,
56 	OSN_WORKLOAD,
57 	OSN_PANIC_ON_STOP,
58 	OSN_PREEMPT_DISABLE,
59 	OSN_IRQ_DISABLE,
60 	OSN_MAX
61 };
62 
63 static const char * const osnoise_options_str[OSN_MAX] = {
64 							"DEFAULTS",
65 							"OSNOISE_WORKLOAD",
66 							"PANIC_ON_STOP",
67 							"OSNOISE_PREEMPT_DISABLE",
68 							"OSNOISE_IRQ_DISABLE" };
69 
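/*
 * Default: only the OSNOISE_WORKLOAD option (bit OSN_WORKLOAD == 1, i.e. 0x2)
 * is set.
 */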
70 #define OSN_DEFAULT_OPTIONS		0x2
71 static unsigned long osnoise_options	= OSN_DEFAULT_OPTIONS;
72 
73 /*
74  * trace_array of the enabled osnoise/timerlat instances.
75  */
76 struct osnoise_instance {
77 	struct list_head	list;
78 	struct trace_array	*tr;
79 };
80 
81 static struct list_head osnoise_instances;
82 
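/*
 * osnoise_has_registered_instances - check if any instance is registered
 */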
83 static bool osnoise_has_registered_instances(void)
84 {
85 	return !!list_first_or_null_rcu(&osnoise_instances,
86 					struct osnoise_instance,
87 					list);
88 }
89 
90 /*
91  * osnoise_instance_registered - check if a tr is already registered
92  */
93 static int osnoise_instance_registered(struct trace_array *tr)
94 {
95 	struct osnoise_instance *inst;
96 	int found = 0;
97 
98 	rcu_read_lock();
99 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
100 		if (inst->tr == tr)
101 			found = 1;
102 	}
103 	rcu_read_unlock();
104 
105 	return found;
106 }
107 
108 /*
109  * osnoise_register_instance - register a new trace instance
110  *
111  * Register a trace_array *tr in the list of instances running
112  * osnoise/timerlat tracers.
113  */
114 static int osnoise_register_instance(struct trace_array *tr)
115 {
116 	struct osnoise_instance *inst;
117 
118 	/*
119 	 * register/unregister serialization is provided by trace's
120 	 * trace_types_lock.
121 	 */
122 	lockdep_assert_held(&trace_types_lock);
123 
124 	inst = kmalloc(sizeof(*inst), GFP_KERNEL);
125 	if (!inst)
126 		return -ENOMEM;
127 
128 	INIT_LIST_HEAD_RCU(&inst->list);
129 	inst->tr = tr;
130 	list_add_tail_rcu(&inst->list, &osnoise_instances);
131 
132 	return 0;
133 }
134 
135 /*
136  *  osnoise_unregister_instance - unregister a registered trace instance
137  *
138  * Remove the trace_array *tr from the list of instances running
139  * osnoise/timerlat tracers.
140  */
141 static void osnoise_unregister_instance(struct trace_array *tr)
142 {
143 	struct osnoise_instance *inst;
144 	int found = 0;
145 
146 	/*
147 	 * register/unregister serialization is provided by trace's
148 	 * trace_types_lock.
149 	 */
150 	list_for_each_entry_rcu(inst, &osnoise_instances, list,
151 				lockdep_is_held(&trace_types_lock)) {
152 		if (inst->tr == tr) {
153 			list_del_rcu(&inst->list);
154 			found = 1;
155 			break;
156 		}
157 	}
158 
159 	if (!found)
160 		return;
161 
162 	kvfree_rcu_mightsleep(inst);
163 }
164 
165 /*
166  * NMI runtime info.
167  */
168 struct osn_nmi {
169 	u64	count;
170 	u64	delta_start;
171 };
172 
173 /*
174  * IRQ runtime info.
175  */
176 struct osn_irq {
177 	u64	count;
178 	u64	arrival_time;
179 	u64	delta_start;
180 };
181 
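/*
 * Contexts reported in the timerlat samples (see struct timerlat_sample).
 */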
182 #define IRQ_CONTEXT	0
183 #define THREAD_CONTEXT	1
184 #define THREAD_URET	2
185 /*
186  * softirq runtime info.
187  */
188 struct osn_softirq {
189 	u64	count;
190 	u64	arrival_time;
191 	u64	delta_start;
192 };
193 
194 /*
195  * thread runtime info.
196  */
197 struct osn_thread {
198 	u64	count;
199 	u64	arrival_time;
200 	u64	delta_start;
201 };
202 
203 /*
204  * Runtime information: this structure saves the runtime information used by
205  * one sampling thread.
206  */
207 struct osnoise_variables {
208 	struct task_struct	*kthread;
209 	bool			sampling;
210 	pid_t			pid;
211 	struct osn_nmi		nmi;
212 	struct osn_irq		irq;
213 	struct osn_softirq	softirq;
214 	struct osn_thread	thread;
215 	local_t			int_counter;
216 };
217 
218 /*
219  * Per-cpu runtime information.
220  */
221 static DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var);
222 
223 /*
224  * this_cpu_osn_var - Return the per-cpu osnoise_variables on its relative CPU
225  */
226 static inline struct osnoise_variables *this_cpu_osn_var(void)
227 {
228 	return this_cpu_ptr(&per_cpu_osnoise_var);
229 }
230 
231 /*
232  * Protect the interface.
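 *
 * Used to synchronize the tracefs interface with the tracer's per-cpu data,
 * see, e.g., tlat_var_reset() and osnoise_sleep().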
233  */
234 static struct mutex interface_lock;
235 
236 #ifdef CONFIG_TIMERLAT_TRACER
237 /*
238  * Runtime information for the timer mode.
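 *
 * tracing_thread is set by the timerlat IRQ handler to let the softirq and
 * thread osnoise hooks account time for the current activation (see
 * timerlat_softirq_exit() and timerlat_thread_exit()).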
239  */
240 struct timerlat_variables {
241 	struct task_struct	*kthread;
242 	struct hrtimer		timer;
243 	u64			rel_period;
244 	u64			abs_period;
245 	bool			tracing_thread;
246 	u64			count;
247 	bool			uthread_migrate;
248 };
249 
250 static DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var);
251 
252 /*
253  * this_cpu_tmr_var - Return the per-cpu timerlat_variables on its relative CPU
254  */
255 static inline struct timerlat_variables *this_cpu_tmr_var(void)
256 {
257 	return this_cpu_ptr(&per_cpu_timerlat_var);
258 }
259 
260 /*
261  * tlat_var_reset - Reset the values of the given timerlat_variables
262  */
263 static inline void tlat_var_reset(void)
264 {
265 	struct timerlat_variables *tlat_var;
266 	int cpu;
267 
268 	/* Synchronize with the timerlat interfaces */
269 	mutex_lock(&interface_lock);
270 	/*
271 	 * So far, all the values are initialized as 0, so
272 	 * zeroing the structure is perfect.
273 	 */
274 	for_each_cpu(cpu, cpu_online_mask) {
275 		tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);
276 		if (tlat_var->kthread)
277 			hrtimer_cancel(&tlat_var->timer);
278 		memset(tlat_var, 0, sizeof(*tlat_var));
279 	}
280 	mutex_unlock(&interface_lock);
281 }
282 #else /* CONFIG_TIMERLAT_TRACER */
283 #define tlat_var_reset()	do {} while (0)
284 #endif /* CONFIG_TIMERLAT_TRACER */
285 
286 /*
287  * osn_var_reset - Reset the values of the given osnoise_variables
288  */
289 static inline void osn_var_reset(void)
290 {
291 	struct osnoise_variables *osn_var;
292 	int cpu;
293 
294 	/*
295 	 * So far, all the values are initialized as 0, so
296 	 * zeroing the structure is perfect.
297 	 */
298 	for_each_cpu(cpu, cpu_online_mask) {
299 		osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
300 		memset(osn_var, 0, sizeof(*osn_var));
301 	}
302 }
303 
304 /*
305  * osn_var_reset_all - Reset the value of all per-cpu osnoise_variables
306  */
307 static inline void osn_var_reset_all(void)
308 {
309 	osn_var_reset();
310 	tlat_var_reset();
311 }
312 
313 /*
314  * Tells NMIs to call back to the osnoise tracer to record timestamps.
315  */
316 bool trace_osnoise_callback_enabled;
317 
318 /*
319  * osnoise sample structure definition. Used to store the statistics of a
320  * sample run.
321  */
322 struct osnoise_sample {
323 	u64			runtime;	/* runtime */
324 	u64			noise;		/* noise */
325 	u64			max_sample;	/* max single noise sample */
326 	int			hw_count;	/* # HW (incl. hypervisor) interference */
327 	int			nmi_count;	/* # NMIs during this sample */
328 	int			irq_count;	/* # IRQs during this sample */
329 	int			softirq_count;	/* # softirqs during this sample */
330 	int			thread_count;	/* # threads during this sample */
331 };
332 
333 #ifdef CONFIG_TIMERLAT_TRACER
334 /*
335  * timerlat sample structure definition. Used to store the statistics of
336  * a sample run.
337  */
338 struct timerlat_sample {
339 	u64			timer_latency;	/* timer_latency */
340 	unsigned int		seqnum;		/* unique sequence */
341 	int			context;	/* timer context */
342 };
343 #endif
344 
345 /*
346  * Tracer data.
347  */
348 static struct osnoise_data {
349 	u64	sample_period;		/* total sampling period */
350 	u64	sample_runtime;		/* active sampling portion of period */
351 	u64	stop_tracing;		/* stop trace in the internal operation (loop/irq) */
352 	u64	stop_tracing_total;	/* stop trace in the final operation (report/thread) */
353 #ifdef CONFIG_TIMERLAT_TRACER
354 	u64	timerlat_period;	/* timerlat period */
355 	u64	print_stack;		/* print IRQ stack if total > */
356 	int	timerlat_tracer;	/* timerlat tracer */
357 #endif
358 	bool	tainted;		/* inform users and developers about a problem */
359 } osnoise_data = {
360 	.sample_period			= DEFAULT_SAMPLE_PERIOD,
361 	.sample_runtime			= DEFAULT_SAMPLE_RUNTIME,
362 	.stop_tracing			= 0,
363 	.stop_tracing_total		= 0,
364 #ifdef CONFIG_TIMERLAT_TRACER
365 	.print_stack			= 0,
366 	.timerlat_period		= DEFAULT_TIMERLAT_PERIOD,
367 	.timerlat_tracer		= 0,
368 #endif
369 };
370 
371 #ifdef CONFIG_TIMERLAT_TRACER
372 static inline bool timerlat_enabled(void)
373 {
374 	return osnoise_data.timerlat_tracer;
375 }
376 
377 static inline int timerlat_softirq_exit(struct osnoise_variables *osn_var)
378 {
379 	struct timerlat_variables *tlat_var = this_cpu_tmr_var();
380 	/*
381 	 * If timerlat is enabled, but the timer IRQ handler did not
382 	 * run yet to set tracing_thread, do not trace.
383 	 */
384 	if (!tlat_var->tracing_thread) {
385 		osn_var->softirq.arrival_time = 0;
386 		osn_var->softirq.delta_start = 0;
387 		return 0;
388 	}
389 	return 1;
390 }
391 
392 static inline int timerlat_thread_exit(struct osnoise_variables *osn_var)
393 {
394 	struct timerlat_variables *tlat_var = this_cpu_tmr_var();
395 	/*
396 	 * If timerlat is enabled, but the timer IRQ handler did not
397 	 * run yet to set tracing_thread, do not trace.
398 	 */
399 	if (!tlat_var->tracing_thread) {
400 		osn_var->thread.delta_start = 0;
401 		osn_var->thread.arrival_time = 0;
402 		return 0;
403 	}
404 	return 1;
405 }
406 #else /* CONFIG_TIMERLAT_TRACER */
407 static inline bool timerlat_enabled(void)
408 {
409 	return false;
410 }
411 
412 static inline int timerlat_softirq_exit(struct osnoise_variables *osn_var)
413 {
414 	return 1;
415 }
416 static inline int timerlat_thread_exit(struct osnoise_variables *osn_var)
417 {
418 	return 1;
419 }
420 #endif
421 
422 #ifdef CONFIG_PREEMPT_RT
423 /*
424  * Print the osnoise header info.
425  */
426 static void print_osnoise_headers(struct seq_file *s)
427 {
428 	if (osnoise_data.tainted)
429 		seq_puts(s, "# osnoise is tainted!\n");
430 
431 	seq_puts(s, "#                                _-------=> irqs-off\n");
432 	seq_puts(s, "#                               / _------=> need-resched\n");
433 	seq_puts(s, "#                              | / _-----=> need-resched-lazy\n");
434 	seq_puts(s, "#                              || / _----=> hardirq/softirq\n");
435 	seq_puts(s, "#                              ||| / _---=> preempt-depth\n");
436 	seq_puts(s, "#                              |||| / _--=> preempt-lazy-depth\n");
437 	seq_puts(s, "#                              ||||| / _-=> migrate-disable\n");
438 
439 	seq_puts(s, "#                              |||||| /          ");
440 	seq_puts(s, "                                     MAX\n");
441 
442 	seq_puts(s, "#                              ||||| /                         ");
443 	seq_puts(s, "                    SINGLE      Interference counters:\n");
444 
445 	seq_puts(s, "#                              |||||||               RUNTIME   ");
446 	seq_puts(s, "   NOISE  %% OF CPU  NOISE    +-----------------------------+\n");
447 
448 	seq_puts(s, "#           TASK-PID      CPU# |||||||   TIMESTAMP    IN US    ");
449 	seq_puts(s, "   IN US  AVAILABLE  IN US     HW    NMI    IRQ   SIRQ THREAD\n");
450 
451 	seq_puts(s, "#              | |         |   |||||||      |           |      ");
452 	seq_puts(s, "       |    |            |      |      |      |      |      |\n");
453 }
454 #else /* CONFIG_PREEMPT_RT */
455 static void print_osnoise_headers(struct seq_file *s)
456 {
457 	if (osnoise_data.tainted)
458 		seq_puts(s, "# osnoise is tainted!\n");
459 
460 	seq_puts(s, "#                                _-----=> irqs-off\n");
461 	seq_puts(s, "#                               / _----=> need-resched\n");
462 	seq_puts(s, "#                              | / _---=> hardirq/softirq\n");
463 	seq_puts(s, "#                              || / _--=> preempt-depth\n");
464 	seq_puts(s, "#                              ||| / _-=> migrate-disable     ");
465 	seq_puts(s, "                    MAX\n");
466 	seq_puts(s, "#                              |||| /     delay               ");
467 	seq_puts(s, "                    SINGLE      Interference counters:\n");
468 
469 	seq_puts(s, "#                              |||||               RUNTIME   ");
470 	seq_puts(s, "   NOISE  %% OF CPU  NOISE    +-----------------------------+\n");
471 
472 	seq_puts(s, "#           TASK-PID      CPU# |||||   TIMESTAMP    IN US    ");
473 	seq_puts(s, "   IN US  AVAILABLE  IN US     HW    NMI    IRQ   SIRQ THREAD\n");
474 
475 	seq_puts(s, "#              | |         |   |||||      |           |      ");
476 	seq_puts(s, "       |    |            |      |      |      |      |      |\n");
477 }
478 #endif /* CONFIG_PREEMPT_RT */
479 
480 /*
481  * osnoise_taint - report an osnoise error.
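 *
 * Print the message to the trace buffer of every registered instance and
 * mark the tracer data as tainted.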
482  */
483 #define osnoise_taint(msg) ({							\
484 	struct osnoise_instance *inst;						\
485 	struct trace_buffer *buffer;						\
486 										\
487 	rcu_read_lock();							\
488 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {		\
489 		buffer = inst->tr->array_buffer.buffer;				\
490 		trace_array_printk_buf(buffer, _THIS_IP_, msg);			\
491 	}									\
492 	rcu_read_unlock();							\
493 	osnoise_data.tainted = true;						\
494 })
495 
496 /*
497  * Record an osnoise_sample into the tracer buffer.
498  */
499 static void
500 __trace_osnoise_sample(struct osnoise_sample *sample, struct trace_buffer *buffer)
501 {
502 	struct trace_event_call *call = &event_osnoise;
503 	struct ring_buffer_event *event;
504 	struct osnoise_entry *entry;
505 
506 	event = trace_buffer_lock_reserve(buffer, TRACE_OSNOISE, sizeof(*entry),
507 					  tracing_gen_ctx());
508 	if (!event)
509 		return;
510 	entry	= ring_buffer_event_data(event);
511 	entry->runtime		= sample->runtime;
512 	entry->noise		= sample->noise;
513 	entry->max_sample	= sample->max_sample;
514 	entry->hw_count		= sample->hw_count;
515 	entry->nmi_count	= sample->nmi_count;
516 	entry->irq_count	= sample->irq_count;
517 	entry->softirq_count	= sample->softirq_count;
518 	entry->thread_count	= sample->thread_count;
519 
520 	if (!call_filter_check_discard(call, entry, buffer, event))
521 		trace_buffer_unlock_commit_nostack(buffer, event);
522 }
523 
524 /*
525  * Record an osnoise_sample on all osnoise instances.
526  */
527 static void trace_osnoise_sample(struct osnoise_sample *sample)
528 {
529 	struct osnoise_instance *inst;
530 	struct trace_buffer *buffer;
531 
532 	rcu_read_lock();
533 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
534 		buffer = inst->tr->array_buffer.buffer;
535 		__trace_osnoise_sample(sample, buffer);
536 	}
537 	rcu_read_unlock();
538 }
539 
540 #ifdef CONFIG_TIMERLAT_TRACER
541 /*
542  * Print the timerlat header info.
543  */
544 #ifdef CONFIG_PREEMPT_RT
545 static void print_timerlat_headers(struct seq_file *s)
546 {
547 	seq_puts(s, "#                                _-------=> irqs-off\n");
548 	seq_puts(s, "#                               / _------=> need-resched\n");
549 	seq_puts(s, "#                              | / _-----=> need-resched-lazy\n");
550 	seq_puts(s, "#                              || / _----=> hardirq/softirq\n");
551 	seq_puts(s, "#                              ||| / _---=> preempt-depth\n");
552 	seq_puts(s, "#                              |||| / _--=> preempt-lazy-depth\n");
553 	seq_puts(s, "#                              ||||| / _-=> migrate-disable\n");
554 	seq_puts(s, "#                              |||||| /\n");
555 	seq_puts(s, "#                              |||||||             ACTIVATION\n");
556 	seq_puts(s, "#           TASK-PID      CPU# |||||||   TIMESTAMP    ID     ");
557 	seq_puts(s, "       CONTEXT                LATENCY\n");
558 	seq_puts(s, "#              | |         |   |||||||      |         |      ");
559 	seq_puts(s, "            |                       |\n");
560 }
561 #else /* CONFIG_PREEMPT_RT */
562 static void print_timerlat_headers(struct seq_file *s)
563 {
564 	seq_puts(s, "#                                _-----=> irqs-off\n");
565 	seq_puts(s, "#                               / _----=> need-resched\n");
566 	seq_puts(s, "#                              | / _---=> hardirq/softirq\n");
567 	seq_puts(s, "#                              || / _--=> preempt-depth\n");
568 	seq_puts(s, "#                              ||| / _-=> migrate-disable\n");
569 	seq_puts(s, "#                              |||| /     delay\n");
570 	seq_puts(s, "#                              |||||            ACTIVATION\n");
571 	seq_puts(s, "#           TASK-PID      CPU# |||||   TIMESTAMP   ID      ");
572 	seq_puts(s, "      CONTEXT                 LATENCY\n");
573 	seq_puts(s, "#              | |         |   |||||      |         |      ");
574 	seq_puts(s, "            |                       |\n");
575 }
576 #endif /* CONFIG_PREEMPT_RT */
577 
578 static void
579 __trace_timerlat_sample(struct timerlat_sample *sample, struct trace_buffer *buffer)
580 {
581 	struct trace_event_call *call = &event_osnoise;
582 	struct ring_buffer_event *event;
583 	struct timerlat_entry *entry;
584 
585 	event = trace_buffer_lock_reserve(buffer, TRACE_TIMERLAT, sizeof(*entry),
586 					  tracing_gen_ctx());
587 	if (!event)
588 		return;
589 	entry	= ring_buffer_event_data(event);
590 	entry->seqnum			= sample->seqnum;
591 	entry->context			= sample->context;
592 	entry->timer_latency		= sample->timer_latency;
593 
594 	if (!call_filter_check_discard(call, entry, buffer, event))
595 		trace_buffer_unlock_commit_nostack(buffer, event);
596 }
597 
598 /*
599  * Record a timerlat_sample on all osnoise instances.
600  */
601 static void trace_timerlat_sample(struct timerlat_sample *sample)
602 {
603 	struct osnoise_instance *inst;
604 	struct trace_buffer *buffer;
605 
606 	rcu_read_lock();
607 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
608 		buffer = inst->tr->array_buffer.buffer;
609 		__trace_timerlat_sample(sample, buffer);
610 	}
611 	rcu_read_unlock();
612 }
613 
614 #ifdef CONFIG_STACKTRACE
615 
616 #define	MAX_CALLS	256
617 
618 /*
619  * Stack trace will take place only at IRQ level, so, no need
620  * to control nesting here.
621  */
622 struct trace_stack {
623 	int		stack_size;
624 	int		nr_entries;
625 	unsigned long	calls[MAX_CALLS];
626 };
627 
628 static DEFINE_PER_CPU(struct trace_stack, trace_stack);
629 
630 /*
631  * timerlat_save_stack - save a stack trace without printing
632  *
633  * Save the current stack trace without printing. The
634  * stack will be printed later, after the end of the measurement.
635  */
636 static void timerlat_save_stack(int skip)
637 {
638 	unsigned int size, nr_entries;
639 	struct trace_stack *fstack;
640 
641 	fstack = this_cpu_ptr(&trace_stack);
642 
643 	size = ARRAY_SIZE(fstack->calls);
644 
645 	nr_entries = stack_trace_save(fstack->calls, size, skip);
646 
647 	fstack->stack_size = nr_entries * sizeof(unsigned long);
648 	fstack->nr_entries = nr_entries;
649 
650 	return;
651 
652 }
653 
654 static void
655 __timerlat_dump_stack(struct trace_buffer *buffer, struct trace_stack *fstack, unsigned int size)
656 {
657 	struct trace_event_call *call = &event_osnoise;
658 	struct ring_buffer_event *event;
659 	struct stack_entry *entry;
660 
661 	event = trace_buffer_lock_reserve(buffer, TRACE_STACK, sizeof(*entry) + size,
662 					  tracing_gen_ctx());
663 	if (!event)
664 		return;
665 
666 	entry = ring_buffer_event_data(event);
667 
668 	memcpy(&entry->caller, fstack->calls, size);
669 	entry->size = fstack->nr_entries;
670 
671 	if (!call_filter_check_discard(call, entry, buffer, event))
672 		trace_buffer_unlock_commit_nostack(buffer, event);
673 }
674 
675 /*
676  * timerlat_dump_stack - dump a stack trace previously saved
677  */
678 static void timerlat_dump_stack(u64 latency)
679 {
680 	struct osnoise_instance *inst;
681 	struct trace_buffer *buffer;
682 	struct trace_stack *fstack;
683 	unsigned int size;
684 
685 	/*
686 	 * trace only if latency > print_stack config, if enabled.
687 	 */
688 	if (!osnoise_data.print_stack || osnoise_data.print_stack > latency)
689 		return;
690 
691 	preempt_disable_notrace();
692 	fstack = this_cpu_ptr(&trace_stack);
693 	size = fstack->stack_size;
694 
695 	rcu_read_lock();
696 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
697 		buffer = inst->tr->array_buffer.buffer;
698 		__timerlat_dump_stack(buffer, fstack, size);
699 
700 	}
701 	rcu_read_unlock();
702 	preempt_enable_notrace();
703 }
704 #else /* CONFIG_STACKTRACE */
705 #define timerlat_dump_stack(latency) do {} while (0)
706 #define timerlat_save_stack(a) do {} while (0)
707 #endif /* CONFIG_STACKTRACE */
708 #endif /* CONFIG_TIMERLAT_TRACER */
709 
710 /*
711  * Macros to encapsulate the time capturing infrastructure.
712  */
713 #define time_get()	trace_clock_local()
714 #define time_to_us(x)	div_u64(x, 1000)
715 #define time_sub(a, b)	((a) - (b))
716 
717 /*
718  * cond_move_irq_delta_start - Forward the delta_start of a running IRQ
719  *
720  * If an IRQ is preempted by an NMI, its delta_start is pushed forward
721  * to discount the NMI interference.
722  *
723  * See get_int_safe_duration().
724  */
725 static inline void
726 cond_move_irq_delta_start(struct osnoise_variables *osn_var, u64 duration)
727 {
728 	if (osn_var->irq.delta_start)
729 		osn_var->irq.delta_start += duration;
730 }
731 
732 #ifndef CONFIG_PREEMPT_RT
733 /*
734  * cond_move_softirq_delta_start - Forward the delta_start of a running softirq.
735  *
736  * If a softirq is preempted by an IRQ or NMI, its delta_start is pushed
737  * forward to discount the interference.
738  *
739  * See get_int_safe_duration().
740  */
741 static inline void
742 cond_move_softirq_delta_start(struct osnoise_variables *osn_var, u64 duration)
743 {
744 	if (osn_var->softirq.delta_start)
745 		osn_var->softirq.delta_start += duration;
746 }
747 #else /* CONFIG_PREEMPT_RT */
748 #define cond_move_softirq_delta_start(osn_var, duration) do {} while (0)
749 #endif
750 
751 /*
752  * cond_move_thread_delta_start - Forward the delta_start of a running thread
753  *
754  * If a noisy thread is preempted by an softirq, IRQ or NMI, its delta_start
755  * is pushed forward to discount the interference.
756  *
757  * See get_int_safe_duration().
758  */
759 static inline void
760 cond_move_thread_delta_start(struct osnoise_variables *osn_var, u64 duration)
761 {
762 	if (osn_var->thread.delta_start)
763 		osn_var->thread.delta_start += duration;
764 }
765 
766 /*
767  * get_int_safe_duration - Get the duration of a window
768  *
769  * The irq, softirq and thread variables need to have their duration without
770  * the interference from higher priority interrupts. Instead of keeping a
771  * variable to discount the interrupt interference from these variables, the
772  * starting time of these variables are pushed forward with the interrupt's
773  * duration. In this way, a single variable is used to:
774  *
775  *   - Know if a given window is being measured.
776  *   - Account its duration.
777  *   - Discount the interference.
778  *
779  * To avoid getting inconsistent values, e.g.,:
780  *
781  *	now = time_get()
782  *		--->	interrupt!
783  *			delta_start -= int duration;
784  *		<---
785  *	duration = now - delta_start;
786  *
787  *	result: negative duration if the variable duration before the
788  *	interrupt was smaller than the interrupt execution.
789  *
790  * A counter of interrupts is used. If the counter increased, try
791  * to capture an interference safe duration.
792  */
793 static inline s64
794 get_int_safe_duration(struct osnoise_variables *osn_var, u64 *delta_start)
795 {
796 	u64 int_counter, now;
797 	s64 duration;
798 
799 	do {
800 		int_counter = local_read(&osn_var->int_counter);
801 		/* synchronize with interrupts */
802 		barrier();
803 
804 		now = time_get();
805 		duration = (now - *delta_start);
806 
807 		/* synchronize with interrupts */
808 		barrier();
809 	} while (int_counter != local_read(&osn_var->int_counter));
810 
811 	/*
812 	 * This is evidence of a race condition that causes
813 	 * a value to be "discounted" too much.
814 	 */
815 	if (duration < 0)
816 		osnoise_taint("Negative duration!\n");
817 
818 	*delta_start = 0;
819 
820 	return duration;
821 }
822 
823 /*
824  *
825  * set_int_safe_time - Save the current time on *time, aware of interference
826  *
827  * Get the time, taking into consideration a possible interference from
828  * higher priority interrupts.
829  *
830  * See get_int_safe_duration() for an explanation.
831  */
832 static u64
833 set_int_safe_time(struct osnoise_variables *osn_var, u64 *time)
834 {
835 	u64 int_counter;
836 
837 	do {
838 		int_counter = local_read(&osn_var->int_counter);
839 		/* synchronize with interrupts */
840 		barrier();
841 
842 		*time = time_get();
843 
844 		/* synchronize with interrupts */
845 		barrier();
846 	} while (int_counter != local_read(&osn_var->int_counter));
847 
848 	return int_counter;
849 }
850 
851 #ifdef CONFIG_TIMERLAT_TRACER
852 /*
853  * copy_int_safe_time - Copy *src into *dst aware of interference
854  */
855 static u64
856 copy_int_safe_time(struct osnoise_variables *osn_var, u64 *dst, u64 *src)
857 {
858 	u64 int_counter;
859 
860 	do {
861 		int_counter = local_read(&osn_var->int_counter);
862 		/* synchronize with interrupts */
863 		barrier();
864 
865 		*dst = *src;
866 
867 		/* synchronize with interrupts */
868 		barrier();
869 	} while (int_counter != local_read(&osn_var->int_counter));
870 
871 	return int_counter;
872 }
873 #endif /* CONFIG_TIMERLAT_TRACER */
874 
875 /*
876  * trace_osnoise_callback - NMI entry/exit callback
877  *
878  * This function is called by the NMI entry and exit code. The bool enter
879  * distinguishes between either case. This function is used to note an NMI
880  * occurrence, compute the noise caused by the NMI, and to remove the noise
881  * it is potentially causing on other interference variables.
882  */
883 void trace_osnoise_callback(bool enter)
884 {
885 	struct osnoise_variables *osn_var = this_cpu_osn_var();
886 	u64 duration;
887 
888 	if (!osn_var->sampling)
889 		return;
890 
891 	/*
892 	 * Currently trace_clock_local() calls sched_clock() and the
893 	 * generic version is not NMI safe.
894 	 */
895 	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
896 		if (enter) {
897 			osn_var->nmi.delta_start = time_get();
898 			local_inc(&osn_var->int_counter);
899 		} else {
900 			duration = time_get() - osn_var->nmi.delta_start;
901 
902 			trace_nmi_noise(osn_var->nmi.delta_start, duration);
903 
904 			cond_move_irq_delta_start(osn_var, duration);
905 			cond_move_softirq_delta_start(osn_var, duration);
906 			cond_move_thread_delta_start(osn_var, duration);
907 		}
908 	}
909 
910 	if (enter)
911 		osn_var->nmi.count++;
912 }
913 
914 /*
915  * osnoise_trace_irq_entry - Note the starting of an IRQ
916  *
917  * Save the starting time of an IRQ. As IRQs are non-preemptive to other IRQs,
918  * it is safe to use a single variable (osn_var->irq) to save the statistics.
919  * The arrival_time is used to report... the arrival time. The delta_start
920  * is used to compute the duration at the IRQ exit handler. See
921  * cond_move_irq_delta_start().
922  */
923 void osnoise_trace_irq_entry(int id)
924 {
925 	struct osnoise_variables *osn_var = this_cpu_osn_var();
926 
927 	if (!osn_var->sampling)
928 		return;
929 	/*
930 	 * This value will be used in the report, but not to compute
931 	 * the execution time, so it is safe to get it unsafe.
932 	 */
933 	osn_var->irq.arrival_time = time_get();
934 	set_int_safe_time(osn_var, &osn_var->irq.delta_start);
935 	osn_var->irq.count++;
936 
937 	local_inc(&osn_var->int_counter);
938 }
939 
940 /*
941  * osnoise_trace_irq_exit - Note the end of an IRQ, save data and trace
942  *
943  * Computes the duration of the IRQ noise and traces it. Also discounts the
944  * interference from other sources of noise that could currently be accounted.
945  */
946 void osnoise_trace_irq_exit(int id, const char *desc)
947 {
948 	struct osnoise_variables *osn_var = this_cpu_osn_var();
949 	s64 duration;
950 
951 	if (!osn_var->sampling)
952 		return;
953 
954 	duration = get_int_safe_duration(osn_var, &osn_var->irq.delta_start);
955 	trace_irq_noise(id, desc, osn_var->irq.arrival_time, duration);
956 	osn_var->irq.arrival_time = 0;
957 	cond_move_softirq_delta_start(osn_var, duration);
958 	cond_move_thread_delta_start(osn_var, duration);
959 }
960 
961 /*
962  * trace_irqentry_callback - Callback to the irq:irq_handler_entry trace event
963  *
964  * Used to note the starting of an IRQ occurrence.
965  */
966 static void trace_irqentry_callback(void *data, int irq,
967 				    struct irqaction *action)
968 {
969 	osnoise_trace_irq_entry(irq);
970 }
971 
972 /*
973  * trace_irqexit_callback - Callback to the irq:irq_handler_exit trace event
974  *
975  * Used to note the end of an IRQ occurrence.
976  */
977 static void trace_irqexit_callback(void *data, int irq,
978 				   struct irqaction *action, int ret)
979 {
980 	osnoise_trace_irq_exit(irq, action->name);
981 }
982 
983 /*
984  * arch specific register function.
985  */
986 int __weak osnoise_arch_register(void)
987 {
988 	return 0;
989 }
990 
991 /*
992  * arch specific unregister function.
993  */
994 void __weak osnoise_arch_unregister(void)
995 {
996 	return;
997 }
998 
999 /*
1000  * hook_irq_events - Hook IRQ handling events
1001  *
1002  * This function hooks the IRQ related callbacks to the respective trace
1003  * events.
1004  */
1005 static int hook_irq_events(void)
1006 {
1007 	int ret;
1008 
1009 	ret = register_trace_irq_handler_entry(trace_irqentry_callback, NULL);
1010 	if (ret)
1011 		goto out_err;
1012 
1013 	ret = register_trace_irq_handler_exit(trace_irqexit_callback, NULL);
1014 	if (ret)
1015 		goto out_unregister_entry;
1016 
1017 	ret = osnoise_arch_register();
1018 	if (ret)
1019 		goto out_irq_exit;
1020 
1021 	return 0;
1022 
1023 out_irq_exit:
1024 	unregister_trace_irq_handler_exit(trace_irqexit_callback, NULL);
1025 out_unregister_entry:
1026 	unregister_trace_irq_handler_entry(trace_irqentry_callback, NULL);
1027 out_err:
1028 	return -EINVAL;
1029 }
1030 
1031 /*
1032  * unhook_irq_events - Unhook IRQ handling events
1033  *
1034  * This function unhooks the IRQ related callbacks to the respective trace
1035  * events.
1036  */
1037 static void unhook_irq_events(void)
1038 {
1039 	osnoise_arch_unregister();
1040 	unregister_trace_irq_handler_exit(trace_irqexit_callback, NULL);
1041 	unregister_trace_irq_handler_entry(trace_irqentry_callback, NULL);
1042 }
1043 
1044 #ifndef CONFIG_PREEMPT_RT
1045 /*
1046  * trace_softirq_entry_callback - Note the starting of a softirq
1047  *
1048  * Save the starting time of a softirq. As softirqs are non-preemptive to
1049  * other softirqs, it is safe to use a single variable (osn_var->softirq)
1050  * to save the statistics. The arrival_time is used to report... the
1051  * arrival time. The delta_start is used to compute the duration at the
1052  * softirq exit handler. See cond_move_softirq_delta_start().
1053  */
1054 static void trace_softirq_entry_callback(void *data, unsigned int vec_nr)
1055 {
1056 	struct osnoise_variables *osn_var = this_cpu_osn_var();
1057 
1058 	if (!osn_var->sampling)
1059 		return;
1060 	/*
1061 	 * This value will be used in the report, but not to compute
1062 	 * the execution time, so it is safe to get it unsafe.
1063 	 */
1064 	osn_var->softirq.arrival_time = time_get();
1065 	set_int_safe_time(osn_var, &osn_var->softirq.delta_start);
1066 	osn_var->softirq.count++;
1067 
1068 	local_inc(&osn_var->int_counter);
1069 }
1070 
1071 /*
1072  * trace_softirq_exit_callback - Note the end of a softirq
1073  *
1074  * Computes the duration of the softirq noise and traces it. Also discounts the
1075  * interference from other sources of noise that could currently be accounted.
1076  */
1077 static void trace_softirq_exit_callback(void *data, unsigned int vec_nr)
1078 {
1079 	struct osnoise_variables *osn_var = this_cpu_osn_var();
1080 	s64 duration;
1081 
1082 	if (!osn_var->sampling)
1083 		return;
1084 
1085 	if (unlikely(timerlat_enabled()))
1086 		if (!timerlat_softirq_exit(osn_var))
1087 			return;
1088 
1089 	duration = get_int_safe_duration(osn_var, &osn_var->softirq.delta_start);
1090 	trace_softirq_noise(vec_nr, osn_var->softirq.arrival_time, duration);
1091 	cond_move_thread_delta_start(osn_var, duration);
1092 	osn_var->softirq.arrival_time = 0;
1093 }
1094 
1095 /*
1096  * hook_softirq_events - Hook softirq handling events
1097  *
1098  * This function hooks the softirq related callbacks to the respective trace
1099  * events.
1100  */
1101 static int hook_softirq_events(void)
1102 {
1103 	int ret;
1104 
1105 	ret = register_trace_softirq_entry(trace_softirq_entry_callback, NULL);
1106 	if (ret)
1107 		goto out_err;
1108 
1109 	ret = register_trace_softirq_exit(trace_softirq_exit_callback, NULL);
1110 	if (ret)
1111 		goto out_unreg_entry;
1112 
1113 	return 0;
1114 
1115 out_unreg_entry:
1116 	unregister_trace_softirq_entry(trace_softirq_entry_callback, NULL);
1117 out_err:
1118 	return -EINVAL;
1119 }
1120 
1121 /*
1122  * unhook_softirq_events - Unhook softirq handling events
1123  *
1124  * This function unhooks the softirq related callbacks from the respective trace
1125  * events.
1126  */
1127 static void unhook_softirq_events(void)
1128 {
1129 	unregister_trace_softirq_entry(trace_softirq_entry_callback, NULL);
1130 	unregister_trace_softirq_exit(trace_softirq_exit_callback, NULL);
1131 }
1132 #else /* CONFIG_PREEMPT_RT */
1133 /*
1134  * softirqs are threads in the PREEMPT_RT mode.
1135  */
1136 static int hook_softirq_events(void)
1137 {
1138 	return 0;
1139 }
1140 static void unhook_softirq_events(void)
1141 {
1142 }
1143 #endif
1144 
1145 /*
1146  * thread_entry - Record the starting of a thread noise window
1147  *
1148  * It saves the context switch time for a noisy thread, and increments
1149  * the interference counters.
1150  */
1151 static void
1152 thread_entry(struct osnoise_variables *osn_var, struct task_struct *t)
1153 {
1154 	if (!osn_var->sampling)
1155 		return;
1156 	/*
1157 	 * The arrival time will be used in the report, but not to compute
1158 	 * the execution time, so it is safe to get it unsafe.
1159 	 */
1160 	osn_var->thread.arrival_time = time_get();
1161 
1162 	set_int_safe_time(osn_var, &osn_var->thread.delta_start);
1163 
1164 	osn_var->thread.count++;
1165 	local_inc(&osn_var->int_counter);
1166 }
1167 
1168 /*
1169  * thread_exit - Report the end of a thread noise window
1170  *
1171  * It computes the total noise from a thread, tracing if needed.
1172  */
1173 static void
1174 thread_exit(struct osnoise_variables *osn_var, struct task_struct *t)
1175 {
1176 	s64 duration;
1177 
1178 	if (!osn_var->sampling)
1179 		return;
1180 
1181 	if (unlikely(timerlat_enabled()))
1182 		if (!timerlat_thread_exit(osn_var))
1183 			return;
1184 
1185 	duration = get_int_safe_duration(osn_var, &osn_var->thread.delta_start);
1186 
1187 	trace_thread_noise(t, osn_var->thread.arrival_time, duration);
1188 
1189 	osn_var->thread.arrival_time = 0;
1190 }
1191 
1192 #ifdef CONFIG_TIMERLAT_TRACER
1193 /*
1194  * osnoise_stop_exception - Stop tracing and the tracer.
1195  */
1196 static __always_inline void osnoise_stop_exception(char *msg, int cpu)
1197 {
1198 	struct osnoise_instance *inst;
1199 	struct trace_array *tr;
1200 
1201 	rcu_read_lock();
1202 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
1203 		tr = inst->tr;
1204 		trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
1205 				       "stop tracing hit on cpu %d due to exception: %s\n",
1206 				       smp_processor_id(),
1207 				       msg);
1208 
1209 		if (test_bit(OSN_PANIC_ON_STOP, &osnoise_options))
1210 			panic("tracer hit on cpu %d due to exception: %s\n",
1211 			      smp_processor_id(),
1212 			      msg);
1213 
1214 		tracer_tracing_off(tr);
1215 	}
1216 	rcu_read_unlock();
1217 }
1218 
1219 /*
1220  * trace_sched_migrate_callback - sched:sched_migrate_task trace event handler
1221  *
1222  * This function is hooked to the sched:sched_migrate_task trace event, and monitors
1223  * timerlat user-space thread migration.
1224  */
1225 static void trace_sched_migrate_callback(void *data, struct task_struct *p, int dest_cpu)
1226 {
1227 	struct osnoise_variables *osn_var;
1228 	long cpu = task_cpu(p);
1229 
1230 	osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
1231 	if (osn_var->pid == p->pid && dest_cpu != cpu) {
1232 		per_cpu_ptr(&per_cpu_timerlat_var, cpu)->uthread_migrate = 1;
1233 		osnoise_taint("timerlat user-thread migrated\n");
1234 		osnoise_stop_exception("timerlat user-thread migrated", cpu);
1235 	}
1236 }
1237 
1238 static bool monitor_enabled;
1239 
1240 static int register_migration_monitor(void)
1241 {
1242 	int ret = 0;
1243 
1244 	/*
1245 	 * Timerlat thread migration check is only required when running timerlat in user-space.
1246 	 * Thus, enable callback only if timerlat is set with no workload.
1247 	 */
1248 	if (timerlat_enabled() && !test_bit(OSN_WORKLOAD, &osnoise_options)) {
1249 		if (WARN_ON_ONCE(monitor_enabled))
1250 			return 0;
1251 
1252 		ret = register_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
1253 		if (!ret)
1254 			monitor_enabled = true;
1255 	}
1256 
1257 	return ret;
1258 }
1259 
1260 static void unregister_migration_monitor(void)
1261 {
1262 	if (!monitor_enabled)
1263 		return;
1264 
1265 	unregister_trace_sched_migrate_task(trace_sched_migrate_callback, NULL);
1266 	monitor_enabled = false;
1267 }
1268 #else
1269 static int register_migration_monitor(void)
1270 {
1271 	return 0;
1272 }
1273 static void unregister_migration_monitor(void) {}
1274 #endif
1275 /*
1276  * trace_sched_switch - sched:sched_switch trace event handler
1277  *
1278  * This function is hooked to the sched:sched_switch trace event, and it is
1279  * used to record the beginning and to report the end of a thread noise window.
1280  */
1281 static void
1282 trace_sched_switch_callback(void *data, bool preempt,
1283 			    struct task_struct *p,
1284 			    struct task_struct *n,
1285 			    unsigned int prev_state)
1286 {
1287 	struct osnoise_variables *osn_var = this_cpu_osn_var();
1288 	int workload = test_bit(OSN_WORKLOAD, &osnoise_options);
1289 
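	/*
	 * When the OSNOISE_WORKLOAD option is disabled, every thread is
	 * accounted as noise; otherwise, the workload thread registered in
	 * osn_var->pid is not accounted.
	 */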
1290 	if ((p->pid != osn_var->pid) || !workload)
1291 		thread_exit(osn_var, p);
1292 
1293 	if ((n->pid != osn_var->pid) || !workload)
1294 		thread_entry(osn_var, n);
1295 }
1296 
1297 /*
1298  * hook_thread_events - Hook the instrumentation for thread noise
1299  *
1300  * Hook the osnoise tracer callbacks to handle the noise from other
1301  * threads on the necessary kernel events.
1302  */
1303 static int hook_thread_events(void)
1304 {
1305 	int ret;
1306 
1307 	ret = register_trace_sched_switch(trace_sched_switch_callback, NULL);
1308 	if (ret)
1309 		return -EINVAL;
1310 
1311 	ret = register_migration_monitor();
1312 	if (ret)
1313 		goto out_unreg;
1314 
1315 	return 0;
1316 
1317 out_unreg:
1318 	unregister_trace_sched_switch(trace_sched_switch_callback, NULL);
1319 	return -EINVAL;
1320 }
1321 
1322 /*
1323  * unhook_thread_events - unhook the instrumentation for thread noise
1324  *
1325  * Unhook the osnoise tracer callbacks to handle the noise from other
1326  * threads on the necessary kernel events.
1327  */
1328 static void unhook_thread_events(void)
1329 {
1330 	unregister_trace_sched_switch(trace_sched_switch_callback, NULL);
1331 	unregister_migration_monitor();
1332 }
1333 
1334 /*
1335  * save_osn_sample_stats - Save the osnoise_sample statistics
1336  *
1337  * Save the osnoise_sample statistics before the sampling phase. These
1338  * values will be used later to compute the diff between the statistics
1339  * before and after the osnoise sampling.
1340  */
1341 static void
1342 save_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *s)
1343 {
1344 	s->nmi_count = osn_var->nmi.count;
1345 	s->irq_count = osn_var->irq.count;
1346 	s->softirq_count = osn_var->softirq.count;
1347 	s->thread_count = osn_var->thread.count;
1348 }
1349 
1350 /*
1351  * diff_osn_sample_stats - Compute the osnoise_sample statistics
1352  *
1353  * After a sample period, compute the difference on the osnoise_sample
1354  * statistics. The struct osnoise_sample *s contains the statistics saved via
1355  * save_osn_sample_stats() before the osnoise sampling.
1356  */
1357 static void
1358 diff_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *s)
1359 {
1360 	s->nmi_count = osn_var->nmi.count - s->nmi_count;
1361 	s->irq_count = osn_var->irq.count - s->irq_count;
1362 	s->softirq_count = osn_var->softirq.count - s->softirq_count;
1363 	s->thread_count = osn_var->thread.count - s->thread_count;
1364 }
1365 
1366 /*
1367  * osnoise_stop_tracing - Stop tracing and the tracer.
1368  */
1369 static __always_inline void osnoise_stop_tracing(void)
1370 {
1371 	struct osnoise_instance *inst;
1372 	struct trace_array *tr;
1373 
1374 	rcu_read_lock();
1375 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
1376 		tr = inst->tr;
1377 		trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
1378 				"stop tracing hit on cpu %d\n", smp_processor_id());
1379 
1380 		if (test_bit(OSN_PANIC_ON_STOP, &osnoise_options))
1381 			panic("tracer hit stop condition on CPU %d\n", smp_processor_id());
1382 
1383 		tracer_tracing_off(tr);
1384 	}
1385 	rcu_read_unlock();
1386 }
1387 
1388 /*
1389  * osnoise_has_tracing_on - Check if there is at least one instance on
1390  */
1391 static __always_inline int osnoise_has_tracing_on(void)
1392 {
1393 	struct osnoise_instance *inst;
1394 	int trace_is_on = 0;
1395 
1396 	rcu_read_lock();
1397 	list_for_each_entry_rcu(inst, &osnoise_instances, list)
1398 		trace_is_on += tracer_tracing_is_on(inst->tr);
1399 	rcu_read_unlock();
1400 
1401 	return trace_is_on;
1402 }
1403 
1404 /*
1405  * notify_new_max_latency - Notify a new max latency via fsnotify interface.
1406  */
1407 static void notify_new_max_latency(u64 latency)
1408 {
1409 	struct osnoise_instance *inst;
1410 	struct trace_array *tr;
1411 
1412 	rcu_read_lock();
1413 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
1414 		tr = inst->tr;
1415 		if (tracer_tracing_is_on(tr) && tr->max_latency < latency) {
1416 			tr->max_latency = latency;
1417 			latency_fsnotify(tr);
1418 		}
1419 	}
1420 	rcu_read_unlock();
1421 }
1422 
1423 /*
1424  * run_osnoise - Sample the time and look for osnoise
1425  *
1426  * Used to capture the time, looking for potential osnoise latency repeatedly.
1427  * Different from hwlat_detector, it is called with preemption and interrupts
1428  * enabled. This allows irqs, softirqs and threads to run, interfering on the
1429  * osnoise sampling thread, as they would do with a regular thread.
1430  */
1431 static int run_osnoise(void)
1432 {
1433 	bool disable_irq = test_bit(OSN_IRQ_DISABLE, &osnoise_options);
1434 	struct osnoise_variables *osn_var = this_cpu_osn_var();
1435 	u64 start, sample, last_sample;
1436 	u64 last_int_count, int_count;
1437 	s64 noise = 0, max_noise = 0;
1438 	s64 total, last_total = 0;
1439 	struct osnoise_sample s;
1440 	bool disable_preemption;
1441 	unsigned int threshold;
1442 	u64 runtime, stop_in;
1443 	u64 sum_noise = 0;
1444 	int hw_count = 0;
1445 	int ret = -1;
1446 
1447 	/*
1448 	 * Disabling preemption is only required if IRQs are enabled,
1449 	 * and the option is set.
1450 	 */
1451 	disable_preemption = !disable_irq && test_bit(OSN_PREEMPT_DISABLE, &osnoise_options);
1452 
1453 	/*
1454 	 * Considers the current thread as the workload.
1455 	 */
1456 	osn_var->pid = current->pid;
1457 
1458 	/*
1459 	 * Save the current stats for the diff
1460 	 */
1461 	save_osn_sample_stats(osn_var, &s);
1462 
1463 	/*
1464 	 * if threshold is 0, use the default value of 5 us.
1465 	 */
1466 	threshold = tracing_thresh ? : 5000;
1467 
1468 	/*
1469 	 * Apply PREEMPT and IRQ disabled options.
1470 	 */
1471 	if (disable_irq)
1472 		local_irq_disable();
1473 
1474 	if (disable_preemption)
1475 		preempt_disable();
1476 
1477 	/*
1478 	 * Make sure NMIs see sampling first
1479 	 */
1480 	osn_var->sampling = true;
1481 	barrier();
1482 
1483 	/*
1484 	 * Transform the *_us config to nanoseconds to avoid the
1485 	 * division on the main loop.
1486 	 */
1487 	runtime = osnoise_data.sample_runtime * NSEC_PER_USEC;
1488 	stop_in = osnoise_data.stop_tracing * NSEC_PER_USEC;
1489 
1490 	/*
1491 	 * Start timestamp
1492 	 */
1493 	start = time_get();
1494 
1495 	/*
1496 	 * "previous" loop.
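	 *
	 * Take an initial sample before the loop, so the first iteration has
	 * a "previous" timestamp and interrupt count to diff against.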
1497 	 */
1498 	last_int_count = set_int_safe_time(osn_var, &last_sample);
1499 
1500 	do {
1501 		/*
1502 		 * Get sample!
1503 		 */
1504 		int_count = set_int_safe_time(osn_var, &sample);
1505 
1506 		noise = time_sub(sample, last_sample);
1507 
1508 		/*
1509 		 * This shouldn't happen.
1510 		 */
1511 		if (noise < 0) {
1512 			osnoise_taint("negative noise!");
1513 			goto out;
1514 		}
1515 
1516 		/*
1517 		 * Sample runtime.
1518 		 */
1519 		total = time_sub(sample, start);
1520 
1521 		/*
1522 		 * Check for possible overflows.
1523 		 */
1524 		if (total < last_total) {
1525 			osnoise_taint("total overflow!");
1526 			break;
1527 		}
1528 
1529 		last_total = total;
1530 
1531 		if (noise >= threshold) {
1532 			int interference = int_count - last_int_count;
1533 
1534 			if (noise > max_noise)
1535 				max_noise = noise;
1536 
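			/*
			 * No IRQ, softirq, NMI, or thread switch was counted
			 * during this noise window: attribute the noise to
			 * hardware or hypervisor interference.
			 */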
1537 			if (!interference)
1538 				hw_count++;
1539 
1540 			sum_noise += noise;
1541 
1542 			trace_sample_threshold(last_sample, noise, interference);
1543 
1544 			if (osnoise_data.stop_tracing)
1545 				if (noise > stop_in)
1546 					osnoise_stop_tracing();
1547 		}
1548 
1549 		/*
1550 		 * In some cases, notably when running on a nohz_full CPU with
1551 		 * a stopped tick, PREEMPT_RCU has no way to account for QSs.
1552 		 * This will eventually cause unwarranted noise as PREEMPT_RCU
1553 		 * will force preemption as the means of ending the current
1554 		 * grace period. We avoid this problem by calling
1555 		 * rcu_momentary_dyntick_idle(), which performs a zero duration
1556 		 * EQS allowing PREEMPT_RCU to end the current grace period.
1557 		 * This call shouldn't be wrapped inside an RCU critical
1558 		 * section.
1559 		 *
1560 		 * Note that in non PREEMPT_RCU kernels QSs are handled through
1561 		 * cond_resched()
1562 		 */
1563 		if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
1564 			if (!disable_irq)
1565 				local_irq_disable();
1566 
1567 			rcu_momentary_dyntick_idle();
1568 
1569 			if (!disable_irq)
1570 				local_irq_enable();
1571 		}
1572 
1573 		/*
1574 		 * For the non-preemptive kernel config: let threads run, if
1575 		 * they so wish, unless set not to do so.
1576 		 */
1577 		if (!disable_irq && !disable_preemption)
1578 			cond_resched();
1579 
1580 		last_sample = sample;
1581 		last_int_count = int_count;
1582 
1583 	} while (total < runtime && !kthread_should_stop());
1584 
1585 	/*
1586 	 * Make sure interrupts see the sampling loop above as finished.
1587 	 */
1588 	barrier();
1589 
1590 	osn_var->sampling = false;
1591 
1592 	/*
1593 	 * Make sure sampling data is no longer updated.
1594 	 */
1595 	barrier();
1596 
1597 	/*
1598 	 * Return to the preemptive state.
1599 	 */
1600 	if (disable_preemption)
1601 		preempt_enable();
1602 
1603 	if (disable_irq)
1604 		local_irq_enable();
1605 
1606 	/*
1607 	 * Save noise info.
1608 	 */
1609 	s.noise = time_to_us(sum_noise);
1610 	s.runtime = time_to_us(total);
1611 	s.max_sample = time_to_us(max_noise);
1612 	s.hw_count = hw_count;
1613 
1614 	/* Save interference stats info */
1615 	diff_osn_sample_stats(osn_var, &s);
1616 
1617 	trace_osnoise_sample(&s);
1618 
1619 	notify_new_max_latency(max_noise);
1620 
1621 	if (osnoise_data.stop_tracing_total)
1622 		if (s.noise > osnoise_data.stop_tracing_total)
1623 			osnoise_stop_tracing();
1624 
1625 	return 0;
1626 out:
1627 	return ret;
1628 }
1629 
1630 static struct cpumask osnoise_cpumask;
1631 static struct cpumask save_cpumask;
1632 static struct cpumask kthread_cpumask;
1633 
1634 /*
1635  * osnoise_sleep - sleep until the next period
1636  */
1637 static void osnoise_sleep(bool skip_period)
1638 {
1639 	u64 interval;
1640 	ktime_t wake_time;
1641 
1642 	mutex_lock(&interface_lock);
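	/*
	 * Sleep for a whole period when skipping it, otherwise only for the
	 * idle portion of the period (period - runtime).
	 */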
1643 	if (skip_period)
1644 		interval = osnoise_data.sample_period;
1645 	else
1646 		interval = osnoise_data.sample_period - osnoise_data.sample_runtime;
1647 	mutex_unlock(&interface_lock);
1648 
1649 	/*
1650 	 * Differently from the hwlat_detector, the osnoise tracer can run
1651 	 * without a pause because preemption is on.
1652 	 */
1653 	if (!interval) {
1654 		/* Let synchronize_rcu_tasks() make progress */
1655 		cond_resched_tasks_rcu_qs();
1656 		return;
1657 	}
1658 
1659 	wake_time = ktime_add_us(ktime_get(), interval);
1660 	__set_current_state(TASK_INTERRUPTIBLE);
1661 
1662 	while (schedule_hrtimeout(&wake_time, HRTIMER_MODE_ABS)) {
1663 		if (kthread_should_stop())
1664 			break;
1665 	}
1666 }
1667 
1668 /*
1669  * osnoise_migration_pending - checks if the task needs to migrate
1670  *
1671  * osnoise/timerlat threads are per-cpu. If there is a pending request to
1672  * migrate the thread away from the current CPU, something bad has happened.
1673  * Play the good citizen and leave.
1674  *
1675  * Returns 0 if it is safe to continue, 1 otherwise.
1676  */
1677 static inline int osnoise_migration_pending(void)
1678 {
1679 	if (!current->migration_pending)
1680 		return 0;
1681 
1682 	/*
1683 	 * If migration is pending, there is a task waiting for the
1684 	 * tracer to enable migration. The tracer does not allow migration,
1685 	 * thus: taint and leave to unblock the blocked thread.
1686 	 */
1687 	osnoise_taint("migration requested to osnoise threads, leaving.");
1688 
1689 	/*
1690 	 * Unset this thread from the threads managed by the interface.
1691 	 * The tracers are responsible for cleaning their env before
1692 	 * exiting.
1693 	 */
1694 	mutex_lock(&interface_lock);
1695 	this_cpu_osn_var()->kthread = NULL;
1696 	cpumask_clear_cpu(smp_processor_id(), &kthread_cpumask);
1697 	mutex_unlock(&interface_lock);
1698 
1699 	return 1;
1700 }
1701 
1702 /*
1703  * osnoise_main - The osnoise detection kernel thread
1704  *
1705  * Calls run_osnoise() function to measure the osnoise for the configured runtime,
1706  * every period.
1707  */
1708 static int osnoise_main(void *data)
1709 {
1710 	unsigned long flags;
1711 
1712 	/*
1713 	 * This thread was created pinned to the CPU using PF_NO_SETAFFINITY.
1714 	 * The problem is that cgroups do not allow PF_NO_SETAFFINITY threads.
1715 	 *
1716 	 * To work around this limitation, disable migration and remove the
1717 	 * flag.
1718 	 */
1719 	migrate_disable();
1720 	raw_spin_lock_irqsave(&current->pi_lock, flags);
1721 	current->flags &= ~(PF_NO_SETAFFINITY);
1722 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1723 
1724 	while (!kthread_should_stop()) {
1725 		if (osnoise_migration_pending())
1726 			break;
1727 
1728 		/* skip a period if tracing is off on all instances */
1729 		if (!osnoise_has_tracing_on()) {
1730 			osnoise_sleep(true);
1731 			continue;
1732 		}
1733 
1734 		run_osnoise();
1735 		osnoise_sleep(false);
1736 	}
1737 
1738 	migrate_enable();
1739 	return 0;
1740 }
1741 
1742 #ifdef CONFIG_TIMERLAT_TRACER
1743 /*
1744  * timerlat_irq - hrtimer handler for timerlat.
1745  */
1746 static enum hrtimer_restart timerlat_irq(struct hrtimer *timer)
1747 {
1748 	struct osnoise_variables *osn_var = this_cpu_osn_var();
1749 	struct timerlat_variables *tlat;
1750 	struct timerlat_sample s;
1751 	u64 now;
1752 	u64 diff;
1753 
1754 	/*
1755 	 * I am not sure if the timer was armed for this CPU. So, get
1756 	 * the timerlat struct from the timer itself, not from this
1757 	 * CPU.
1758 	 */
1759 	tlat = container_of(timer, struct timerlat_variables, timer);
1760 
1761 	now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
1762 
1763 	/*
1764 	 * Enable the osnoise: events for thread and softirq.
1765 	 */
1766 	tlat->tracing_thread = true;
1767 
1768 	osn_var->thread.arrival_time = time_get();
1769 
1770 	/*
1771 	 * A hardirq is running: the timer IRQ. It is for sure preempting
1772 	 * a thread, and potentially preempting a softirq.
1773 	 *
1774 	 * At this point, it is not interesting to know the duration of the
1775 	 * preempted thread (and maybe softirq), but how much time they will
1776 	 * delay the beginning of the execution of the timer thread.
1777 	 *
1778 	 * To get the correct (net) delay added by the softirq, its delta_start
1779 	 * is set as the IRQ one. In this way, at the return of the IRQ, the delta
1780 	 * start of the softirq will be zeroed, accounting only the time
1781 	 * after that.
1782 	 *
1783 	 * The thread follows the same principle. However, if a softirq is
1784 	 * running, the thread needs to receive the softirq delta_start. The
1785 	 * reason is that the softirq will be the last to be unfolded,
1786 	 * resetting the thread delay to zero.
1787 	 *
1788 	 * The PREEMPT_RT is a special case, though. As softirqs run as threads
1789 	 * on RT, moving the thread is enough.
1790 	 */
1791 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && osn_var->softirq.delta_start) {
1792 		copy_int_safe_time(osn_var, &osn_var->thread.delta_start,
1793 				   &osn_var->softirq.delta_start);
1794 
1795 		copy_int_safe_time(osn_var, &osn_var->softirq.delta_start,
1796 				    &osn_var->irq.delta_start);
1797 	} else {
1798 		copy_int_safe_time(osn_var, &osn_var->thread.delta_start,
1799 				    &osn_var->irq.delta_start);
1800 	}
1801 
1802 	/*
1803 	 * Compute the difference between the current time and the expected (absolute) period time.
1804 	 */
1805 	diff = now - tlat->abs_period;
1806 
1807 	tlat->count++;
1808 	s.seqnum = tlat->count;
1809 	s.timer_latency = diff;
1810 	s.context = IRQ_CONTEXT;
1811 
1812 	trace_timerlat_sample(&s);
1813 
1814 	if (osnoise_data.stop_tracing) {
1815 		if (time_to_us(diff) >= osnoise_data.stop_tracing) {
1816 
1817 			/*
1818 			 * At this point, if stop_tracing is set and <= print_stack,
1819 			 * print_stack is also set, and the stack would be printed in the thread handler.
1820 			 *
1821 			 * Thus, print the stack trace here, as it is helpful to pinpoint the
1822 			 * root cause of an IRQ latency.
1823 			 */
1824 			if (osnoise_data.stop_tracing <= osnoise_data.print_stack) {
1825 				timerlat_save_stack(0);
1826 				timerlat_dump_stack(time_to_us(diff));
1827 			}
1828 
1829 			osnoise_stop_tracing();
1830 			notify_new_max_latency(diff);
1831 
1832 			wake_up_process(tlat->kthread);
1833 
1834 			return HRTIMER_NORESTART;
1835 		}
1836 	}
1837 
1838 	wake_up_process(tlat->kthread);
1839 
1840 	if (osnoise_data.print_stack)
1841 		timerlat_save_stack(0);
1842 
1843 	return HRTIMER_NORESTART;
1844 }
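
/*
 * Illustrative example of the two samples produced per activation
 * (hypothetical numbers): with a 1000 us timerlat period, if the hrtimer
 * IRQ above runs 5 us after tlat->abs_period, an IRQ_CONTEXT sample with
 * timer_latency = 5000 ns is traced here. The timerlat thread woken by this
 * handler then measures its own delay against the same abs_period and
 * traces a THREAD_CONTEXT sample (see timerlat_main()).
 */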
1845 
1846 /*
1847  * wait_next_period - Wait for the next period for timerlat
1848  */
wait_next_period(struct timerlat_variables * tlat)1849 static int wait_next_period(struct timerlat_variables *tlat)
1850 {
1851 	ktime_t next_abs_period, now;
1852 	u64 rel_period = osnoise_data.timerlat_period * 1000;
1853 
1854 	now = hrtimer_cb_get_time(&tlat->timer);
1855 	next_abs_period = ns_to_ktime(tlat->abs_period + rel_period);
1856 
1857 	/*
1858 	 * Save the next abs_period.
1859 	 */
1860 	tlat->abs_period = (u64) ktime_to_ns(next_abs_period);
1861 
1862 	/*
1863 	 * If the new abs_period is in the past, skip the activation.
1864 	 */
1865 	while (ktime_compare(now, next_abs_period) > 0) {
1866 		next_abs_period = ns_to_ktime(tlat->abs_period + rel_period);
1867 		tlat->abs_period = (u64) ktime_to_ns(next_abs_period);
1868 	}
1869 
1870 	set_current_state(TASK_INTERRUPTIBLE);
1871 
1872 	hrtimer_start(&tlat->timer, next_abs_period, HRTIMER_MODE_ABS_PINNED_HARD);
1873 	schedule();
1874 	return 1;
1875 }
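
/*
 * Example of how abs_period advances (hypothetical numbers): with
 * timerlat_period = 1000 us, rel_period is 1000000 ns, so each call moves
 * abs_period one full period forward. If the thread overslept past one or
 * more whole periods, the while loop above keeps adding rel_period until
 * the next absolute period is in the future, skipping the missed
 * activations.
 */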
1876 
1877 /*
1878  * timerlat_main - Timerlat main
1879  */
timerlat_main(void * data)1880 static int timerlat_main(void *data)
1881 {
1882 	struct osnoise_variables *osn_var = this_cpu_osn_var();
1883 	struct timerlat_variables *tlat = this_cpu_tmr_var();
1884 	struct timerlat_sample s;
1885 	struct sched_param sp;
1886 	unsigned long flags;
1887 	u64 now, diff;
1888 
1889 	/*
1890 	 * Make the thread RT; that is how cyclictest is usually used.
1891 	 */
1892 	sp.sched_priority = DEFAULT_TIMERLAT_PRIO;
1893 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1894 
1895 	/*
1896 	 * This thread was created pinned to the CPU using PF_NO_SETAFFINITY.
1897 	 * The problem is that cgroup does not allow PF_NO_SETAFFINITY threads.
1898 	 *
1899 	 * To work around this limitation, disable migration and remove the
1900 	 * flag.
1901 	 */
1902 	migrate_disable();
1903 	raw_spin_lock_irqsave(&current->pi_lock, flags);
1904 	current->flags &= ~(PF_NO_SETAFFINITY);
1905 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1906 
1907 	tlat->count = 0;
1908 	tlat->tracing_thread = false;
1909 
1910 	hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
1911 	tlat->timer.function = timerlat_irq;
1912 	tlat->kthread = current;
1913 	osn_var->pid = current->pid;
1914 	/*
1915 	 * Annotate the arrival time.
1916 	 */
1917 	tlat->abs_period = hrtimer_cb_get_time(&tlat->timer);
1918 
1919 	wait_next_period(tlat);
1920 
1921 	osn_var->sampling = 1;
1922 
1923 	while (!kthread_should_stop()) {
1924 
1925 		now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
1926 		diff = now - tlat->abs_period;
1927 
1928 		s.seqnum = tlat->count;
1929 		s.timer_latency = diff;
1930 		s.context = THREAD_CONTEXT;
1931 
1932 		trace_timerlat_sample(&s);
1933 
1934 		notify_new_max_latency(diff);
1935 
1936 		timerlat_dump_stack(time_to_us(diff));
1937 
1938 		tlat->tracing_thread = false;
1939 		if (osnoise_data.stop_tracing_total)
1940 			if (time_to_us(diff) >= osnoise_data.stop_tracing_total)
1941 				osnoise_stop_tracing();
1942 
1943 		if (osnoise_migration_pending())
1944 			break;
1945 
1946 		wait_next_period(tlat);
1947 	}
1948 
1949 	hrtimer_cancel(&tlat->timer);
1950 	migrate_enable();
1951 	return 0;
1952 }
1953 #else /* CONFIG_TIMERLAT_TRACER */
timerlat_main(void * data)1954 static int timerlat_main(void *data)
1955 {
1956 	return 0;
1957 }
1958 #endif /* CONFIG_TIMERLAT_TRACER */
1959 
1960 /*
1961  * stop_kthread - stop a workload thread
1962  */
stop_kthread(unsigned int cpu)1963 static void stop_kthread(unsigned int cpu)
1964 {
1965 	struct task_struct *kthread;
1966 
1967 	kthread = xchg_relaxed(&(per_cpu(per_cpu_osnoise_var, cpu).kthread), NULL);
1968 	if (kthread) {
1969 		if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask) &&
1970 		    !WARN_ON(!test_bit(OSN_WORKLOAD, &osnoise_options))) {
1971 			kthread_stop(kthread);
1972 		} else if (!WARN_ON(test_bit(OSN_WORKLOAD, &osnoise_options))) {
1973 			/*
1974 			 * This is a user thread waiting on the timerlat_fd. We need
1975 			 * to close all users, and the best way to guarantee this is
1976 			 * by killing the thread. NOTE: this is a purpose-specific file.
1977 			 */
1978 			kill_pid(kthread->thread_pid, SIGKILL, 1);
1979 			put_task_struct(kthread);
1980 		}
1981 	} else {
1982 		/* if no workload, just return */
1983 		if (!test_bit(OSN_WORKLOAD, &osnoise_options)) {
1984 			/*
1985 			 * This is set in the osnoise tracer case.
1986 			 */
1987 			per_cpu(per_cpu_osnoise_var, cpu).sampling = false;
1988 			barrier();
1989 		}
1990 	}
1991 }
1992 
1993 /*
1994  * stop_per_cpu_kthread - Stop per-cpu threads
1995  * stop_per_cpu_kthreads - Stop per-cpu threads
1996  *
1997  * Stop the osnoise sampling threads. Use this on unload and at system
1998  */
stop_per_cpu_kthreads(void)1999 static void stop_per_cpu_kthreads(void)
2000 {
2001 	int cpu;
2002 
2003 	cpus_read_lock();
2004 
2005 	for_each_online_cpu(cpu)
2006 		stop_kthread(cpu);
2007 
2008 	cpus_read_unlock();
2009 }
2010 
2011 /*
2012  * start_kthread - Start a workload thread
2013  */
start_kthread(unsigned int cpu)2014 static int start_kthread(unsigned int cpu)
2015 {
2016 	struct task_struct *kthread;
2017 	void *main = osnoise_main;
2018 	char comm[24];
2019 
2020 	/* Do not start a new thread if it is already running */
2021 	if (per_cpu(per_cpu_osnoise_var, cpu).kthread)
2022 		return 0;
2023 
2024 	if (timerlat_enabled()) {
2025 		snprintf(comm, 24, "timerlat/%d", cpu);
2026 		main = timerlat_main;
2027 	} else {
2028 		/* if no workload, just return */
2029 		if (!test_bit(OSN_WORKLOAD, &osnoise_options)) {
2030 			per_cpu(per_cpu_osnoise_var, cpu).sampling = true;
2031 			barrier();
2032 			return 0;
2033 		}
2034 		snprintf(comm, 24, "osnoise/%d", cpu);
2035 	}
2036 
2037 	kthread = kthread_run_on_cpu(main, NULL, cpu, comm);
2038 
2039 	if (IS_ERR(kthread)) {
2040 		pr_err(BANNER "could not start sampling thread\n");
2041 		stop_per_cpu_kthreads();
2042 		return -ENOMEM;
2043 	}
2044 
2045 	per_cpu(per_cpu_osnoise_var, cpu).kthread = kthread;
2046 	cpumask_set_cpu(cpu, &kthread_cpumask);
2047 
2048 	return 0;
2049 }
2050 
2051 /*
2052  * start_per_cpu_kthread - Kick off per-cpu osnoise sampling kthreads
2053  *
2054  * This starts the kernel threads that will look for osnoise on the
2055  * allowed CPUs.
2056  */
start_per_cpu_kthreads(void)2057 static int start_per_cpu_kthreads(void)
2058 {
2059 	struct cpumask *current_mask = &save_cpumask;
2060 	int retval = 0;
2061 	int cpu;
2062 
2063 	if (!test_bit(OSN_WORKLOAD, &osnoise_options)) {
2064 		if (timerlat_enabled())
2065 			return 0;
2066 	}
2067 
2068 	cpus_read_lock();
2069 	/*
2070 	 * Run only on online CPUs in which osnoise is allowed to run.
2071 	 */
2072 	cpumask_and(current_mask, cpu_online_mask, &osnoise_cpumask);
2073 
2074 	for_each_possible_cpu(cpu) {
2075 		if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask)) {
2076 			struct task_struct *kthread;
2077 
2078 			kthread = xchg_relaxed(&(per_cpu(per_cpu_osnoise_var, cpu).kthread), NULL);
2079 			if (!WARN_ON(!kthread))
2080 				kthread_stop(kthread);
2081 		}
2082 	}
2083 
2084 	for_each_cpu(cpu, current_mask) {
2085 		retval = start_kthread(cpu);
2086 		if (retval) {
2087 			cpus_read_unlock();
2088 			stop_per_cpu_kthreads();
2089 			return retval;
2090 		}
2091 	}
2092 
2093 	cpus_read_unlock();
2094 
2095 	return retval;
2096 }
2097 
2098 #ifdef CONFIG_HOTPLUG_CPU
osnoise_hotplug_workfn(struct work_struct * dummy)2099 static void osnoise_hotplug_workfn(struct work_struct *dummy)
2100 {
2101 	unsigned int cpu = smp_processor_id();
2102 
2103 	mutex_lock(&trace_types_lock);
2104 
2105 	if (!osnoise_has_registered_instances())
2106 		goto out_unlock_trace;
2107 
2108 	mutex_lock(&interface_lock);
2109 	cpus_read_lock();
2110 
2111 	if (!cpu_online(cpu))
2112 		goto out_unlock;
2113 	if (!cpumask_test_cpu(cpu, &osnoise_cpumask))
2114 		goto out_unlock;
2115 
2116 	start_kthread(cpu);
2117 
2118 out_unlock:
2119 	cpus_read_unlock();
2120 	mutex_unlock(&interface_lock);
2121 out_unlock_trace:
2122 	mutex_unlock(&trace_types_lock);
2123 }
2124 
2125 static DECLARE_WORK(osnoise_hotplug_work, osnoise_hotplug_workfn);
2126 
2127 /*
2128  * osnoise_cpu_init - CPU hotplug online callback function
2129  */
osnoise_cpu_init(unsigned int cpu)2130 static int osnoise_cpu_init(unsigned int cpu)
2131 {
2132 	schedule_work_on(cpu, &osnoise_hotplug_work);
2133 	return 0;
2134 }
2135 
2136 /*
2137  * osnoise_cpu_die - CPU hotplug offline callback function
2138  */
osnoise_cpu_die(unsigned int cpu)2139 static int osnoise_cpu_die(unsigned int cpu)
2140 {
2141 	stop_kthread(cpu);
2142 	return 0;
2143 }
2144 
osnoise_init_hotplug_support(void)2145 static void osnoise_init_hotplug_support(void)
2146 {
2147 	int ret;
2148 
2149 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "trace/osnoise:online",
2150 				osnoise_cpu_init, osnoise_cpu_die);
2151 	if (ret < 0)
2152 		pr_warn(BANNER "Failed to initialize CPU hotplug support\n");
2153 
2154 	return;
2155 }
2156 #else /* CONFIG_HOTPLUG_CPU */
osnoise_init_hotplug_support(void)2157 static void osnoise_init_hotplug_support(void)
2158 {
2159 	return;
2160 }
2161 #endif /* CONFIG_HOTPLUG_CPU */
2162 
2163 /*
2164  * seq file functions for the osnoise/options file.
2165  */
s_options_start(struct seq_file * s,loff_t * pos)2166 static void *s_options_start(struct seq_file *s, loff_t *pos)
2167 {
2168 	int option = *pos;
2169 
2170 	mutex_lock(&interface_lock);
2171 
2172 	if (option >= OSN_MAX)
2173 		return NULL;
2174 
2175 	return pos;
2176 }
2177 
s_options_next(struct seq_file * s,void * v,loff_t * pos)2178 static void *s_options_next(struct seq_file *s, void *v, loff_t *pos)
2179 {
2180 	int option = ++(*pos);
2181 
2182 	if (option >= OSN_MAX)
2183 		return NULL;
2184 
2185 	return pos;
2186 }
2187 
s_options_show(struct seq_file * s,void * v)2188 static int s_options_show(struct seq_file *s, void *v)
2189 {
2190 	loff_t *pos = v;
2191 	int option = *pos;
2192 
2193 	if (option == OSN_DEFAULTS) {
2194 		if (osnoise_options == OSN_DEFAULT_OPTIONS)
2195 			seq_printf(s, "%s", osnoise_options_str[option]);
2196 		else
2197 			seq_printf(s, "NO_%s", osnoise_options_str[option]);
2198 		goto out;
2199 	}
2200 
2201 	if (test_bit(option, &osnoise_options))
2202 		seq_printf(s, "%s", osnoise_options_str[option]);
2203 	else
2204 		seq_printf(s, "NO_%s", osnoise_options_str[option]);
2205 
2206 out:
2207 	if (option != OSN_MAX)
2208 		seq_puts(s, " ");
2209 
2210 	return 0;
2211 }
2212 
s_options_stop(struct seq_file * s,void * v)2213 static void s_options_stop(struct seq_file *s, void *v)
2214 {
2215 	seq_puts(s, "\n");
2216 	mutex_unlock(&interface_lock);
2217 }
2218 
2219 static const struct seq_operations osnoise_options_seq_ops = {
2220 	.start		= s_options_start,
2221 	.next		= s_options_next,
2222 	.show		= s_options_show,
2223 	.stop		= s_options_stop
2224 };
2225 
osnoise_options_open(struct inode * inode,struct file * file)2226 static int osnoise_options_open(struct inode *inode, struct file *file)
2227 {
2228 	return seq_open(file, &osnoise_options_seq_ops);
2229 };
2230 
2231 /**
2232  * osnoise_options_write - Write function for "options" entry
2233  * @filp: The active open file structure
2234  * @ubuf: The user buffer that contains the value to write
2235  * @cnt: The maximum number of bytes to write to "file"
2236  * @ppos: The current position in @file
2237  *
2238  * Writing the option name sets the option, writing the "NO_"
2239  * Writing the option name sets the option; writing the "NO_"
2240  *
2241  * Writing "DEFAULTS" resets the option values to the default ones.
2242  */
osnoise_options_write(struct file * filp,const char __user * ubuf,size_t cnt,loff_t * ppos)2243 static ssize_t osnoise_options_write(struct file *filp, const char __user *ubuf,
2244 				     size_t cnt, loff_t *ppos)
2245 {
2246 	int running, option, enable, retval;
2247 	char buf[256], *option_str;
2248 
2249 	if (cnt >= 256)
2250 		return -EINVAL;
2251 
2252 	if (copy_from_user(buf, ubuf, cnt))
2253 		return -EFAULT;
2254 
2255 	buf[cnt] = 0;
2256 
2257 	if (strncmp(buf, "NO_", 3)) {
2258 		option_str = strstrip(buf);
2259 		enable = true;
2260 	} else {
2261 		option_str = strstrip(&buf[3]);
2262 		enable = false;
2263 	}
2264 
2265 	option = match_string(osnoise_options_str, OSN_MAX, option_str);
2266 	if (option < 0)
2267 		return -EINVAL;
2268 
2269 	/*
2270 	 * trace_types_lock is taken to avoid concurrency on start/stop.
2271 	 */
2272 	mutex_lock(&trace_types_lock);
2273 	running = osnoise_has_registered_instances();
2274 	if (running)
2275 		stop_per_cpu_kthreads();
2276 
2277 	mutex_lock(&interface_lock);
2278 	/*
2279 	 * avoid CPU hotplug operations that might read options.
2280 	 */
2281 	cpus_read_lock();
2282 
2283 	retval = cnt;
2284 
2285 	if (enable) {
2286 		if (option == OSN_DEFAULTS)
2287 			osnoise_options = OSN_DEFAULT_OPTIONS;
2288 		else
2289 			set_bit(option, &osnoise_options);
2290 	} else {
2291 		if (option == OSN_DEFAULTS)
2292 			retval = -EINVAL;
2293 		else
2294 			clear_bit(option, &osnoise_options);
2295 	}
2296 
2297 	cpus_read_unlock();
2298 	mutex_unlock(&interface_lock);
2299 
2300 	if (running)
2301 		start_per_cpu_kthreads();
2302 	mutex_unlock(&trace_types_lock);
2303 
2304 	return retval;
2305 }
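
/*
 * Usage example for the "options" file above (illustrative; assuming
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *   # cat /sys/kernel/tracing/osnoise/options
 *   # echo NO_OSNOISE_WORKLOAD > /sys/kernel/tracing/osnoise/options
 *   # echo DEFAULTS > /sys/kernel/tracing/osnoise/options
 *
 * The first write clears the OSNOISE_WORKLOAD option (the NO_ prefix
 * disables an option); the last one restores the default option values.
 */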
2306 
2307 /*
2308  * osnoise_cpus_read - Read function for reading the "cpus" file
2309  * @filp: The active open file structure
2310  * @ubuf: The userspace provided buffer to read value into
2311  * @cnt: The maximum number of bytes to read
2312  * @ppos: The current "file" position
2313  *
2314  * Prints the "cpus" output into the user-provided buffer.
2315  */
2316 static ssize_t
osnoise_cpus_read(struct file * filp,char __user * ubuf,size_t count,loff_t * ppos)2317 osnoise_cpus_read(struct file *filp, char __user *ubuf, size_t count,
2318 		  loff_t *ppos)
2319 {
2320 	char *mask_str;
2321 	int len;
2322 
2323 	mutex_lock(&interface_lock);
2324 
2325 	len = snprintf(NULL, 0, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)) + 1;
2326 	mask_str = kmalloc(len, GFP_KERNEL);
2327 	if (!mask_str) {
2328 		count = -ENOMEM;
2329 		goto out_unlock;
2330 	}
2331 
2332 	len = snprintf(mask_str, len, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask));
2333 	if (len >= count) {
2334 		count = -EINVAL;
2335 		goto out_free;
2336 	}
2337 
2338 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
2339 
2340 out_free:
2341 	kfree(mask_str);
2342 out_unlock:
2343 	mutex_unlock(&interface_lock);
2344 
2345 	return count;
2346 }
2347 
2348 /*
2349  * osnoise_cpus_write - Write function for "cpus" entry
2350  * @filp: The active open file structure
2351  * @ubuf: The user buffer that contains the value to write
2352  * @cnt: The maximum number of bytes to write to "file"
2353  * @ppos: The current position in @file
2354  *
2355  * This function provides a write implementation for the "cpus"
2356  * interface of the osnoise tracer. By default, it lists all CPUs,
2357  * allowing osnoise threads to run on any online CPU of the system.
2358  * Writing a CPU list via this interface restricts the execution of
2359  * osnoise to that set of CPUs. Why not use "tracing_cpumask"?
2360  * Because the user might be interested in tracing what is running on
2361  * other CPUs. For instance, one might run osnoise in one HT CPU
2362  * while observing what is running on the sibling HT CPU.
2363  */
2364 static ssize_t
osnoise_cpus_write(struct file * filp,const char __user * ubuf,size_t count,loff_t * ppos)2365 osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count,
2366 		   loff_t *ppos)
2367 {
2368 	cpumask_var_t osnoise_cpumask_new;
2369 	int running, err;
2370 	char buf[256];
2371 
2372 	if (count >= 256)
2373 		return -EINVAL;
2374 
2375 	if (copy_from_user(buf, ubuf, count))
2376 		return -EFAULT;
2377 
2378 	if (!zalloc_cpumask_var(&osnoise_cpumask_new, GFP_KERNEL))
2379 		return -ENOMEM;
2380 
2381 	err = cpulist_parse(buf, osnoise_cpumask_new);
2382 	if (err)
2383 		goto err_free;
2384 
2385 	/*
2386 	 * trace_types_lock is taken to avoid concurrency on start/stop.
2387 	 */
2388 	mutex_lock(&trace_types_lock);
2389 	running = osnoise_has_registered_instances();
2390 	if (running)
2391 		stop_per_cpu_kthreads();
2392 
2393 	mutex_lock(&interface_lock);
2394 	/*
2395 	 * osnoise_cpumask is read by CPU hotplug operations.
2396 	 */
2397 	cpus_read_lock();
2398 
2399 	cpumask_copy(&osnoise_cpumask, osnoise_cpumask_new);
2400 
2401 	cpus_read_unlock();
2402 	mutex_unlock(&interface_lock);
2403 
2404 	if (running)
2405 		start_per_cpu_kthreads();
2406 	mutex_unlock(&trace_types_lock);
2407 
2408 	free_cpumask_var(osnoise_cpumask_new);
2409 	return count;
2410 
2411 err_free:
2412 	free_cpumask_var(osnoise_cpumask_new);
2413 
2414 	return err;
2415 }
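
/*
 * Usage example for the "cpus" file above (illustrative; assuming tracefs
 * is mounted at /sys/kernel/tracing):
 *
 *   # echo 0-3,8 > /sys/kernel/tracing/osnoise/cpus
 *
 * The list is parsed with cpulist_parse(), so ranges and commas are
 * accepted; the osnoise/timerlat threads are then restricted to the
 * online CPUs in that mask.
 */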
2416 
2417 #ifdef CONFIG_TIMERLAT_TRACER
timerlat_fd_open(struct inode * inode,struct file * file)2418 static int timerlat_fd_open(struct inode *inode, struct file *file)
2419 {
2420 	struct osnoise_variables *osn_var;
2421 	struct timerlat_variables *tlat;
2422 	long cpu = (long) inode->i_cdev;
2423 
2424 	mutex_lock(&interface_lock);
2425 
2426 	/*
2427 	 * This file is accessible only if timerlat is enabled, and
2428 	 * NO_OSNOISE_WORKLOAD is set.
2429 	 */
2430 	if (!timerlat_enabled() || test_bit(OSN_WORKLOAD, &osnoise_options)) {
2431 		mutex_unlock(&interface_lock);
2432 		return -EINVAL;
2433 	}
2434 
2435 	migrate_disable();
2436 
2437 	osn_var = this_cpu_osn_var();
2438 
2439 	/*
2440 	 * The osn_var->pid enforces single access to this file: nonzero means it is already in use.
2441 	 */
2442 	if (osn_var->pid) {
2443 		mutex_unlock(&interface_lock);
2444 		migrate_enable();
2445 		return -EBUSY;
2446 	}
2447 
2448 	/*
2449 	 * The timerlat tracer is a per-cpu tracer. Check if the user-space task
2450 	 * is also pinned to a single CPU. The tracer later monitors whether the
2451 	 * task migrates and disables the tracer if it does. However, it is
2452 	 * worth doing this basic acceptance test to avoid an obviously wrong
2453 	 * setup.
2454 	 */
2455 	if (current->nr_cpus_allowed > 1 ||  cpu != smp_processor_id()) {
2456 		mutex_unlock(&interface_lock);
2457 		migrate_enable();
2458 		return -EPERM;
2459 	}
2460 
2461 	/*
2462 	 * From now on, it is good to go.
2463 	 */
2464 	file->private_data = inode->i_cdev;
2465 
2466 	get_task_struct(current);
2467 
2468 	osn_var->kthread = current;
2469 	osn_var->pid = current->pid;
2470 
2471 	/*
2472 	 * Setup is done.
2473 	 */
2474 	mutex_unlock(&interface_lock);
2475 
2476 	tlat = this_cpu_tmr_var();
2477 	tlat->count = 0;
2478 
2479 	hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
2480 	tlat->timer.function = timerlat_irq;
2481 
2482 	migrate_enable();
2483 	return 0;
2484 };
2485 
2486 /*
2487  * timerlat_fd_read - Read function for "timerlat_fd" file
2488  * @file: The active open file structure
2489  * @ubuf: The userspace provided buffer to read value into
2490  * @cnt: The maximum number of bytes to read
2491  * @ppos: The current "file" position
2492  *
2493  * Prints 1 on timerlat, the number of interferences on osnoise, -1 on error.
2494  */
2495 static ssize_t
timerlat_fd_read(struct file * file,char __user * ubuf,size_t count,loff_t * ppos)2496 timerlat_fd_read(struct file *file, char __user *ubuf, size_t count,
2497 		  loff_t *ppos)
2498 {
2499 	long cpu = (long) file->private_data;
2500 	struct osnoise_variables *osn_var;
2501 	struct timerlat_variables *tlat;
2502 	struct timerlat_sample s;
2503 	s64 diff;
2504 	u64 now;
2505 
2506 	migrate_disable();
2507 
2508 	tlat = this_cpu_tmr_var();
2509 
2510 	/*
2511 	 * While in user-space, the thread is migratable. There is nothing
2512 	 * we can do about it.
2513 	 * So, if the thread is running on another CPU, stop the machinery.
2514 	 */
2515 	if (cpu == smp_processor_id()) {
2516 		if (tlat->uthread_migrate) {
2517 			migrate_enable();
2518 			return -EINVAL;
2519 		}
2520 	} else {
2521 		per_cpu_ptr(&per_cpu_timerlat_var, cpu)->uthread_migrate = 1;
2522 		osnoise_taint("timerlat user thread migrate\n");
2523 		osnoise_stop_tracing();
2524 		migrate_enable();
2525 		return -EINVAL;
2526 	}
2527 
2528 	osn_var = this_cpu_osn_var();
2529 
2530 	/*
2531 	 * The timerlat in user-space runs in a different order:
2532 	 * read() starts by reporting the previous activation and then
2533 	 * sleeps waiting for the next one.
2534 	 *
2535 	 * So, skip the report if we are entering read() before the first
2536 	 * wakeup from the timerlat IRQ:
2537 	 */
2538 	if (likely(osn_var->sampling)) {
2539 		now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
2540 		diff = now - tlat->abs_period;
2541 
2542 		/*
2543 		 * it was not a timer firing, but some other signal?
2544 		 */
2545 		if (diff < 0)
2546 			goto out;
2547 
2548 		s.seqnum = tlat->count;
2549 		s.timer_latency = diff;
2550 		s.context = THREAD_URET;
2551 
2552 		trace_timerlat_sample(&s);
2553 
2554 		notify_new_max_latency(diff);
2555 
2556 		tlat->tracing_thread = false;
2557 		if (osnoise_data.stop_tracing_total)
2558 			if (time_to_us(diff) >= osnoise_data.stop_tracing_total)
2559 				osnoise_stop_tracing();
2560 	} else {
2561 		tlat->tracing_thread = false;
2562 		tlat->kthread = current;
2563 
2564 		/* Annotate now to drift new period */
2565 		tlat->abs_period = hrtimer_cb_get_time(&tlat->timer);
2566 
2567 		osn_var->sampling = 1;
2568 	}
2569 
2570 	/* wait for the next period */
2571 	wait_next_period(tlat);
2572 
2573 	/* This is the wakeup from this cycle */
2574 	now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
2575 	diff = now - tlat->abs_period;
2576 
2577 	/*
2578 	 * it was not a timer firing, but some other signal?
2579 	 */
2580 	if (diff < 0)
2581 		goto out;
2582 
2583 	s.seqnum = tlat->count;
2584 	s.timer_latency = diff;
2585 	s.context = THREAD_CONTEXT;
2586 
2587 	trace_timerlat_sample(&s);
2588 
2589 	if (osnoise_data.stop_tracing_total) {
2590 		if (time_to_us(diff) >= osnoise_data.stop_tracing_total) {
2591 			timerlat_dump_stack(time_to_us(diff));
2592 			notify_new_max_latency(diff);
2593 			osnoise_stop_tracing();
2594 		}
2595 	}
2596 
2597 out:
2598 	migrate_enable();
2599 	return 0;
2600 }
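
/*
 * Sketch of how a user-space thread is expected to drive timerlat_fd
 * (illustrative only, not part of the kernel build; the path, the CPU
 * number, and the lack of error handling are assumptions, with tracefs
 * mounted at /sys/kernel/tracing, timerlat selected as the current tracer,
 * and NO_OSNOISE_WORKLOAD set):
 *
 *	cpu_set_t set;
 *	char c;
 *	int fd;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	sched_setaffinity(0, sizeof(set), &set);	// pin to CPU 0
 *	fd = open("/sys/kernel/tracing/osnoise/per_cpu/cpu0/timerlat_fd", O_RDONLY);
 *	while (read(fd, &c, 1) >= 0) {
 *		// read() returns once per timerlat activation; the time spent
 *		// here in user space shows up in the THREAD_URET sample traced
 *		// at the beginning of the next read().
 *	}
 *
 * The open() above fails unless the caller is pinned to the CPU that matches
 * the per_cpu/cpuN directory, and each read() blocks in wait_next_period()
 * until the next timerlat activation.
 */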
2601 
timerlat_fd_release(struct inode * inode,struct file * file)2602 static int timerlat_fd_release(struct inode *inode, struct file *file)
2603 {
2604 	struct osnoise_variables *osn_var;
2605 	struct timerlat_variables *tlat_var;
2606 	long cpu = (long) file->private_data;
2607 
2608 	migrate_disable();
2609 	mutex_lock(&interface_lock);
2610 
2611 	osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
2612 	tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);
2613 
2614 	if (tlat_var->kthread)
2615 		hrtimer_cancel(&tlat_var->timer);
2616 	memset(tlat_var, 0, sizeof(*tlat_var));
2617 
2618 	osn_var->sampling = 0;
2619 	osn_var->pid = 0;
2620 
2621 	/*
2622 	 * We are leaving, not being stopped... see stop_kthread();
2623 	 */
2624 	if (osn_var->kthread) {
2625 		put_task_struct(osn_var->kthread);
2626 		osn_var->kthread = NULL;
2627 	}
2628 
2629 	mutex_unlock(&interface_lock);
2630 	migrate_enable();
2631 	return 0;
2632 }
2633 #endif
2634 
2635 /*
2636  * osnoise/runtime_us: cannot be greater than the period.
2637  */
2638 static struct trace_min_max_param osnoise_runtime = {
2639 	.lock	= &interface_lock,
2640 	.val	= &osnoise_data.sample_runtime,
2641 	.max	= &osnoise_data.sample_period,
2642 	.min	= NULL,
2643 };
2644 
2645 /*
2646  * osnoise/period_us: cannot be smaller than the runtime.
2647  */
2648 static struct trace_min_max_param osnoise_period = {
2649 	.lock	= &interface_lock,
2650 	.val	= &osnoise_data.sample_period,
2651 	.max	= NULL,
2652 	.min	= &osnoise_data.sample_runtime,
2653 };
2654 
2655 /*
2656  * osnoise/stop_tracing_us: no limit.
2657  */
2658 static struct trace_min_max_param osnoise_stop_tracing_in = {
2659 	.lock	= &interface_lock,
2660 	.val	= &osnoise_data.stop_tracing,
2661 	.max	= NULL,
2662 	.min	= NULL,
2663 };
2664 
2665 /*
2666  * osnoise/stop_tracing_total_us: no limit.
2667  */
2668 static struct trace_min_max_param osnoise_stop_tracing_total = {
2669 	.lock	= &interface_lock,
2670 	.val	= &osnoise_data.stop_tracing_total,
2671 	.max	= NULL,
2672 	.min	= NULL,
2673 };
2674 
2675 #ifdef CONFIG_TIMERLAT_TRACER
2676 /*
2677  * osnoise/print_stack: print the stacktrace of the IRQ handler if the total
2678  * latency is higher than val.
2679  */
2680 static struct trace_min_max_param osnoise_print_stack = {
2681 	.lock	= &interface_lock,
2682 	.val	= &osnoise_data.print_stack,
2683 	.max	= NULL,
2684 	.min	= NULL,
2685 };
2686 
2687 /*
2688  * osnoise/timerlat_period: min 100 us, max 1 s
2689  */
2690 static u64 timerlat_min_period = 100;
2691 static u64 timerlat_max_period = 1000000;
2692 static struct trace_min_max_param timerlat_period = {
2693 	.lock	= &interface_lock,
2694 	.val	= &osnoise_data.timerlat_period,
2695 	.max	= &timerlat_max_period,
2696 	.min	= &timerlat_min_period,
2697 };
2698 
2699 static const struct file_operations timerlat_fd_fops = {
2700 	.open		= timerlat_fd_open,
2701 	.read		= timerlat_fd_read,
2702 	.release	= timerlat_fd_release,
2703 	.llseek		= generic_file_llseek,
2704 };
2705 #endif
2706 
2707 static const struct file_operations cpus_fops = {
2708 	.open		= tracing_open_generic,
2709 	.read		= osnoise_cpus_read,
2710 	.write		= osnoise_cpus_write,
2711 	.llseek		= generic_file_llseek,
2712 };
2713 
2714 static const struct file_operations osnoise_options_fops = {
2715 	.open		= osnoise_options_open,
2716 	.read		= seq_read,
2717 	.llseek		= seq_lseek,
2718 	.release	= seq_release,
2719 	.write		= osnoise_options_write
2720 };
2721 
2722 #ifdef CONFIG_TIMERLAT_TRACER
2723 #ifdef CONFIG_STACKTRACE
init_timerlat_stack_tracefs(struct dentry * top_dir)2724 static int init_timerlat_stack_tracefs(struct dentry *top_dir)
2725 {
2726 	struct dentry *tmp;
2727 
2728 	tmp = tracefs_create_file("print_stack", TRACE_MODE_WRITE, top_dir,
2729 				  &osnoise_print_stack, &trace_min_max_fops);
2730 	if (!tmp)
2731 		return -ENOMEM;
2732 
2733 	return 0;
2734 }
2735 #else /* CONFIG_STACKTRACE */
init_timerlat_stack_tracefs(struct dentry * top_dir)2736 static int init_timerlat_stack_tracefs(struct dentry *top_dir)
2737 {
2738 	return 0;
2739 }
2740 #endif /* CONFIG_STACKTRACE */
2741 
osnoise_create_cpu_timerlat_fd(struct dentry * top_dir)2742 static int osnoise_create_cpu_timerlat_fd(struct dentry *top_dir)
2743 {
2744 	struct dentry *timerlat_fd;
2745 	struct dentry *per_cpu;
2746 	struct dentry *cpu_dir;
2747 	char cpu_str[30]; /* see trace.c: tracing_init_tracefs_percpu() */
2748 	long cpu;
2749 
2750 	/*
2751 	 * Why not use the tracing instance per_cpu/ dir?
2752 	 *
2753 	 * Because osnoise/timerlat have a single workload, having
2754 	 * multiple files like these would be a waste of memory.
2755 	 */
2756 	per_cpu = tracefs_create_dir("per_cpu", top_dir);
2757 	if (!per_cpu)
2758 		return -ENOMEM;
2759 
2760 	for_each_possible_cpu(cpu) {
2761 		snprintf(cpu_str, 30, "cpu%ld", cpu);
2762 		cpu_dir = tracefs_create_dir(cpu_str, per_cpu);
2763 		if (!cpu_dir)
2764 			goto out_clean;
2765 
2766 		timerlat_fd = trace_create_file("timerlat_fd", TRACE_MODE_READ,
2767 						cpu_dir, NULL, &timerlat_fd_fops);
2768 		if (!timerlat_fd)
2769 			goto out_clean;
2770 
2771 		/* Record the CPU */
2772 		d_inode(timerlat_fd)->i_cdev = (void *)(cpu);
2773 	}
2774 
2775 	return 0;
2776 
2777 out_clean:
2778 	tracefs_remove(per_cpu);
2779 	return -ENOMEM;
2780 }
2781 
2782 /*
2783  * init_timerlat_tracefs - A function to initialize the timerlat interface files
2784  */
init_timerlat_tracefs(struct dentry * top_dir)2785 static int init_timerlat_tracefs(struct dentry *top_dir)
2786 {
2787 	struct dentry *tmp;
2788 	int retval;
2789 
2790 	tmp = tracefs_create_file("timerlat_period_us", TRACE_MODE_WRITE, top_dir,
2791 				  &timerlat_period, &trace_min_max_fops);
2792 	if (!tmp)
2793 		return -ENOMEM;
2794 
2795 	retval = osnoise_create_cpu_timerlat_fd(top_dir);
2796 	if (retval)
2797 		return retval;
2798 
2799 	return init_timerlat_stack_tracefs(top_dir);
2800 }
2801 #else /* CONFIG_TIMERLAT_TRACER */
init_timerlat_tracefs(struct dentry * top_dir)2802 static int init_timerlat_tracefs(struct dentry *top_dir)
2803 {
2804 	return 0;
2805 }
2806 #endif /* CONFIG_TIMERLAT_TRACER */
2807 
2808 /*
2809  * init_tracefs - A function to initialize the tracefs interface files
2810  *
2811  * This function creates entries in tracefs for "osnoise" and "timerlat".
2812  * It creates these directories in the tracing directory, and within that
2813  * directory the user can change and view the configs.
2814  */
init_tracefs(void)2815 static int init_tracefs(void)
2816 {
2817 	struct dentry *top_dir;
2818 	struct dentry *tmp;
2819 	int ret;
2820 
2821 	ret = tracing_init_dentry();
2822 	if (ret)
2823 		return -ENOMEM;
2824 
2825 	top_dir = tracefs_create_dir("osnoise", NULL);
2826 	if (!top_dir)
2827 		return 0;
2828 
2829 	tmp = tracefs_create_file("period_us", TRACE_MODE_WRITE, top_dir,
2830 				  &osnoise_period, &trace_min_max_fops);
2831 	if (!tmp)
2832 		goto err;
2833 
2834 	tmp = tracefs_create_file("runtime_us", TRACE_MODE_WRITE, top_dir,
2835 				  &osnoise_runtime, &trace_min_max_fops);
2836 	if (!tmp)
2837 		goto err;
2838 
2839 	tmp = tracefs_create_file("stop_tracing_us", TRACE_MODE_WRITE, top_dir,
2840 				  &osnoise_stop_tracing_in, &trace_min_max_fops);
2841 	if (!tmp)
2842 		goto err;
2843 
2844 	tmp = tracefs_create_file("stop_tracing_total_us", TRACE_MODE_WRITE, top_dir,
2845 				  &osnoise_stop_tracing_total, &trace_min_max_fops);
2846 	if (!tmp)
2847 		goto err;
2848 
2849 	tmp = trace_create_file("cpus", TRACE_MODE_WRITE, top_dir, NULL, &cpus_fops);
2850 	if (!tmp)
2851 		goto err;
2852 
2853 	tmp = trace_create_file("options", TRACE_MODE_WRITE, top_dir, NULL,
2854 				&osnoise_options_fops);
2855 	if (!tmp)
2856 		goto err;
2857 
2858 	ret = init_timerlat_tracefs(top_dir);
2859 	if (ret)
2860 		goto err;
2861 
2862 	return 0;
2863 
2864 err:
2865 	tracefs_remove(top_dir);
2866 	return -ENOMEM;
2867 }
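
/*
 * Resulting tracefs layout created above (relative to the tracing
 * directory, typically /sys/kernel/tracing):
 *
 *   osnoise/period_us
 *   osnoise/runtime_us
 *   osnoise/stop_tracing_us
 *   osnoise/stop_tracing_total_us
 *   osnoise/cpus
 *   osnoise/options
 *   osnoise/timerlat_period_us		(CONFIG_TIMERLAT_TRACER)
 *   osnoise/print_stack		(CONFIG_TIMERLAT_TRACER && CONFIG_STACKTRACE)
 *   osnoise/per_cpu/cpuN/timerlat_fd	(one per possible CPU N, CONFIG_TIMERLAT_TRACER)
 */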
2868 
osnoise_hook_events(void)2869 static int osnoise_hook_events(void)
2870 {
2871 	int retval;
2872 
2873 	/*
2874 	 * Trace is already hooked; we are re-enabling from
2875 	 * a stop_tracing_*.
2876 	 */
2877 	if (trace_osnoise_callback_enabled)
2878 		return 0;
2879 
2880 	retval = hook_irq_events();
2881 	if (retval)
2882 		return -EINVAL;
2883 
2884 	retval = hook_softirq_events();
2885 	if (retval)
2886 		goto out_unhook_irq;
2887 
2888 	retval = hook_thread_events();
2889 	/*
2890 	 * All fine!
2891 	 */
2892 	if (!retval)
2893 		return 0;
2894 
2895 	unhook_softirq_events();
2896 out_unhook_irq:
2897 	unhook_irq_events();
2898 	return -EINVAL;
2899 }
2900 
osnoise_unhook_events(void)2901 static void osnoise_unhook_events(void)
2902 {
2903 	unhook_thread_events();
2904 	unhook_softirq_events();
2905 	unhook_irq_events();
2906 }
2907 
2908 /*
2909  * osnoise_workload_start - start the workload and hook to events
2910  */
osnoise_workload_start(void)2911 static int osnoise_workload_start(void)
2912 {
2913 	int retval;
2914 
2915 	/*
2916 	 * Instances need to be registered after calling workload
2917 	 * start. Hence, if there is already an instance, the
2918 	 * workload was already registered. Otherwise, this
2919 	 * code is on the way to register the first instance,
2920 	 * and the workload will start.
2921 	 */
2922 	if (osnoise_has_registered_instances())
2923 		return 0;
2924 
2925 	osn_var_reset_all();
2926 
2927 	retval = osnoise_hook_events();
2928 	if (retval)
2929 		return retval;
2930 
2931 	/*
2932 	 * Make sure that ftrace_nmi_enter/exit() see reset values
2933 	 * before enabling trace_osnoise_callback_enabled.
2934 	 */
2935 	barrier();
2936 	trace_osnoise_callback_enabled = true;
2937 
2938 	retval = start_per_cpu_kthreads();
2939 	if (retval) {
2940 		trace_osnoise_callback_enabled = false;
2941 		/*
2942 		 * Make sure that ftrace_nmi_enter/exit() see
2943 		 * trace_osnoise_callback_enabled as false before continuing.
2944 		 */
2945 		barrier();
2946 
2947 		osnoise_unhook_events();
2948 		return retval;
2949 	}
2950 
2951 	return 0;
2952 }
2953 
2954 /*
2955  * osnoise_workload_stop - stop the workload and unhook the events
2956  */
osnoise_workload_stop(void)2957 static void osnoise_workload_stop(void)
2958 {
2959 	/*
2960 	 * Instances need to be unregistered before calling
2961 	 * stop. Hence, if there is a registered instance, more
2962 	 * than one instance is running, and the workload will not
2963 	 * yet stop. Otherwise, this code is on the way to disable
2964 	 * the last instance, and the workload can stop.
2965 	 */
2966 	if (osnoise_has_registered_instances())
2967 		return;
2968 
2969 	/*
2970 	 * If callbacks were already disabled in a previous stop
2971 	 * call, there is no need to disable them again.
2972 	 *
2973 	 * For instance, this happens when tracing is stopped via:
2974 	 * echo 0 > tracing_on
2975 	 * echo nop > current_tracer.
2976 	 */
2977 	if (!trace_osnoise_callback_enabled)
2978 		return;
2979 
2980 	trace_osnoise_callback_enabled = false;
2981 	/*
2982 	 * Make sure that ftrace_nmi_enter/exit() see
2983 	 * trace_osnoise_callback_enabled as false before continuing.
2984 	 */
2985 	barrier();
2986 
2987 	stop_per_cpu_kthreads();
2988 
2989 	osnoise_unhook_events();
2990 }
2991 
osnoise_tracer_start(struct trace_array * tr)2992 static void osnoise_tracer_start(struct trace_array *tr)
2993 {
2994 	int retval;
2995 
2996 	/*
2997 	 * If the instance is already registered, there is no need to
2998 	 * register it again.
2999 	 */
3000 	if (osnoise_instance_registered(tr))
3001 		return;
3002 
3003 	retval = osnoise_workload_start();
3004 	if (retval)
3005 		pr_err(BANNER "Error starting osnoise tracer\n");
3006 
3007 	osnoise_register_instance(tr);
3008 }
3009 
osnoise_tracer_stop(struct trace_array * tr)3010 static void osnoise_tracer_stop(struct trace_array *tr)
3011 {
3012 	osnoise_unregister_instance(tr);
3013 	osnoise_workload_stop();
3014 }
3015 
osnoise_tracer_init(struct trace_array * tr)3016 static int osnoise_tracer_init(struct trace_array *tr)
3017 {
3018 	/*
3019 	 * Only allow osnoise tracer if timerlat tracer is not running
3020 	 * already.
3021 	 */
3022 	if (timerlat_enabled())
3023 		return -EBUSY;
3024 
3025 	tr->max_latency = 0;
3026 
3027 	osnoise_tracer_start(tr);
3028 	return 0;
3029 }
3030 
osnoise_tracer_reset(struct trace_array * tr)3031 static void osnoise_tracer_reset(struct trace_array *tr)
3032 {
3033 	osnoise_tracer_stop(tr);
3034 }
3035 
3036 static struct tracer osnoise_tracer __read_mostly = {
3037 	.name		= "osnoise",
3038 	.init		= osnoise_tracer_init,
3039 	.reset		= osnoise_tracer_reset,
3040 	.start		= osnoise_tracer_start,
3041 	.stop		= osnoise_tracer_stop,
3042 	.print_header	= print_osnoise_headers,
3043 	.allow_instances = true,
3044 };
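
/*
 * Typical way to enable the tracer defined above (illustrative; assuming
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *   # echo osnoise > /sys/kernel/tracing/current_tracer
 *   # cat /sys/kernel/tracing/trace
 *
 * The timerlat tracer below is enabled the same way, writing "timerlat"
 * instead. Note that osnoise_tracer_init() refuses to start while timerlat
 * is running, and timerlat_tracer_init() refuses to start while osnoise is
 * running.
 */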
3045 
3046 #ifdef CONFIG_TIMERLAT_TRACER
timerlat_tracer_start(struct trace_array * tr)3047 static void timerlat_tracer_start(struct trace_array *tr)
3048 {
3049 	int retval;
3050 
3051 	/*
3052 	 * If the instance is already registered, there is no need to
3053 	 * register it again.
3054 	 */
3055 	if (osnoise_instance_registered(tr))
3056 		return;
3057 
3058 	retval = osnoise_workload_start();
3059 	if (retval)
3060 		pr_err(BANNER "Error starting timerlat tracer\n");
3061 
3062 	osnoise_register_instance(tr);
3063 
3064 	return;
3065 }
3066 
timerlat_tracer_stop(struct trace_array * tr)3067 static void timerlat_tracer_stop(struct trace_array *tr)
3068 {
3069 	int cpu;
3070 
3071 	osnoise_unregister_instance(tr);
3072 
3073 	/*
3074 	 * Instruct the threads to stop only if this is the last instance.
3075 	 */
3076 	if (!osnoise_has_registered_instances()) {
3077 		for_each_online_cpu(cpu)
3078 			per_cpu(per_cpu_osnoise_var, cpu).sampling = 0;
3079 	}
3080 
3081 	osnoise_workload_stop();
3082 }
3083 
timerlat_tracer_init(struct trace_array * tr)3084 static int timerlat_tracer_init(struct trace_array *tr)
3085 {
3086 	/*
3087 	 * Only allow timerlat tracer if osnoise tracer is not running already.
3088 	 */
3089 	if (osnoise_has_registered_instances() && !osnoise_data.timerlat_tracer)
3090 		return -EBUSY;
3091 
3092 	/*
3093 	 * If this is the first instance, set timerlat_tracer to block
3094 	 * osnoise tracer start.
3095 	 */
3096 	if (!osnoise_has_registered_instances())
3097 		osnoise_data.timerlat_tracer = 1;
3098 
3099 	tr->max_latency = 0;
3100 	timerlat_tracer_start(tr);
3101 
3102 	return 0;
3103 }
3104 
timerlat_tracer_reset(struct trace_array * tr)3105 static void timerlat_tracer_reset(struct trace_array *tr)
3106 {
3107 	timerlat_tracer_stop(tr);
3108 
3109 	/*
3110 	 * If this is the last instance, reset timerlat_tracer allowing
3111 	 * osnoise to be started.
3112 	 */
3113 	if (!osnoise_has_registered_instances())
3114 		osnoise_data.timerlat_tracer = 0;
3115 }
3116 
3117 static struct tracer timerlat_tracer __read_mostly = {
3118 	.name		= "timerlat",
3119 	.init		= timerlat_tracer_init,
3120 	.reset		= timerlat_tracer_reset,
3121 	.start		= timerlat_tracer_start,
3122 	.stop		= timerlat_tracer_stop,
3123 	.print_header	= print_timerlat_headers,
3124 	.allow_instances = true,
3125 };
3126 
init_timerlat_tracer(void)3127 __init static int init_timerlat_tracer(void)
3128 {
3129 	return register_tracer(&timerlat_tracer);
3130 }
3131 #else /* CONFIG_TIMERLAT_TRACER */
init_timerlat_tracer(void)3132 __init static int init_timerlat_tracer(void)
3133 {
3134 	return 0;
3135 }
3136 #endif /* CONFIG_TIMERLAT_TRACER */
3137 
init_osnoise_tracer(void)3138 __init static int init_osnoise_tracer(void)
3139 {
3140 	int ret;
3141 
3142 	mutex_init(&interface_lock);
3143 
3144 	cpumask_copy(&osnoise_cpumask, cpu_all_mask);
3145 
3146 	ret = register_tracer(&osnoise_tracer);
3147 	if (ret) {
3148 		pr_err(BANNER "Error registering osnoise!\n");
3149 		return ret;
3150 	}
3151 
3152 	ret = init_timerlat_tracer();
3153 	if (ret) {
3154 		pr_err(BANNER "Error registering timerlat!\n");
3155 		return ret;
3156 	}
3157 
3158 	osnoise_init_hotplug_support();
3159 
3160 	INIT_LIST_HEAD_RCU(&osnoise_instances);
3161 
3162 	init_tracefs();
3163 
3164 	return 0;
3165 }
3166 late_initcall(init_osnoise_tracer);
3167