xref: /openbmc/linux/kernel/trace/trace.c (revision 93df8a1e)
1 /*
2  * ring buffer based function tracer
3  *
4  * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally taken from the RT patch by:
8  *    Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code from the latency_tracer, that is:
11  *  Copyright (C) 2004-2006 Ingo Molnar
12  *  Copyright (C) 2004 Nadia Yvette Chambers
13  */
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/tracefs.h>
24 #include <linux/pagemap.h>
25 #include <linux/hardirq.h>
26 #include <linux/linkage.h>
27 #include <linux/uaccess.h>
28 #include <linux/kprobes.h>
29 #include <linux/ftrace.h>
30 #include <linux/module.h>
31 #include <linux/percpu.h>
32 #include <linux/splice.h>
33 #include <linux/kdebug.h>
34 #include <linux/string.h>
35 #include <linux/mount.h>
36 #include <linux/rwsem.h>
37 #include <linux/slab.h>
38 #include <linux/ctype.h>
39 #include <linux/init.h>
40 #include <linux/poll.h>
41 #include <linux/nmi.h>
42 #include <linux/fs.h>
43 #include <linux/sched/rt.h>
44 
45 #include "trace.h"
46 #include "trace_output.h"
47 
48 /*
49  * On boot up, the ring buffer is set to the minimum size, so that
50  * we do not waste memory on systems that are not using tracing.
51  */
52 bool ring_buffer_expanded;
53 
54 /*
55  * We need to change this state when a selftest is running.
56  * A selftest will look into the ring buffer to count the
57  * entries inserted during the selftest, although concurrent
58  * insertions into the ring buffer, such as trace_printk(), could occur
59  * at the same time, giving false positive or negative results.
60  */
61 static bool __read_mostly tracing_selftest_running;
62 
63 /*
64  * If a tracer is running, we do not want to run SELFTEST.
65  */
66 bool __read_mostly tracing_selftest_disabled;
67 
68 /* Pipe tracepoints to printk */
69 struct trace_iterator *tracepoint_print_iter;
70 int tracepoint_printk;
71 
72 /* For tracers that don't implement custom flags */
73 static struct tracer_opt dummy_tracer_opt[] = {
74 	{ }
75 };
76 
77 static struct tracer_flags dummy_tracer_flags = {
78 	.val = 0,
79 	.opts = dummy_tracer_opt
80 };
81 
82 static int
83 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
84 {
85 	return 0;
86 }
87 
88 /*
89  * To prevent the comm cache from being overwritten when no
90  * tracing is active, only save the comm when a trace event
91  * occurred.
92  */
93 static DEFINE_PER_CPU(bool, trace_cmdline_save);
94 
95 /*
96  * Kill all tracing for good (never come back).
97  * It is initialized to 1, and is set back to zero only if the
98  * initialization of the tracer is successful; that is the only
99  * place that clears it.
100  */
101 static int tracing_disabled = 1;
102 
103 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
104 
105 cpumask_var_t __read_mostly	tracing_buffer_mask;
106 
107 /*
108  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
109  *
110  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
111  * is set, then ftrace_dump is called. This will output the contents
112  * of the ftrace buffers to the console.  This is very useful for
113  * capturing traces that lead to crashes and outputting them to a
114  * serial console.
115  *
116  * It is off by default, but you can enable it either by specifying
117  * "ftrace_dump_on_oops" on the kernel command line, or by setting
118  * /proc/sys/kernel/ftrace_dump_on_oops.
119  * Set it to 1 to dump the buffers of all CPUs.
120  * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
121  */
122 
123 enum ftrace_dump_mode ftrace_dump_on_oops;
124 
125 /* When set, tracing will stop when a WARN*() is hit */
126 int __disable_trace_on_warning;
127 
128 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
129 /* Map of enums to their values, for "enum_map" file */
130 struct trace_enum_map_head {
131 	struct module			*mod;
132 	unsigned long			length;
133 };
134 
135 union trace_enum_map_item;
136 
137 struct trace_enum_map_tail {
138 	/*
139 	 * "end" is first and points to NULL as it must be different
140 	 * than "mod" or "enum_string"
141 	 */
142 	union trace_enum_map_item	*next;
143 	const char			*end;	/* points to NULL */
144 };
145 
146 static DEFINE_MUTEX(trace_enum_mutex);
147 
148 /*
149  * The trace_enum_maps are saved in an array with two extra elements,
150  * one at the beginning, and one at the end. The beginning item contains
151  * the count of the saved maps (head.length), and the module they
152  * belong to if not built in (head.mod). The ending item contains a
153  * pointer to the next array of saved enum_map items.
154  */
155 union trace_enum_map_item {
156 	struct trace_enum_map		map;
157 	struct trace_enum_map_head	head;
158 	struct trace_enum_map_tail	tail;
159 };
160 
161 static union trace_enum_map_item *trace_enum_maps;
162 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
163 
164 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
165 
166 #define MAX_TRACER_SIZE		100
167 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
168 static char *default_bootup_tracer;
169 
170 static bool allocate_snapshot;
171 
172 static int __init set_cmdline_ftrace(char *str)
173 {
174 	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
175 	default_bootup_tracer = bootup_tracer_buf;
176 	/* We are using ftrace early, expand it */
177 	ring_buffer_expanded = true;
178 	return 1;
179 }
180 __setup("ftrace=", set_cmdline_ftrace);
181 
182 static int __init set_ftrace_dump_on_oops(char *str)
183 {
184 	if (*str++ != '=' || !*str) {
185 		ftrace_dump_on_oops = DUMP_ALL;
186 		return 1;
187 	}
188 
189 	if (!strcmp("orig_cpu", str)) {
190 		ftrace_dump_on_oops = DUMP_ORIG;
191 		return 1;
192 	}
193 
194 	return 0;
195 }
196 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
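
/*
 * Example (illustrative only, not part of this file): the two boot
 * parameters handled above can be combined on the kernel command line,
 * e.g.
 *
 *	ftrace=function_graph ftrace_dump_on_oops=orig_cpu
 *
 * which selects the function_graph tracer at early boot (assuming that
 * tracer is built into this kernel) and dumps only the oopsing CPU's
 * buffer after a crash.
 */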
197 
198 static int __init stop_trace_on_warning(char *str)
199 {
200 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
201 		__disable_trace_on_warning = 1;
202 	return 1;
203 }
204 __setup("traceoff_on_warning", stop_trace_on_warning);
205 
206 static int __init boot_alloc_snapshot(char *str)
207 {
208 	allocate_snapshot = true;
209 	/* We also need the main ring buffer expanded */
210 	ring_buffer_expanded = true;
211 	return 1;
212 }
213 __setup("alloc_snapshot", boot_alloc_snapshot);
214 
215 
216 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
217 static char *trace_boot_options __initdata;
218 
219 static int __init set_trace_boot_options(char *str)
220 {
221 	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
222 	trace_boot_options = trace_boot_options_buf;
223 	return 0;
224 }
225 __setup("trace_options=", set_trace_boot_options);
226 
227 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
228 static char *trace_boot_clock __initdata;
229 
230 static int __init set_trace_boot_clock(char *str)
231 {
232 	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
233 	trace_boot_clock = trace_boot_clock_buf;
234 	return 0;
235 }
236 __setup("trace_clock=", set_trace_boot_clock);
237 
238 static int __init set_tracepoint_printk(char *str)
239 {
240 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
241 		tracepoint_printk = 1;
242 	return 1;
243 }
244 __setup("tp_printk", set_tracepoint_printk);
245 
246 unsigned long long ns2usecs(cycle_t nsec)
247 {
248 	nsec += 500;
249 	do_div(nsec, 1000);
250 	return nsec;
251 }
252 
253 /*
254  * The global_trace is the descriptor that holds the tracing
255  * buffers for the live tracing. For each CPU, it contains
256  * a linked list of pages that will store trace entries. The
257  * page descriptor of each page in memory is used to hold the
258  * linked list, by linking the lru item of the page descriptor
259  * to the other pages of that CPU's buffer.
260  *
261  * For each active CPU there is a data field that holds the
262  * pages for the buffer for that CPU. Each CPU has the same number
263  * of pages allocated for its buffer.
264  */
265 static struct trace_array	global_trace;
266 
267 LIST_HEAD(ftrace_trace_arrays);
268 
269 int trace_array_get(struct trace_array *this_tr)
270 {
271 	struct trace_array *tr;
272 	int ret = -ENODEV;
273 
274 	mutex_lock(&trace_types_lock);
275 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
276 		if (tr == this_tr) {
277 			tr->ref++;
278 			ret = 0;
279 			break;
280 		}
281 	}
282 	mutex_unlock(&trace_types_lock);
283 
284 	return ret;
285 }
286 
287 static void __trace_array_put(struct trace_array *this_tr)
288 {
289 	WARN_ON(!this_tr->ref);
290 	this_tr->ref--;
291 }
292 
293 void trace_array_put(struct trace_array *this_tr)
294 {
295 	mutex_lock(&trace_types_lock);
296 	__trace_array_put(this_tr);
297 	mutex_unlock(&trace_types_lock);
298 }
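
/*
 * Usage sketch (illustrative, not from this file): a caller that needs
 * to keep a trace_array alive pairs the two helpers above:
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;
 *	...use tr...
 *	trace_array_put(tr);
 *
 * trace_array_get() fails with -ENODEV once the array is no longer on
 * the ftrace_trace_arrays list.
 */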
299 
300 int filter_check_discard(struct trace_event_file *file, void *rec,
301 			 struct ring_buffer *buffer,
302 			 struct ring_buffer_event *event)
303 {
304 	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
305 	    !filter_match_preds(file->filter, rec)) {
306 		ring_buffer_discard_commit(buffer, event);
307 		return 1;
308 	}
309 
310 	return 0;
311 }
312 EXPORT_SYMBOL_GPL(filter_check_discard);
313 
314 int call_filter_check_discard(struct trace_event_call *call, void *rec,
315 			      struct ring_buffer *buffer,
316 			      struct ring_buffer_event *event)
317 {
318 	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
319 	    !filter_match_preds(call->filter, rec)) {
320 		ring_buffer_discard_commit(buffer, event);
321 		return 1;
322 	}
323 
324 	return 0;
325 }
326 EXPORT_SYMBOL_GPL(call_filter_check_discard);
327 
328 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
329 {
330 	u64 ts;
331 
332 	/* Early boot up does not have a buffer yet */
333 	if (!buf->buffer)
334 		return trace_clock_local();
335 
336 	ts = ring_buffer_time_stamp(buf->buffer, cpu);
337 	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
338 
339 	return ts;
340 }
341 
342 cycle_t ftrace_now(int cpu)
343 {
344 	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
345 }
346 
347 /**
348  * tracing_is_enabled - Show if global_trace has been enabled
349  *
350  * Shows if the global trace has been enabled or not. It uses the
351  * mirror flag "buffer_disabled" to be used in fast paths such as for
352  * the irqsoff tracer. But it may be inaccurate due to races. If you
353  * need to know the accurate state, use tracing_is_on() which is a little
354  * slower, but accurate.
355  */
356 int tracing_is_enabled(void)
357 {
358 	/*
359 	 * For quick access (irqsoff uses this in fast path), just
360 	 * return the mirror variable of the state of the ring buffer.
361 	 * It's a little racy, but we don't really care.
362 	 */
363 	smp_rmb();
364 	return !global_trace.buffer_disabled;
365 }
366 
367 /*
368  * trace_buf_size is the size in bytes that is allocated
369  * for a buffer. Note, the number of bytes is always rounded
370  * to page size.
371  *
372  * This number is purposely set to a low number of 16384.
373  * If the dump on oops happens, it will be much appreciated
374  * to not have to wait for all that output. Anyway, this is
375  * configurable at both boot time and run time.
376  */
377 #define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
378 
379 static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
380 
381 /* trace_types holds a linked list of available tracers. */
382 static struct tracer		*trace_types __read_mostly;
383 
384 /*
385  * trace_types_lock is used to protect the trace_types list.
386  */
387 DEFINE_MUTEX(trace_types_lock);
388 
389 /*
390  * serialize the access of the ring buffer
391  *
392  * The ring buffer serializes readers, but that is only low level
393  * protection. The validity of events (returned by ring_buffer_peek()
394  * and friends) is not protected by the ring buffer.
395  *
396  * The content of events may become garbage if we allow another process
397  * to consume these events concurrently:
398  *   A) the page of the consumed events may become a normal page
399  *      (not a reader page) in the ring buffer, and this page will be
400  *      rewritten by the event producer.
401  *   B) the page of the consumed events may become a page for
402  *      splice_read, and this page will be returned to the system.
403  *
404  * These primitives allow multiple processes to access different per-cpu
405  * ring buffers concurrently.
406  *
407  * These primitives don't distinguish read-only and read-consume access.
408  * Multiple read-only accesses are also serialized.
409  */
410 
411 #ifdef CONFIG_SMP
412 static DECLARE_RWSEM(all_cpu_access_lock);
413 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
414 
415 static inline void trace_access_lock(int cpu)
416 {
417 	if (cpu == RING_BUFFER_ALL_CPUS) {
418 		/* gain it for accessing the whole ring buffer. */
419 		down_write(&all_cpu_access_lock);
420 	} else {
421 		/* gain it for accessing a cpu ring buffer. */
422 
423 		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
424 		down_read(&all_cpu_access_lock);
425 
426 		/* Secondly block other access to this @cpu ring buffer. */
427 		mutex_lock(&per_cpu(cpu_access_lock, cpu));
428 	}
429 }
430 
431 static inline void trace_access_unlock(int cpu)
432 {
433 	if (cpu == RING_BUFFER_ALL_CPUS) {
434 		up_write(&all_cpu_access_lock);
435 	} else {
436 		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
437 		up_read(&all_cpu_access_lock);
438 	}
439 }
440 
441 static inline void trace_access_lock_init(void)
442 {
443 	int cpu;
444 
445 	for_each_possible_cpu(cpu)
446 		mutex_init(&per_cpu(cpu_access_lock, cpu));
447 }
448 
449 #else
450 
451 static DEFINE_MUTEX(access_lock);
452 
453 static inline void trace_access_lock(int cpu)
454 {
455 	(void)cpu;
456 	mutex_lock(&access_lock);
457 }
458 
459 static inline void trace_access_unlock(int cpu)
460 {
461 	(void)cpu;
462 	mutex_unlock(&access_lock);
463 }
464 
465 static inline void trace_access_lock_init(void)
466 {
467 }
468 
469 #endif
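
/*
 * Usage sketch (illustrative, not from this file): a reader of one cpu
 * buffer, or of all of them, brackets its accesses with the primitives
 * above:
 *
 *	trace_access_lock(cpu_file);
 *	...peek at or consume events...
 *	trace_access_unlock(cpu_file);
 *
 * where cpu_file is either a cpu number or RING_BUFFER_ALL_CPUS.
 */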
470 
471 /* trace_flags holds trace_options default values */
472 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
473 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
474 	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
475 	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
476 
477 static void tracer_tracing_on(struct trace_array *tr)
478 {
479 	if (tr->trace_buffer.buffer)
480 		ring_buffer_record_on(tr->trace_buffer.buffer);
481 	/*
482 	 * This flag is looked at when buffers haven't been allocated
483 	 * yet, or by some tracers (like irqsoff), that just want to
484 	 * know if the ring buffer has been disabled, but it can handle
485 	 * races where it gets disabled while we still do a record.
486 	 * As the check is in the fast path of the tracers, it is more
487 	 * important to be fast than accurate.
488 	 */
489 	tr->buffer_disabled = 0;
490 	/* Make the flag seen by readers */
491 	smp_wmb();
492 }
493 
494 /**
495  * tracing_on - enable tracing buffers
496  *
497  * This function enables tracing buffers that may have been
498  * disabled with tracing_off.
499  */
500 void tracing_on(void)
501 {
502 	tracer_tracing_on(&global_trace);
503 }
504 EXPORT_SYMBOL_GPL(tracing_on);
505 
506 /**
507  * __trace_puts - write a constant string into the trace buffer.
508  * @ip:	   The address of the caller
509  * @str:   The constant string to write
510  * @size:  The size of the string.
511  */
512 int __trace_puts(unsigned long ip, const char *str, int size)
513 {
514 	struct ring_buffer_event *event;
515 	struct ring_buffer *buffer;
516 	struct print_entry *entry;
517 	unsigned long irq_flags;
518 	int alloc;
519 	int pc;
520 
521 	if (!(trace_flags & TRACE_ITER_PRINTK))
522 		return 0;
523 
524 	pc = preempt_count();
525 
526 	if (unlikely(tracing_selftest_running || tracing_disabled))
527 		return 0;
528 
529 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
530 
531 	local_save_flags(irq_flags);
532 	buffer = global_trace.trace_buffer.buffer;
533 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
534 					  irq_flags, pc);
535 	if (!event)
536 		return 0;
537 
538 	entry = ring_buffer_event_data(event);
539 	entry->ip = ip;
540 
541 	memcpy(&entry->buf, str, size);
542 
543 	/* Add a newline if necessary */
544 	if (entry->buf[size - 1] != '\n') {
545 		entry->buf[size] = '\n';
546 		entry->buf[size + 1] = '\0';
547 	} else
548 		entry->buf[size] = '\0';
549 
550 	__buffer_unlock_commit(buffer, event);
551 	ftrace_trace_stack(buffer, irq_flags, 4, pc);
552 
553 	return size;
554 }
555 EXPORT_SYMBOL_GPL(__trace_puts);
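
/*
 * Example (illustrative; trace_puts() is assumed to be the wrapper macro
 * from linux/kernel.h that routes constant strings to __trace_bputs()
 * and everything else here):
 *
 *	trace_puts("hit the slow path\n");
 *
 * Both helpers return 0 when TRACE_ITER_PRINTK is clear or tracing is
 * disabled, as the checks in __trace_puts() show.
 */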
556 
557 /**
558  * __trace_bputs - write the pointer to a constant string into trace buffer
559  * @ip:	   The address of the caller
560  * @str:   The constant string to write to the buffer
561  */
562 int __trace_bputs(unsigned long ip, const char *str)
563 {
564 	struct ring_buffer_event *event;
565 	struct ring_buffer *buffer;
566 	struct bputs_entry *entry;
567 	unsigned long irq_flags;
568 	int size = sizeof(struct bputs_entry);
569 	int pc;
570 
571 	if (!(trace_flags & TRACE_ITER_PRINTK))
572 		return 0;
573 
574 	pc = preempt_count();
575 
576 	if (unlikely(tracing_selftest_running || tracing_disabled))
577 		return 0;
578 
579 	local_save_flags(irq_flags);
580 	buffer = global_trace.trace_buffer.buffer;
581 	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
582 					  irq_flags, pc);
583 	if (!event)
584 		return 0;
585 
586 	entry = ring_buffer_event_data(event);
587 	entry->ip			= ip;
588 	entry->str			= str;
589 
590 	__buffer_unlock_commit(buffer, event);
591 	ftrace_trace_stack(buffer, irq_flags, 4, pc);
592 
593 	return 1;
594 }
595 EXPORT_SYMBOL_GPL(__trace_bputs);
596 
597 #ifdef CONFIG_TRACER_SNAPSHOT
598 /**
599  * trace_snapshot - take a snapshot of the current buffer.
600  *
601  * This causes a swap between the snapshot buffer and the current live
602  * tracing buffer. You can use this to take snapshots of the live
603  * trace when some condition is triggered, but continue to trace.
604  *
605  * Note, make sure to allocate the snapshot with either
606  * a tracing_snapshot_alloc(), or by doing it manually
607  * with: echo 1 > /sys/kernel/debug/tracing/snapshot
608  *
609  * If the snapshot buffer is not allocated, it will stop tracing.
610  * Basically making a permanent snapshot.
611  */
612 void tracing_snapshot(void)
613 {
614 	struct trace_array *tr = &global_trace;
615 	struct tracer *tracer = tr->current_trace;
616 	unsigned long flags;
617 
618 	if (in_nmi()) {
619 		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
620 		internal_trace_puts("*** snapshot is being ignored        ***\n");
621 		return;
622 	}
623 
624 	if (!tr->allocated_snapshot) {
625 		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
626 		internal_trace_puts("*** stopping trace here!   ***\n");
627 		tracing_off();
628 		return;
629 	}
630 
631 	/* Note, snapshot can not be used when the tracer uses it */
632 	if (tracer->use_max_tr) {
633 		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
634 		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
635 		return;
636 	}
637 
638 	local_irq_save(flags);
639 	update_max_tr(tr, current, smp_processor_id());
640 	local_irq_restore(flags);
641 }
642 EXPORT_SYMBOL_GPL(tracing_snapshot);
643 
644 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
645 					struct trace_buffer *size_buf, int cpu_id);
646 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
647 
648 static int alloc_snapshot(struct trace_array *tr)
649 {
650 	int ret;
651 
652 	if (!tr->allocated_snapshot) {
653 
654 		/* allocate spare buffer */
655 		ret = resize_buffer_duplicate_size(&tr->max_buffer,
656 				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
657 		if (ret < 0)
658 			return ret;
659 
660 		tr->allocated_snapshot = true;
661 	}
662 
663 	return 0;
664 }
665 
666 static void free_snapshot(struct trace_array *tr)
667 {
668 	/*
669 	 * We don't free the ring buffer; instead, we resize it because
670 	 * the max_tr ring buffer has some state (e.g. ring->clock) and
671 	 * we want to preserve it.
672 	 */
673 	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
674 	set_buffer_entries(&tr->max_buffer, 1);
675 	tracing_reset_online_cpus(&tr->max_buffer);
676 	tr->allocated_snapshot = false;
677 }
678 
679 /**
680  * tracing_alloc_snapshot - allocate snapshot buffer.
681  *
682  * This only allocates the snapshot buffer if it isn't already
683  * allocated - it doesn't also take a snapshot.
684  *
685  * This is meant to be used in cases where the snapshot buffer needs
686  * to be set up for events that can't sleep but need to be able to
687  * trigger a snapshot.
688  */
689 int tracing_alloc_snapshot(void)
690 {
691 	struct trace_array *tr = &global_trace;
692 	int ret;
693 
694 	ret = alloc_snapshot(tr);
695 	WARN_ON(ret < 0);
696 
697 	return ret;
698 }
699 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
700 
701 /**
702  * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
703  *
704  * This is similar to trace_snapshot(), but it will allocate the
705  * snapshot buffer if it isn't already allocated. Use this only
706  * where it is safe to sleep, as the allocation may sleep.
707  *
708  * This causes a swap between the snapshot buffer and the current live
709  * tracing buffer. You can use this to take snapshots of the live
710  * trace when some condition is triggered, but continue to trace.
711  */
712 void tracing_snapshot_alloc(void)
713 {
714 	int ret;
715 
716 	ret = tracing_alloc_snapshot();
717 	if (ret < 0)
718 		return;
719 
720 	tracing_snapshot();
721 }
722 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
723 #else
724 void tracing_snapshot(void)
725 {
726 	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
727 }
728 EXPORT_SYMBOL_GPL(tracing_snapshot);
729 int tracing_alloc_snapshot(void)
730 {
731 	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
732 	return -ENODEV;
733 }
734 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
735 void tracing_snapshot_alloc(void)
736 {
737 	/* Give warning */
738 	tracing_snapshot();
739 }
740 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
741 #endif /* CONFIG_TRACER_SNAPSHOT */
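
/*
 * Usage sketch (illustrative, not from this file): a debugging hook that
 * wants a one-shot capture of the live buffer can call
 *
 *	tracing_snapshot_alloc();	(from sleepable context)
 *
 * once, and later, from any context except NMI,
 *
 *	tracing_snapshot();
 *
 * With CONFIG_TRACER_SNAPSHOT disabled these degrade to the WARN_ONCE()
 * stubs above.
 */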
742 
743 static void tracer_tracing_off(struct trace_array *tr)
744 {
745 	if (tr->trace_buffer.buffer)
746 		ring_buffer_record_off(tr->trace_buffer.buffer);
747 	/*
748 	 * This flag is looked at when buffers haven't been allocated
749 	 * yet, or by some tracers (like irqsoff), that just want to
750 	 * know if the ring buffer has been disabled, but it can handle
751 	 * races where it gets disabled while we still do a record.
752 	 * As the check is in the fast path of the tracers, it is more
753 	 * important to be fast than accurate.
754 	 */
755 	tr->buffer_disabled = 1;
756 	/* Make the flag seen by readers */
757 	smp_wmb();
758 }
759 
760 /**
761  * tracing_off - turn off tracing buffers
762  *
763  * This function stops the tracing buffers from recording data.
764  * It does not disable any overhead the tracers themselves may
765  * be causing. This function simply causes all recording to
766  * the ring buffers to fail.
767  */
768 void tracing_off(void)
769 {
770 	tracer_tracing_off(&global_trace);
771 }
772 EXPORT_SYMBOL_GPL(tracing_off);
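
/*
 * Example (illustrative; suspicious_operation() is hypothetical):
 * tracing_on()/tracing_off() are handy for bracketing a suspect region
 * so the ring buffer only keeps the interesting window:
 *
 *	tracing_on();
 *	suspicious_operation();
 *	tracing_off();
 *
 * Only recording stops; the tracers themselves keep running, as the
 * tracing_off() comment above notes.
 */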
773 
774 void disable_trace_on_warning(void)
775 {
776 	if (__disable_trace_on_warning)
777 		tracing_off();
778 }
779 
780 /**
781  * tracer_tracing_is_on - show real state of ring buffer enabled
782  * @tr : the trace array to know if ring buffer is enabled
783  *
784  * Shows real state of the ring buffer if it is enabled or not.
785  */
786 static int tracer_tracing_is_on(struct trace_array *tr)
787 {
788 	if (tr->trace_buffer.buffer)
789 		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
790 	return !tr->buffer_disabled;
791 }
792 
793 /**
794  * tracing_is_on - show state of ring buffers enabled
795  */
796 int tracing_is_on(void)
797 {
798 	return tracer_tracing_is_on(&global_trace);
799 }
800 EXPORT_SYMBOL_GPL(tracing_is_on);
801 
802 static int __init set_buf_size(char *str)
803 {
804 	unsigned long buf_size;
805 
806 	if (!str)
807 		return 0;
808 	buf_size = memparse(str, &str);
809 	/* nr_entries can not be zero */
810 	if (buf_size == 0)
811 		return 0;
812 	trace_buf_size = buf_size;
813 	return 1;
814 }
815 __setup("trace_buf_size=", set_buf_size);
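
/*
 * Example (illustrative): because memparse() is used above, size
 * suffixes are accepted on the command line, e.g.
 *
 *	trace_buf_size=10M tracing_thresh=500
 *
 * requests a 10 MB trace buffer and, via the handler below, a latency
 * threshold of 500 usecs.
 */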
816 
817 static int __init set_tracing_thresh(char *str)
818 {
819 	unsigned long threshold;
820 	int ret;
821 
822 	if (!str)
823 		return 0;
824 	ret = kstrtoul(str, 0, &threshold);
825 	if (ret < 0)
826 		return 0;
827 	tracing_thresh = threshold * 1000;
828 	return 1;
829 }
830 __setup("tracing_thresh=", set_tracing_thresh);
831 
832 unsigned long nsecs_to_usecs(unsigned long nsecs)
833 {
834 	return nsecs / 1000;
835 }
836 
837 /* These must match the bit positions in trace_iterator_flags */
838 static const char *trace_options[] = {
839 	"print-parent",
840 	"sym-offset",
841 	"sym-addr",
842 	"verbose",
843 	"raw",
844 	"hex",
845 	"bin",
846 	"block",
847 	"stacktrace",
848 	"trace_printk",
849 	"ftrace_preempt",
850 	"branch",
851 	"annotate",
852 	"userstacktrace",
853 	"sym-userobj",
854 	"printk-msg-only",
855 	"context-info",
856 	"latency-format",
857 	"sleep-time",
858 	"graph-time",
859 	"record-cmd",
860 	"overwrite",
861 	"disable_on_free",
862 	"irq-info",
863 	"markers",
864 	"function-trace",
865 	NULL
866 };
867 
868 static struct {
869 	u64 (*func)(void);
870 	const char *name;
871 	int in_ns;		/* is this clock in nanoseconds? */
872 } trace_clocks[] = {
873 	{ trace_clock_local,		"local",	1 },
874 	{ trace_clock_global,		"global",	1 },
875 	{ trace_clock_counter,		"counter",	0 },
876 	{ trace_clock_jiffies,		"uptime",	0 },
877 	{ trace_clock,			"perf",		1 },
878 	{ ktime_get_mono_fast_ns,	"mono",		1 },
879 	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
880 	ARCH_TRACE_CLOCKS
881 };
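
/*
 * Example (illustrative): the timestamp clock can be chosen at boot with
 * the trace_clock= parameter handled earlier, e.g.
 *
 *	trace_clock=global
 *
 * or at run time by writing one of the names above to the trace_clock
 * file in the tracing directory (commonly /sys/kernel/debug/tracing).
 */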
882 
883 /*
884  * trace_parser_get_init - gets the buffer for trace parser
885  */
886 int trace_parser_get_init(struct trace_parser *parser, int size)
887 {
888 	memset(parser, 0, sizeof(*parser));
889 
890 	parser->buffer = kmalloc(size, GFP_KERNEL);
891 	if (!parser->buffer)
892 		return 1;
893 
894 	parser->size = size;
895 	return 0;
896 }
897 
898 /*
899  * trace_parser_put - frees the buffer for trace parser
900  */
901 void trace_parser_put(struct trace_parser *parser)
902 {
903 	kfree(parser->buffer);
904 }
905 
906 /*
907  * trace_get_user - reads the user input string separated by space
908  * (matched by isspace(ch))
909  *
910  * For each string found the 'struct trace_parser' is updated,
911  * and the function returns.
912  *
913  * Returns number of bytes read.
914  *
915  * See kernel/trace/trace.h for 'struct trace_parser' details.
916  */
917 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
918 	size_t cnt, loff_t *ppos)
919 {
920 	char ch;
921 	size_t read = 0;
922 	ssize_t ret;
923 
924 	if (!*ppos)
925 		trace_parser_clear(parser);
926 
927 	ret = get_user(ch, ubuf++);
928 	if (ret)
929 		goto out;
930 
931 	read++;
932 	cnt--;
933 
934 	/*
935 	 * The parser is not finished with the last write,
936 	 * continue reading the user input without skipping spaces.
937 	 */
938 	if (!parser->cont) {
939 		/* skip white space */
940 		while (cnt && isspace(ch)) {
941 			ret = get_user(ch, ubuf++);
942 			if (ret)
943 				goto out;
944 			read++;
945 			cnt--;
946 		}
947 
948 		/* only spaces were written */
949 		if (isspace(ch)) {
950 			*ppos += read;
951 			ret = read;
952 			goto out;
953 		}
954 
955 		parser->idx = 0;
956 	}
957 
958 	/* read the non-space input */
959 	while (cnt && !isspace(ch)) {
960 		if (parser->idx < parser->size - 1)
961 			parser->buffer[parser->idx++] = ch;
962 		else {
963 			ret = -EINVAL;
964 			goto out;
965 		}
966 		ret = get_user(ch, ubuf++);
967 		if (ret)
968 			goto out;
969 		read++;
970 		cnt--;
971 	}
972 
973 	/* We either got finished input or we have to wait for another call. */
974 	if (isspace(ch)) {
975 		parser->buffer[parser->idx] = 0;
976 		parser->cont = false;
977 	} else if (parser->idx < parser->size - 1) {
978 		parser->cont = true;
979 		parser->buffer[parser->idx++] = ch;
980 	} else {
981 		ret = -EINVAL;
982 		goto out;
983 	}
984 
985 	*ppos += read;
986 	ret = read;
987 
988 out:
989 	return ret;
990 }
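
/*
 * Usage sketch (illustrative, not from this file; trace_parser_loaded()
 * and handle_token() are assumed/hypothetical): a write handler that
 * accepts whitespace-separated tokens from user space typically does
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		handle_token(parser.buffer);
 *	trace_parser_put(&parser);
 *
 * trace_get_user() returns the number of bytes consumed, or -EINVAL when
 * a token does not fit in the parser buffer.
 */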
991 
992 /* TODO add a seq_buf_to_buffer() */
993 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
994 {
995 	int len;
996 
997 	if (trace_seq_used(s) <= s->seq.readpos)
998 		return -EBUSY;
999 
1000 	len = trace_seq_used(s) - s->seq.readpos;
1001 	if (cnt > len)
1002 		cnt = len;
1003 	memcpy(buf, s->buffer + s->seq.readpos, cnt);
1004 
1005 	s->seq.readpos += cnt;
1006 	return cnt;
1007 }
1008 
1009 unsigned long __read_mostly	tracing_thresh;
1010 
1011 #ifdef CONFIG_TRACER_MAX_TRACE
1012 /*
1013  * Copy the new maximum trace into the separate maximum-trace
1014  * structure. (this way the maximum trace is permanently saved,
1015  * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1016  */
1017 static void
1018 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1019 {
1020 	struct trace_buffer *trace_buf = &tr->trace_buffer;
1021 	struct trace_buffer *max_buf = &tr->max_buffer;
1022 	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1023 	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1024 
1025 	max_buf->cpu = cpu;
1026 	max_buf->time_start = data->preempt_timestamp;
1027 
1028 	max_data->saved_latency = tr->max_latency;
1029 	max_data->critical_start = data->critical_start;
1030 	max_data->critical_end = data->critical_end;
1031 
1032 	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1033 	max_data->pid = tsk->pid;
1034 	/*
1035 	 * If tsk == current, then use current_uid(), as that does not use
1036 	 * RCU. The irq tracer can be called out of RCU scope.
1037 	 */
1038 	if (tsk == current)
1039 		max_data->uid = current_uid();
1040 	else
1041 		max_data->uid = task_uid(tsk);
1042 
1043 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1044 	max_data->policy = tsk->policy;
1045 	max_data->rt_priority = tsk->rt_priority;
1046 
1047 	/* record this task's comm */
1048 	tracing_record_cmdline(tsk);
1049 }
1050 
1051 /**
1052  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1053  * @tr: tracer
1054  * @tsk: the task with the latency
1055  * @cpu: The cpu that initiated the trace.
1056  *
1057  * Flip the buffers between the @tr and the max_tr and record information
1058  * about which task was the cause of this latency.
1059  */
1060 void
1061 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1062 {
1063 	struct ring_buffer *buf;
1064 
1065 	if (tr->stop_count)
1066 		return;
1067 
1068 	WARN_ON_ONCE(!irqs_disabled());
1069 
1070 	if (!tr->allocated_snapshot) {
1071 		/* Only the nop tracer should hit this when disabling */
1072 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1073 		return;
1074 	}
1075 
1076 	arch_spin_lock(&tr->max_lock);
1077 
1078 	buf = tr->trace_buffer.buffer;
1079 	tr->trace_buffer.buffer = tr->max_buffer.buffer;
1080 	tr->max_buffer.buffer = buf;
1081 
1082 	__update_max_tr(tr, tsk, cpu);
1083 	arch_spin_unlock(&tr->max_lock);
1084 }
1085 
1086 /**
1087  * update_max_tr_single - only copy one trace over, and reset the rest
1088  * @tr: tracer
1089  * @tsk: task with the latency
1090  * @cpu: the cpu of the buffer to copy.
1091  *
1092  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1093  */
1094 void
1095 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1096 {
1097 	int ret;
1098 
1099 	if (tr->stop_count)
1100 		return;
1101 
1102 	WARN_ON_ONCE(!irqs_disabled());
1103 	if (!tr->allocated_snapshot) {
1104 		/* Only the nop tracer should hit this when disabling */
1105 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1106 		return;
1107 	}
1108 
1109 	arch_spin_lock(&tr->max_lock);
1110 
1111 	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1112 
1113 	if (ret == -EBUSY) {
1114 		/*
1115 		 * We failed to swap the buffer due to a commit taking
1116 		 * place on this CPU. We fail to record, but we reset
1117 		 * the max trace buffer (no one writes directly to it)
1118 		 * and flag that it failed.
1119 		 */
1120 		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1121 			"Failed to swap buffers due to commit in progress\n");
1122 	}
1123 
1124 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1125 
1126 	__update_max_tr(tr, tsk, cpu);
1127 	arch_spin_unlock(&tr->max_lock);
1128 }
1129 #endif /* CONFIG_TRACER_MAX_TRACE */
1130 
1131 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1132 {
1133 	/* Iterators are static, they should be filled or empty */
1134 	if (trace_buffer_iter(iter, iter->cpu_file))
1135 		return 0;
1136 
1137 	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1138 				full);
1139 }
1140 
1141 #ifdef CONFIG_FTRACE_STARTUP_TEST
1142 static int run_tracer_selftest(struct tracer *type)
1143 {
1144 	struct trace_array *tr = &global_trace;
1145 	struct tracer *saved_tracer = tr->current_trace;
1146 	int ret;
1147 
1148 	if (!type->selftest || tracing_selftest_disabled)
1149 		return 0;
1150 
1151 	/*
1152 	 * Run a selftest on this tracer.
1153 	 * Here we reset the trace buffer, and set the current
1154 	 * tracer to be this tracer. The tracer can then run some
1155 	 * internal tracing to verify that everything is in order.
1156 	 * If we fail, we do not register this tracer.
1157 	 */
1158 	tracing_reset_online_cpus(&tr->trace_buffer);
1159 
1160 	tr->current_trace = type;
1161 
1162 #ifdef CONFIG_TRACER_MAX_TRACE
1163 	if (type->use_max_tr) {
1164 		/* If we expanded the buffers, make sure the max is expanded too */
1165 		if (ring_buffer_expanded)
1166 			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1167 					   RING_BUFFER_ALL_CPUS);
1168 		tr->allocated_snapshot = true;
1169 	}
1170 #endif
1171 
1172 	/* the test is responsible for initializing and enabling */
1173 	pr_info("Testing tracer %s: ", type->name);
1174 	ret = type->selftest(type, tr);
1175 	/* the test is responsible for resetting too */
1176 	tr->current_trace = saved_tracer;
1177 	if (ret) {
1178 		printk(KERN_CONT "FAILED!\n");
1179 		/* Add the warning after printing 'FAILED' */
1180 		WARN_ON(1);
1181 		return -1;
1182 	}
1183 	/* Only reset on passing, to avoid touching corrupted buffers */
1184 	tracing_reset_online_cpus(&tr->trace_buffer);
1185 
1186 #ifdef CONFIG_TRACER_MAX_TRACE
1187 	if (type->use_max_tr) {
1188 		tr->allocated_snapshot = false;
1189 
1190 		/* Shrink the max buffer again */
1191 		if (ring_buffer_expanded)
1192 			ring_buffer_resize(tr->max_buffer.buffer, 1,
1193 					   RING_BUFFER_ALL_CPUS);
1194 	}
1195 #endif
1196 
1197 	printk(KERN_CONT "PASSED\n");
1198 	return 0;
1199 }
1200 #else
1201 static inline int run_tracer_selftest(struct tracer *type)
1202 {
1203 	return 0;
1204 }
1205 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1206 
1207 /**
1208  * register_tracer - register a tracer with the ftrace system.
1209  * @type: the plugin for the tracer
1210  *
1211  * Register a new plugin tracer.
1212  */
1213 int register_tracer(struct tracer *type)
1214 {
1215 	struct tracer *t;
1216 	int ret = 0;
1217 
1218 	if (!type->name) {
1219 		pr_info("Tracer must have a name\n");
1220 		return -1;
1221 	}
1222 
1223 	if (strlen(type->name) >= MAX_TRACER_SIZE) {
1224 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1225 		return -1;
1226 	}
1227 
1228 	mutex_lock(&trace_types_lock);
1229 
1230 	tracing_selftest_running = true;
1231 
1232 	for (t = trace_types; t; t = t->next) {
1233 		if (strcmp(type->name, t->name) == 0) {
1234 			/* already found */
1235 			pr_info("Tracer %s already registered\n",
1236 				type->name);
1237 			ret = -1;
1238 			goto out;
1239 		}
1240 	}
1241 
1242 	if (!type->set_flag)
1243 		type->set_flag = &dummy_set_flag;
1244 	if (!type->flags)
1245 		type->flags = &dummy_tracer_flags;
1246 	else
1247 		if (!type->flags->opts)
1248 			type->flags->opts = dummy_tracer_opt;
1249 
1250 	ret = run_tracer_selftest(type);
1251 	if (ret < 0)
1252 		goto out;
1253 
1254 	type->next = trace_types;
1255 	trace_types = type;
1256 
1257  out:
1258 	tracing_selftest_running = false;
1259 	mutex_unlock(&trace_types_lock);
1260 
1261 	if (ret || !default_bootup_tracer)
1262 		goto out_unlock;
1263 
1264 	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1265 		goto out_unlock;
1266 
1267 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1268 	/* Do we want this tracer to start on bootup? */
1269 	tracing_set_tracer(&global_trace, type->name);
1270 	default_bootup_tracer = NULL;
1271 	/* disable other selftests, since this will break them. */
1272 	tracing_selftest_disabled = true;
1273 #ifdef CONFIG_FTRACE_STARTUP_TEST
1274 	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1275 	       type->name);
1276 #endif
1277 
1278  out_unlock:
1279 	return ret;
1280 }
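
/*
 * Registration sketch (illustrative; "mytrace" and its callbacks are
 * hypothetical): a tracer plugin fills in a struct tracer and hands it
 * to register_tracer(), usually from an __init function:
 *
 *	static struct tracer mytrace_tracer __read_mostly = {
 *		.name	= "mytrace",
 *		.init	= mytrace_init,
 *		.reset	= mytrace_reset,
 *	};
 *
 *	static __init int init_mytrace(void)
 *	{
 *		return register_tracer(&mytrace_tracer);
 *	}
 *	core_initcall(init_mytrace);
 *
 * Missing set_flag/flags callbacks are filled in with the dummy versions
 * above, and the optional selftest runs before the tracer is linked into
 * trace_types.
 */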
1281 
1282 void tracing_reset(struct trace_buffer *buf, int cpu)
1283 {
1284 	struct ring_buffer *buffer = buf->buffer;
1285 
1286 	if (!buffer)
1287 		return;
1288 
1289 	ring_buffer_record_disable(buffer);
1290 
1291 	/* Make sure all commits have finished */
1292 	synchronize_sched();
1293 	ring_buffer_reset_cpu(buffer, cpu);
1294 
1295 	ring_buffer_record_enable(buffer);
1296 }
1297 
1298 void tracing_reset_online_cpus(struct trace_buffer *buf)
1299 {
1300 	struct ring_buffer *buffer = buf->buffer;
1301 	int cpu;
1302 
1303 	if (!buffer)
1304 		return;
1305 
1306 	ring_buffer_record_disable(buffer);
1307 
1308 	/* Make sure all commits have finished */
1309 	synchronize_sched();
1310 
1311 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1312 
1313 	for_each_online_cpu(cpu)
1314 		ring_buffer_reset_cpu(buffer, cpu);
1315 
1316 	ring_buffer_record_enable(buffer);
1317 }
1318 
1319 /* Must have trace_types_lock held */
1320 void tracing_reset_all_online_cpus(void)
1321 {
1322 	struct trace_array *tr;
1323 
1324 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1325 		tracing_reset_online_cpus(&tr->trace_buffer);
1326 #ifdef CONFIG_TRACER_MAX_TRACE
1327 		tracing_reset_online_cpus(&tr->max_buffer);
1328 #endif
1329 	}
1330 }
1331 
1332 #define SAVED_CMDLINES_DEFAULT 128
1333 #define NO_CMDLINE_MAP UINT_MAX
1334 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1335 struct saved_cmdlines_buffer {
1336 	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1337 	unsigned *map_cmdline_to_pid;
1338 	unsigned cmdline_num;
1339 	int cmdline_idx;
1340 	char *saved_cmdlines;
1341 };
1342 static struct saved_cmdlines_buffer *savedcmd;
1343 
1344 /* temporary disable recording */
1345 static atomic_t trace_record_cmdline_disabled __read_mostly;
1346 
1347 static inline char *get_saved_cmdlines(int idx)
1348 {
1349 	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1350 }
1351 
1352 static inline void set_cmdline(int idx, const char *cmdline)
1353 {
1354 	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1355 }
1356 
1357 static int allocate_cmdlines_buffer(unsigned int val,
1358 				    struct saved_cmdlines_buffer *s)
1359 {
1360 	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1361 					GFP_KERNEL);
1362 	if (!s->map_cmdline_to_pid)
1363 		return -ENOMEM;
1364 
1365 	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1366 	if (!s->saved_cmdlines) {
1367 		kfree(s->map_cmdline_to_pid);
1368 		return -ENOMEM;
1369 	}
1370 
1371 	s->cmdline_idx = 0;
1372 	s->cmdline_num = val;
1373 	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1374 	       sizeof(s->map_pid_to_cmdline));
1375 	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1376 	       val * sizeof(*s->map_cmdline_to_pid));
1377 
1378 	return 0;
1379 }
1380 
1381 static int trace_create_savedcmd(void)
1382 {
1383 	int ret;
1384 
1385 	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1386 	if (!savedcmd)
1387 		return -ENOMEM;
1388 
1389 	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1390 	if (ret < 0) {
1391 		kfree(savedcmd);
1392 		savedcmd = NULL;
1393 		return -ENOMEM;
1394 	}
1395 
1396 	return 0;
1397 }
1398 
1399 int is_tracing_stopped(void)
1400 {
1401 	return global_trace.stop_count;
1402 }
1403 
1404 /**
1405  * tracing_start - quick start of the tracer
1406  *
1407  * If tracing is enabled but was stopped by tracing_stop,
1408  * this will start the tracer back up.
1409  */
1410 void tracing_start(void)
1411 {
1412 	struct ring_buffer *buffer;
1413 	unsigned long flags;
1414 
1415 	if (tracing_disabled)
1416 		return;
1417 
1418 	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1419 	if (--global_trace.stop_count) {
1420 		if (global_trace.stop_count < 0) {
1421 			/* Someone screwed up their debugging */
1422 			WARN_ON_ONCE(1);
1423 			global_trace.stop_count = 0;
1424 		}
1425 		goto out;
1426 	}
1427 
1428 	/* Prevent the buffers from switching */
1429 	arch_spin_lock(&global_trace.max_lock);
1430 
1431 	buffer = global_trace.trace_buffer.buffer;
1432 	if (buffer)
1433 		ring_buffer_record_enable(buffer);
1434 
1435 #ifdef CONFIG_TRACER_MAX_TRACE
1436 	buffer = global_trace.max_buffer.buffer;
1437 	if (buffer)
1438 		ring_buffer_record_enable(buffer);
1439 #endif
1440 
1441 	arch_spin_unlock(&global_trace.max_lock);
1442 
1443  out:
1444 	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1445 }
1446 
1447 static void tracing_start_tr(struct trace_array *tr)
1448 {
1449 	struct ring_buffer *buffer;
1450 	unsigned long flags;
1451 
1452 	if (tracing_disabled)
1453 		return;
1454 
1455 	/* If global, we need to also start the max tracer */
1456 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1457 		return tracing_start();
1458 
1459 	raw_spin_lock_irqsave(&tr->start_lock, flags);
1460 
1461 	if (--tr->stop_count) {
1462 		if (tr->stop_count < 0) {
1463 			/* Someone screwed up their debugging */
1464 			WARN_ON_ONCE(1);
1465 			tr->stop_count = 0;
1466 		}
1467 		goto out;
1468 	}
1469 
1470 	buffer = tr->trace_buffer.buffer;
1471 	if (buffer)
1472 		ring_buffer_record_enable(buffer);
1473 
1474  out:
1475 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1476 }
1477 
1478 /**
1479  * tracing_stop - quick stop of the tracer
1480  *
1481  * Light weight way to stop tracing. Use in conjunction with
1482  * tracing_start.
1483  */
1484 void tracing_stop(void)
1485 {
1486 	struct ring_buffer *buffer;
1487 	unsigned long flags;
1488 
1489 	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1490 	if (global_trace.stop_count++)
1491 		goto out;
1492 
1493 	/* Prevent the buffers from switching */
1494 	arch_spin_lock(&global_trace.max_lock);
1495 
1496 	buffer = global_trace.trace_buffer.buffer;
1497 	if (buffer)
1498 		ring_buffer_record_disable(buffer);
1499 
1500 #ifdef CONFIG_TRACER_MAX_TRACE
1501 	buffer = global_trace.max_buffer.buffer;
1502 	if (buffer)
1503 		ring_buffer_record_disable(buffer);
1504 #endif
1505 
1506 	arch_spin_unlock(&global_trace.max_lock);
1507 
1508  out:
1509 	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1510 }
1511 
1512 static void tracing_stop_tr(struct trace_array *tr)
1513 {
1514 	struct ring_buffer *buffer;
1515 	unsigned long flags;
1516 
1517 	/* If global, we need to also stop the max tracer */
1518 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1519 		return tracing_stop();
1520 
1521 	raw_spin_lock_irqsave(&tr->start_lock, flags);
1522 	if (tr->stop_count++)
1523 		goto out;
1524 
1525 	buffer = tr->trace_buffer.buffer;
1526 	if (buffer)
1527 		ring_buffer_record_disable(buffer);
1528 
1529  out:
1530 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1531 }
1532 
1533 void trace_stop_cmdline_recording(void);
1534 
1535 static int trace_save_cmdline(struct task_struct *tsk)
1536 {
1537 	unsigned pid, idx;
1538 
1539 	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1540 		return 0;
1541 
1542 	/*
1543 	 * It's not the end of the world if we don't get
1544 	 * the lock, but we also don't want to spin
1545 	 * nor do we want to disable interrupts,
1546 	 * so if we miss here, then better luck next time.
1547 	 */
1548 	if (!arch_spin_trylock(&trace_cmdline_lock))
1549 		return 0;
1550 
1551 	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1552 	if (idx == NO_CMDLINE_MAP) {
1553 		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1554 
1555 		/*
1556 		 * Check whether the cmdline buffer at idx has a pid
1557 		 * mapped. We are going to overwrite that entry so we
1558 		 * need to clear the map_pid_to_cmdline. Otherwise we
1559 		 * would read the new comm for the old pid.
1560 		 */
1561 		pid = savedcmd->map_cmdline_to_pid[idx];
1562 		if (pid != NO_CMDLINE_MAP)
1563 			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1564 
1565 		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1566 		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1567 
1568 		savedcmd->cmdline_idx = idx;
1569 	}
1570 
1571 	set_cmdline(idx, tsk->comm);
1572 
1573 	arch_spin_unlock(&trace_cmdline_lock);
1574 
1575 	return 1;
1576 }
1577 
1578 static void __trace_find_cmdline(int pid, char comm[])
1579 {
1580 	unsigned map;
1581 
1582 	if (!pid) {
1583 		strcpy(comm, "<idle>");
1584 		return;
1585 	}
1586 
1587 	if (WARN_ON_ONCE(pid < 0)) {
1588 		strcpy(comm, "<XXX>");
1589 		return;
1590 	}
1591 
1592 	if (pid > PID_MAX_DEFAULT) {
1593 		strcpy(comm, "<...>");
1594 		return;
1595 	}
1596 
1597 	map = savedcmd->map_pid_to_cmdline[pid];
1598 	if (map != NO_CMDLINE_MAP)
1599 		strcpy(comm, get_saved_cmdlines(map));
1600 	else
1601 		strcpy(comm, "<...>");
1602 }
1603 
1604 void trace_find_cmdline(int pid, char comm[])
1605 {
1606 	preempt_disable();
1607 	arch_spin_lock(&trace_cmdline_lock);
1608 
1609 	__trace_find_cmdline(pid, comm);
1610 
1611 	arch_spin_unlock(&trace_cmdline_lock);
1612 	preempt_enable();
1613 }
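
/*
 * Example (illustrative): output code resolves a recorded pid back to a
 * command name with
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *
 * and gets "<...>" when the pid was never saved or its slot has been
 * reused in the saved_cmdlines ring.
 */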
1614 
1615 void tracing_record_cmdline(struct task_struct *tsk)
1616 {
1617 	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1618 		return;
1619 
1620 	if (!__this_cpu_read(trace_cmdline_save))
1621 		return;
1622 
1623 	if (trace_save_cmdline(tsk))
1624 		__this_cpu_write(trace_cmdline_save, false);
1625 }
1626 
1627 void
1628 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1629 			     int pc)
1630 {
1631 	struct task_struct *tsk = current;
1632 
1633 	entry->preempt_count		= pc & 0xff;
1634 	entry->pid			= (tsk) ? tsk->pid : 0;
1635 	entry->flags =
1636 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1637 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1638 #else
1639 		TRACE_FLAG_IRQS_NOSUPPORT |
1640 #endif
1641 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1642 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1643 		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1644 		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1645 }
1646 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1647 
1648 struct ring_buffer_event *
1649 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1650 			  int type,
1651 			  unsigned long len,
1652 			  unsigned long flags, int pc)
1653 {
1654 	struct ring_buffer_event *event;
1655 
1656 	event = ring_buffer_lock_reserve(buffer, len);
1657 	if (event != NULL) {
1658 		struct trace_entry *ent = ring_buffer_event_data(event);
1659 
1660 		tracing_generic_entry_update(ent, flags, pc);
1661 		ent->type = type;
1662 	}
1663 
1664 	return event;
1665 }
1666 
1667 void
1668 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1669 {
1670 	__this_cpu_write(trace_cmdline_save, true);
1671 	ring_buffer_unlock_commit(buffer, event);
1672 }
1673 
1674 static inline void
1675 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1676 			     struct ring_buffer_event *event,
1677 			     unsigned long flags, int pc)
1678 {
1679 	__buffer_unlock_commit(buffer, event);
1680 
1681 	ftrace_trace_stack(buffer, flags, 6, pc);
1682 	ftrace_trace_userstack(buffer, flags, pc);
1683 }
1684 
1685 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1686 				struct ring_buffer_event *event,
1687 				unsigned long flags, int pc)
1688 {
1689 	__trace_buffer_unlock_commit(buffer, event, flags, pc);
1690 }
1691 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1692 
1693 static struct ring_buffer *temp_buffer;
1694 
1695 struct ring_buffer_event *
1696 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1697 			  struct trace_event_file *trace_file,
1698 			  int type, unsigned long len,
1699 			  unsigned long flags, int pc)
1700 {
1701 	struct ring_buffer_event *entry;
1702 
1703 	*current_rb = trace_file->tr->trace_buffer.buffer;
1704 	entry = trace_buffer_lock_reserve(*current_rb,
1705 					 type, len, flags, pc);
1706 	/*
1707 	 * If tracing is off, but we have triggers enabled,
1708 	 * we still need to look at the event data. Use the temp_buffer
1709 	 * to store the trace event for the trigger to use. It's recursion
1710 	 * safe and will not be recorded anywhere.
1711 	 */
1712 	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
1713 		*current_rb = temp_buffer;
1714 		entry = trace_buffer_lock_reserve(*current_rb,
1715 						  type, len, flags, pc);
1716 	}
1717 	return entry;
1718 }
1719 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1720 
1721 struct ring_buffer_event *
1722 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1723 				  int type, unsigned long len,
1724 				  unsigned long flags, int pc)
1725 {
1726 	*current_rb = global_trace.trace_buffer.buffer;
1727 	return trace_buffer_lock_reserve(*current_rb,
1728 					 type, len, flags, pc);
1729 }
1730 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1731 
1732 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1733 					struct ring_buffer_event *event,
1734 					unsigned long flags, int pc)
1735 {
1736 	__trace_buffer_unlock_commit(buffer, event, flags, pc);
1737 }
1738 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1739 
1740 void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1741 				     struct ring_buffer_event *event,
1742 				     unsigned long flags, int pc,
1743 				     struct pt_regs *regs)
1744 {
1745 	__buffer_unlock_commit(buffer, event);
1746 
1747 	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1748 	ftrace_trace_userstack(buffer, flags, pc);
1749 }
1750 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1751 
1752 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1753 					 struct ring_buffer_event *event)
1754 {
1755 	ring_buffer_discard_commit(buffer, event);
1756 }
1757 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1758 
1759 void
1760 trace_function(struct trace_array *tr,
1761 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
1762 	       int pc)
1763 {
1764 	struct trace_event_call *call = &event_function;
1765 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
1766 	struct ring_buffer_event *event;
1767 	struct ftrace_entry *entry;
1768 
1769 	/* If we are reading the ring buffer, don't trace */
1770 	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1771 		return;
1772 
1773 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1774 					  flags, pc);
1775 	if (!event)
1776 		return;
1777 	entry	= ring_buffer_event_data(event);
1778 	entry->ip			= ip;
1779 	entry->parent_ip		= parent_ip;
1780 
1781 	if (!call_filter_check_discard(call, entry, buffer, event))
1782 		__buffer_unlock_commit(buffer, event);
1783 }
1784 
1785 #ifdef CONFIG_STACKTRACE
1786 
1787 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1788 struct ftrace_stack {
1789 	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
1790 };
1791 
1792 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1793 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1794 
1795 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1796 				 unsigned long flags,
1797 				 int skip, int pc, struct pt_regs *regs)
1798 {
1799 	struct trace_event_call *call = &event_kernel_stack;
1800 	struct ring_buffer_event *event;
1801 	struct stack_entry *entry;
1802 	struct stack_trace trace;
1803 	int use_stack;
1804 	int size = FTRACE_STACK_ENTRIES;
1805 
1806 	trace.nr_entries	= 0;
1807 	trace.skip		= skip;
1808 
1809 	/*
1810 	 * Since events can happen in NMIs there's no safe way to
1811 	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1812 	 * or NMI comes in, it will just have to use the default
1813 	 * FTRACE_STACK_SIZE.
1814 	 */
1815 	preempt_disable_notrace();
1816 
1817 	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1818 	/*
1819 	 * We don't need any atomic variables, just a barrier.
1820 	 * If an interrupt comes in, we don't care, because it would
1821 	 * have exited and put the counter back to what we want.
1822 	 * We just need a barrier to keep gcc from moving things
1823 	 * around.
1824 	 */
1825 	barrier();
1826 	if (use_stack == 1) {
1827 		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
1828 		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;
1829 
1830 		if (regs)
1831 			save_stack_trace_regs(regs, &trace);
1832 		else
1833 			save_stack_trace(&trace);
1834 
1835 		if (trace.nr_entries > size)
1836 			size = trace.nr_entries;
1837 	} else
1838 		/* From now on, use_stack is a boolean */
1839 		use_stack = 0;
1840 
1841 	size *= sizeof(unsigned long);
1842 
1843 	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1844 					  sizeof(*entry) + size, flags, pc);
1845 	if (!event)
1846 		goto out;
1847 	entry = ring_buffer_event_data(event);
1848 
1849 	memset(&entry->caller, 0, size);
1850 
1851 	if (use_stack)
1852 		memcpy(&entry->caller, trace.entries,
1853 		       trace.nr_entries * sizeof(unsigned long));
1854 	else {
1855 		trace.max_entries	= FTRACE_STACK_ENTRIES;
1856 		trace.entries		= entry->caller;
1857 		if (regs)
1858 			save_stack_trace_regs(regs, &trace);
1859 		else
1860 			save_stack_trace(&trace);
1861 	}
1862 
1863 	entry->size = trace.nr_entries;
1864 
1865 	if (!call_filter_check_discard(call, entry, buffer, event))
1866 		__buffer_unlock_commit(buffer, event);
1867 
1868  out:
1869 	/* Again, don't let gcc optimize things here */
1870 	barrier();
1871 	__this_cpu_dec(ftrace_stack_reserve);
1872 	preempt_enable_notrace();
1873 
1874 }
1875 
1876 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1877 			     int skip, int pc, struct pt_regs *regs)
1878 {
1879 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
1880 		return;
1881 
1882 	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
1883 }
1884 
1885 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1886 			int skip, int pc)
1887 {
1888 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
1889 		return;
1890 
1891 	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1892 }
1893 
1894 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1895 		   int pc)
1896 {
1897 	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1898 }
1899 
1900 /**
1901  * trace_dump_stack - record a stack back trace in the trace buffer
1902  * @skip: Number of functions to skip (helper handlers)
1903  */
1904 void trace_dump_stack(int skip)
1905 {
1906 	unsigned long flags;
1907 
1908 	if (tracing_disabled || tracing_selftest_running)
1909 		return;
1910 
1911 	local_save_flags(flags);
1912 
1913 	/*
1914 	 * Skip 3 more; that seems to get us to the caller of
1915 	 * this function.
1916 	 */
1917 	skip += 3;
1918 	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
1919 			     flags, skip, preempt_count(), NULL);
1920 }
1921 
1922 static DEFINE_PER_CPU(int, user_stack_count);
1923 
1924 void
1925 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1926 {
1927 	struct trace_event_call *call = &event_user_stack;
1928 	struct ring_buffer_event *event;
1929 	struct userstack_entry *entry;
1930 	struct stack_trace trace;
1931 
1932 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1933 		return;
1934 
1935 	/*
1936 	 * NMIs can not handle page faults, even with fixups.
1937 	 * Saving the user stack can (and often does) fault.
1938 	 */
1939 	if (unlikely(in_nmi()))
1940 		return;
1941 
1942 	/*
1943 	 * prevent recursion, since the user stack tracing may
1944 	 * trigger other kernel events.
1945 	 */
1946 	preempt_disable();
1947 	if (__this_cpu_read(user_stack_count))
1948 		goto out;
1949 
1950 	__this_cpu_inc(user_stack_count);
1951 
1952 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1953 					  sizeof(*entry), flags, pc);
1954 	if (!event)
1955 		goto out_drop_count;
1956 	entry	= ring_buffer_event_data(event);
1957 
1958 	entry->tgid		= current->tgid;
1959 	memset(&entry->caller, 0, sizeof(entry->caller));
1960 
1961 	trace.nr_entries	= 0;
1962 	trace.max_entries	= FTRACE_STACK_ENTRIES;
1963 	trace.skip		= 0;
1964 	trace.entries		= entry->caller;
1965 
1966 	save_stack_trace_user(&trace);
1967 	if (!call_filter_check_discard(call, entry, buffer, event))
1968 		__buffer_unlock_commit(buffer, event);
1969 
1970  out_drop_count:
1971 	__this_cpu_dec(user_stack_count);
1972  out:
1973 	preempt_enable();
1974 }
1975 
1976 #ifdef UNUSED
1977 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1978 {
1979 	ftrace_trace_userstack(tr, flags, preempt_count());
1980 }
1981 #endif /* UNUSED */
1982 
1983 #endif /* CONFIG_STACKTRACE */
1984 
1985 /* created for use with alloc_percpu */
1986 struct trace_buffer_struct {
1987 	char buffer[TRACE_BUF_SIZE];
1988 };
1989 
1990 static struct trace_buffer_struct *trace_percpu_buffer;
1991 static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1992 static struct trace_buffer_struct *trace_percpu_irq_buffer;
1993 static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1994 
1995 /*
1996  * The buffer used depends on the context. There is a per cpu
1997  * buffer for normal context, softirq context, hard irq context and
1998  * for NMI context. This allows for lockless recording.
1999  *
2000  * Note, if the buffers failed to be allocated, then this returns NULL.
2001  */
2002 static char *get_trace_buf(void)
2003 {
2004 	struct trace_buffer_struct *percpu_buffer;
2005 
2006 	/*
2007 	 * If we have allocated per cpu buffers, then we do not
2008 	 * need to do any locking.
2009 	 */
2010 	if (in_nmi())
2011 		percpu_buffer = trace_percpu_nmi_buffer;
2012 	else if (in_irq())
2013 		percpu_buffer = trace_percpu_irq_buffer;
2014 	else if (in_softirq())
2015 		percpu_buffer = trace_percpu_sirq_buffer;
2016 	else
2017 		percpu_buffer = trace_percpu_buffer;
2018 
2019 	if (!percpu_buffer)
2020 		return NULL;
2021 
2022 	return this_cpu_ptr(&percpu_buffer->buffer[0]);
2023 }
2024 
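/*
 * Allocate the four per-CPU trace_printk() scratch buffers (normal,
 * softirq, irq and NMI context). On failure, free whatever was already
 * allocated, warn once and return -ENOMEM.
 */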
2025 static int alloc_percpu_trace_buffer(void)
2026 {
2027 	struct trace_buffer_struct *buffers;
2028 	struct trace_buffer_struct *sirq_buffers;
2029 	struct trace_buffer_struct *irq_buffers;
2030 	struct trace_buffer_struct *nmi_buffers;
2031 
2032 	buffers = alloc_percpu(struct trace_buffer_struct);
2033 	if (!buffers)
2034 		goto err_warn;
2035 
2036 	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2037 	if (!sirq_buffers)
2038 		goto err_sirq;
2039 
2040 	irq_buffers = alloc_percpu(struct trace_buffer_struct);
2041 	if (!irq_buffers)
2042 		goto err_irq;
2043 
2044 	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2045 	if (!nmi_buffers)
2046 		goto err_nmi;
2047 
2048 	trace_percpu_buffer = buffers;
2049 	trace_percpu_sirq_buffer = sirq_buffers;
2050 	trace_percpu_irq_buffer = irq_buffers;
2051 	trace_percpu_nmi_buffer = nmi_buffers;
2052 
2053 	return 0;
2054 
2055  err_nmi:
2056 	free_percpu(irq_buffers);
2057  err_irq:
2058 	free_percpu(sirq_buffers);
2059  err_sirq:
2060 	free_percpu(buffers);
2061  err_warn:
2062 	WARN(1, "Could not allocate percpu trace_printk buffer");
2063 	return -ENOMEM;
2064 }
2065 
2066 static int buffers_allocated;
2067 
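/*
 * Set up the per-CPU trace_printk() buffers on first use, print the
 * "debug kernel" warning banner, and expand the ring buffer to its
 * configured size. If the global buffer already exists (a module
 * called us after boot), cmdline recording is started right away.
 */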
2068 void trace_printk_init_buffers(void)
2069 {
2070 	if (buffers_allocated)
2071 		return;
2072 
2073 	if (alloc_percpu_trace_buffer())
2074 		return;
2075 
2076 	/* trace_printk() is for debug use only. Don't use it in production. */
2077 
2078 	pr_warning("\n");
2079 	pr_warning("**********************************************************\n");
2080 	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2081 	pr_warning("**                                                      **\n");
2082 	pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
2083 	pr_warning("**                                                      **\n");
2084 	pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
2085 	pr_warning("** unsafe for production use.                           **\n");
2086 	pr_warning("**                                                      **\n");
2087 	pr_warning("** If you see this message and you are not debugging    **\n");
2088 	pr_warning("** the kernel, report this immediately to your vendor!  **\n");
2089 	pr_warning("**                                                      **\n");
2090 	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2091 	pr_warning("**********************************************************\n");
2092 
2093 	/* Expand the buffers to set size */
2094 	tracing_update_buffers();
2095 
2096 	buffers_allocated = 1;
2097 
2098 	/*
2099 	 * trace_printk_init_buffers() can be called by modules.
2100 	 * If that happens, then we need to start cmdline recording
2101 	 * directly here. If the global_trace.buffer is already
2102 	 * allocated here, then this was called by module code.
2103 	 */
2104 	if (global_trace.trace_buffer.buffer)
2105 		tracing_start_cmdline_record();
2106 }
2107 
2108 void trace_printk_start_comm(void)
2109 {
2110 	/* Start tracing comms if trace printk is set */
2111 	if (!buffers_allocated)
2112 		return;
2113 	tracing_start_cmdline_record();
2114 }
2115 
2116 static void trace_printk_start_stop_comm(int enabled)
2117 {
2118 	if (!buffers_allocated)
2119 		return;
2120 
2121 	if (enabled)
2122 		tracing_start_cmdline_record();
2123 	else
2124 		tracing_stop_cmdline_record();
2125 }
2126 
2127 /**
2128  * trace_vbprintk - write a binary msg to the tracing buffer
2129  * @ip: caller address; @fmt: format string; @args: arguments for @fmt
2130  */
2131 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2132 {
2133 	struct trace_event_call *call = &event_bprint;
2134 	struct ring_buffer_event *event;
2135 	struct ring_buffer *buffer;
2136 	struct trace_array *tr = &global_trace;
2137 	struct bprint_entry *entry;
2138 	unsigned long flags;
2139 	char *tbuffer;
2140 	int len = 0, size, pc;
2141 
2142 	if (unlikely(tracing_selftest_running || tracing_disabled))
2143 		return 0;
2144 
2145 	/* Don't pollute graph traces with trace_vprintk internals */
2146 	pause_graph_tracing();
2147 
2148 	pc = preempt_count();
2149 	preempt_disable_notrace();
2150 
2151 	tbuffer = get_trace_buf();
2152 	if (!tbuffer) {
2153 		len = 0;
2154 		goto out;
2155 	}
2156 
2157 	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2158 
2159 	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2160 		goto out;
2161 
2162 	local_save_flags(flags);
2163 	size = sizeof(*entry) + sizeof(u32) * len;
2164 	buffer = tr->trace_buffer.buffer;
2165 	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2166 					  flags, pc);
2167 	if (!event)
2168 		goto out;
2169 	entry = ring_buffer_event_data(event);
2170 	entry->ip			= ip;
2171 	entry->fmt			= fmt;
2172 
2173 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2174 	if (!call_filter_check_discard(call, entry, buffer, event)) {
2175 		__buffer_unlock_commit(buffer, event);
2176 		ftrace_trace_stack(buffer, flags, 6, pc);
2177 	}
2178 
2179 out:
2180 	preempt_enable_notrace();
2181 	unpause_graph_tracing();
2182 
2183 	return len;
2184 }
2185 EXPORT_SYMBOL_GPL(trace_vbprintk);
2186 
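/*
 * Format a trace_printk() style message into a per-context scratch
 * buffer and copy it into @buffer as a TRACE_PRINT event. Returns the
 * length of the formatted message, or 0 when tracing is disabled or no
 * scratch buffer is available.
 */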
2187 static int
2188 __trace_array_vprintk(struct ring_buffer *buffer,
2189 		      unsigned long ip, const char *fmt, va_list args)
2190 {
2191 	struct trace_event_call *call = &event_print;
2192 	struct ring_buffer_event *event;
2193 	int len = 0, size, pc;
2194 	struct print_entry *entry;
2195 	unsigned long flags;
2196 	char *tbuffer;
2197 
2198 	if (tracing_disabled || tracing_selftest_running)
2199 		return 0;
2200 
2201 	/* Don't pollute graph traces with trace_vprintk internals */
2202 	pause_graph_tracing();
2203 
2204 	pc = preempt_count();
2205 	preempt_disable_notrace();
2206 
2207 
2208 	tbuffer = get_trace_buf();
2209 	if (!tbuffer) {
2210 		len = 0;
2211 		goto out;
2212 	}
2213 
2214 	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2215 
2216 	local_save_flags(flags);
2217 	size = sizeof(*entry) + len + 1;
2218 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2219 					  flags, pc);
2220 	if (!event)
2221 		goto out;
2222 	entry = ring_buffer_event_data(event);
2223 	entry->ip = ip;
2224 
2225 	memcpy(&entry->buf, tbuffer, len + 1);
2226 	if (!call_filter_check_discard(call, entry, buffer, event)) {
2227 		__buffer_unlock_commit(buffer, event);
2228 		ftrace_trace_stack(buffer, flags, 6, pc);
2229 	}
2230  out:
2231 	preempt_enable_notrace();
2232 	unpause_graph_tracing();
2233 
2234 	return len;
2235 }
2236 
2237 int trace_array_vprintk(struct trace_array *tr,
2238 			unsigned long ip, const char *fmt, va_list args)
2239 {
2240 	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2241 }
2242 
2243 int trace_array_printk(struct trace_array *tr,
2244 		       unsigned long ip, const char *fmt, ...)
2245 {
2246 	int ret;
2247 	va_list ap;
2248 
2249 	if (!(trace_flags & TRACE_ITER_PRINTK))
2250 		return 0;
2251 
2252 	va_start(ap, fmt);
2253 	ret = trace_array_vprintk(tr, ip, fmt, ap);
2254 	va_end(ap);
2255 	return ret;
2256 }
2257 
2258 int trace_array_printk_buf(struct ring_buffer *buffer,
2259 			   unsigned long ip, const char *fmt, ...)
2260 {
2261 	int ret;
2262 	va_list ap;
2263 
2264 	if (!(trace_flags & TRACE_ITER_PRINTK))
2265 		return 0;
2266 
2267 	va_start(ap, fmt);
2268 	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2269 	va_end(ap);
2270 	return ret;
2271 }
2272 
2273 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2274 {
2275 	return trace_array_vprintk(&global_trace, ip, fmt, args);
2276 }
2277 EXPORT_SYMBOL_GPL(trace_vprintk);
2278 
2279 static void trace_iterator_increment(struct trace_iterator *iter)
2280 {
2281 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2282 
2283 	iter->idx++;
2284 	if (buf_iter)
2285 		ring_buffer_read(buf_iter, NULL);
2286 }
2287 
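/*
 * Peek at the next entry on @cpu without consuming it, using the
 * per-CPU buffer iterator when one exists and the ring buffer itself
 * otherwise. The entry size is recorded in iter->ent_size.
 */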
2288 static struct trace_entry *
2289 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2290 		unsigned long *lost_events)
2291 {
2292 	struct ring_buffer_event *event;
2293 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2294 
2295 	if (buf_iter)
2296 		event = ring_buffer_iter_peek(buf_iter, ts);
2297 	else
2298 		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2299 					 lost_events);
2300 
2301 	if (event) {
2302 		iter->ent_size = ring_buffer_event_length(event);
2303 		return ring_buffer_event_data(event);
2304 	}
2305 	iter->ent_size = 0;
2306 	return NULL;
2307 }
2308 
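/*
 * Find the oldest pending entry across the CPUs this iterator covers.
 * A per-CPU trace file only peeks at its own CPU; otherwise the entry
 * with the smallest timestamp wins. The CPU, timestamp and lost-event
 * count of the chosen entry are reported through the out parameters.
 */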
2309 static struct trace_entry *
2310 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2311 		  unsigned long *missing_events, u64 *ent_ts)
2312 {
2313 	struct ring_buffer *buffer = iter->trace_buffer->buffer;
2314 	struct trace_entry *ent, *next = NULL;
2315 	unsigned long lost_events = 0, next_lost = 0;
2316 	int cpu_file = iter->cpu_file;
2317 	u64 next_ts = 0, ts;
2318 	int next_cpu = -1;
2319 	int next_size = 0;
2320 	int cpu;
2321 
2322 	/*
2323 	 * If we are in a per_cpu trace file, don't bother iterating over
2324 	 * all cpus; just peek at that cpu directly.
2325 	 */
2326 	if (cpu_file > RING_BUFFER_ALL_CPUS) {
2327 		if (ring_buffer_empty_cpu(buffer, cpu_file))
2328 			return NULL;
2329 		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2330 		if (ent_cpu)
2331 			*ent_cpu = cpu_file;
2332 
2333 		return ent;
2334 	}
2335 
2336 	for_each_tracing_cpu(cpu) {
2337 
2338 		if (ring_buffer_empty_cpu(buffer, cpu))
2339 			continue;
2340 
2341 		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2342 
2343 		/*
2344 		 * Pick the entry with the smallest timestamp:
2345 		 */
2346 		if (ent && (!next || ts < next_ts)) {
2347 			next = ent;
2348 			next_cpu = cpu;
2349 			next_ts = ts;
2350 			next_lost = lost_events;
2351 			next_size = iter->ent_size;
2352 		}
2353 	}
2354 
2355 	iter->ent_size = next_size;
2356 
2357 	if (ent_cpu)
2358 		*ent_cpu = next_cpu;
2359 
2360 	if (ent_ts)
2361 		*ent_ts = next_ts;
2362 
2363 	if (missing_events)
2364 		*missing_events = next_lost;
2365 
2366 	return next;
2367 }
2368 
2369 /* Find the next real entry, without updating the iterator itself */
2370 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2371 					  int *ent_cpu, u64 *ent_ts)
2372 {
2373 	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2374 }
2375 
2376 /* Find the next real entry, and increment the iterator to the next entry */
2377 void *trace_find_next_entry_inc(struct trace_iterator *iter)
2378 {
2379 	iter->ent = __find_next_entry(iter, &iter->cpu,
2380 				      &iter->lost_events, &iter->ts);
2381 
2382 	if (iter->ent)
2383 		trace_iterator_increment(iter);
2384 
2385 	return iter->ent ? iter : NULL;
2386 }
2387 
2388 static void trace_consume(struct trace_iterator *iter)
2389 {
2390 	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2391 			    &iter->lost_events);
2392 }
2393 
2394 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2395 {
2396 	struct trace_iterator *iter = m->private;
2397 	int i = (int)*pos;
2398 	void *ent;
2399 
2400 	WARN_ON_ONCE(iter->leftover);
2401 
2402 	(*pos)++;
2403 
2404 	/* can't go backwards */
2405 	if (iter->idx > i)
2406 		return NULL;
2407 
2408 	if (iter->idx < 0)
2409 		ent = trace_find_next_entry_inc(iter);
2410 	else
2411 		ent = iter;
2412 
2413 	while (ent && iter->idx < i)
2414 		ent = trace_find_next_entry_inc(iter);
2415 
2416 	iter->pos = *pos;
2417 
2418 	return ent;
2419 }
2420 
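/*
 * Reset the buffer iterator for @cpu and skip over any entries that
 * are older than the buffer's time_start, counting how many were
 * skipped so the entry statistics stay accurate.
 */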
2421 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2422 {
2423 	struct ring_buffer_event *event;
2424 	struct ring_buffer_iter *buf_iter;
2425 	unsigned long entries = 0;
2426 	u64 ts;
2427 
2428 	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2429 
2430 	buf_iter = trace_buffer_iter(iter, cpu);
2431 	if (!buf_iter)
2432 		return;
2433 
2434 	ring_buffer_iter_reset(buf_iter);
2435 
2436 	/*
2437 	 * With the max latency tracers, we could have the case
2438 	 * that a reset never took place on a cpu. This is evident
2439 	 * when the timestamp is before the start of the buffer.
2440 	 */
2441 	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2442 		if (ts >= iter->trace_buffer->time_start)
2443 			break;
2444 		entries++;
2445 		ring_buffer_read(buf_iter, NULL);
2446 	}
2447 
2448 	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2449 }
2450 
2451 /*
2452  * The current tracer is copied to avoid taking a global lock
2453  * all around.
2454  */
2455 static void *s_start(struct seq_file *m, loff_t *pos)
2456 {
2457 	struct trace_iterator *iter = m->private;
2458 	struct trace_array *tr = iter->tr;
2459 	int cpu_file = iter->cpu_file;
2460 	void *p = NULL;
2461 	loff_t l = 0;
2462 	int cpu;
2463 
2464 	/*
2465 	 * Copy the tracer to avoid using a global lock all around.
2466 	 * iter->trace is a copy of current_trace; the pointer to the
2467 	 * name may be used instead of a strcmp(), as iter->trace->name
2468 	 * will point to the same string as current_trace->name.
2469 	 */
2470 	mutex_lock(&trace_types_lock);
2471 	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2472 		*iter->trace = *tr->current_trace;
2473 	mutex_unlock(&trace_types_lock);
2474 
2475 #ifdef CONFIG_TRACER_MAX_TRACE
2476 	if (iter->snapshot && iter->trace->use_max_tr)
2477 		return ERR_PTR(-EBUSY);
2478 #endif
2479 
2480 	if (!iter->snapshot)
2481 		atomic_inc(&trace_record_cmdline_disabled);
2482 
2483 	if (*pos != iter->pos) {
2484 		iter->ent = NULL;
2485 		iter->cpu = 0;
2486 		iter->idx = -1;
2487 
2488 		if (cpu_file == RING_BUFFER_ALL_CPUS) {
2489 			for_each_tracing_cpu(cpu)
2490 				tracing_iter_reset(iter, cpu);
2491 		} else
2492 			tracing_iter_reset(iter, cpu_file);
2493 
2494 		iter->leftover = 0;
2495 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2496 			;
2497 
2498 	} else {
2499 		/*
2500 		 * If we overflowed the seq_file before, then we want
2501 		 * to just reuse the trace_seq buffer again.
2502 		 */
2503 		if (iter->leftover)
2504 			p = iter;
2505 		else {
2506 			l = *pos - 1;
2507 			p = s_next(m, p, &l);
2508 		}
2509 	}
2510 
2511 	trace_event_read_lock();
2512 	trace_access_lock(cpu_file);
2513 	return p;
2514 }
2515 
2516 static void s_stop(struct seq_file *m, void *p)
2517 {
2518 	struct trace_iterator *iter = m->private;
2519 
2520 #ifdef CONFIG_TRACER_MAX_TRACE
2521 	if (iter->snapshot && iter->trace->use_max_tr)
2522 		return;
2523 #endif
2524 
2525 	if (!iter->snapshot)
2526 		atomic_dec(&trace_record_cmdline_disabled);
2527 
2528 	trace_access_unlock(iter->cpu_file);
2529 	trace_event_read_unlock();
2530 }
2531 
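/*
 * Sum up the entries of every CPU buffer: @entries is what is still
 * readable, @total additionally accounts for entries lost to ring
 * buffer overruns (for a CPU with skipped entries the two
 * contributions are the same).
 */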
2532 static void
2533 get_total_entries(struct trace_buffer *buf,
2534 		  unsigned long *total, unsigned long *entries)
2535 {
2536 	unsigned long count;
2537 	int cpu;
2538 
2539 	*total = 0;
2540 	*entries = 0;
2541 
2542 	for_each_tracing_cpu(cpu) {
2543 		count = ring_buffer_entries_cpu(buf->buffer, cpu);
2544 		/*
2545 		 * If this buffer has skipped entries, then we hold all
2546 		 * entries for the trace and we need to ignore the
2547 		 * ones before the timestamp.
2548 		 */
2549 		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2550 			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2551 			/* total is the same as the entries */
2552 			*total += count;
2553 		} else
2554 			*total += count +
2555 				ring_buffer_overrun_cpu(buf->buffer, cpu);
2556 		*entries += count;
2557 	}
2558 }
2559 
2560 static void print_lat_help_header(struct seq_file *m)
2561 {
2562 	seq_puts(m, "#                  _------=> CPU#            \n"
2563 		    "#                 / _-----=> irqs-off        \n"
2564 		    "#                | / _----=> need-resched    \n"
2565 		    "#                || / _---=> hardirq/softirq \n"
2566 		    "#                ||| / _--=> preempt-depth   \n"
2567 		    "#                |||| /     delay            \n"
2568 		    "#  cmd     pid   ||||| time  |   caller      \n"
2569 		    "#     \\   /      |||||  \\    |   /         \n");
2570 }
2571 
2572 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2573 {
2574 	unsigned long total;
2575 	unsigned long entries;
2576 
2577 	get_total_entries(buf, &total, &entries);
2578 	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2579 		   entries, total, num_online_cpus());
2580 	seq_puts(m, "#\n");
2581 }
2582 
2583 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2584 {
2585 	print_event_info(buf, m);
2586 	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
2587 		    "#              | |       |          |         |\n");
2588 }
2589 
2590 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2591 {
2592 	print_event_info(buf, m);
2593 	seq_puts(m, "#                              _-----=> irqs-off\n"
2594 		    "#                             / _----=> need-resched\n"
2595 		    "#                            | / _---=> hardirq/softirq\n"
2596 		    "#                            || / _--=> preempt-depth\n"
2597 		    "#                            ||| /     delay\n"
2598 		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
2599 		    "#              | |       |   ||||       |         |\n");
2600 }
2601 
2602 void
2603 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2604 {
2605 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2606 	struct trace_buffer *buf = iter->trace_buffer;
2607 	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2608 	struct tracer *type = iter->trace;
2609 	unsigned long entries;
2610 	unsigned long total;
2611 	const char *name = "preemption";
2612 
2613 	name = type->name;
2614 
2615 	get_total_entries(buf, &total, &entries);
2616 
2617 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2618 		   name, UTS_RELEASE);
2619 	seq_puts(m, "# -----------------------------------"
2620 		 "---------------------------------\n");
2621 	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2622 		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2623 		   nsecs_to_usecs(data->saved_latency),
2624 		   entries,
2625 		   total,
2626 		   buf->cpu,
2627 #if defined(CONFIG_PREEMPT_NONE)
2628 		   "server",
2629 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2630 		   "desktop",
2631 #elif defined(CONFIG_PREEMPT)
2632 		   "preempt",
2633 #else
2634 		   "unknown",
2635 #endif
2636 		   /* These are reserved for later use */
2637 		   0, 0, 0, 0);
2638 #ifdef CONFIG_SMP
2639 	seq_printf(m, " #P:%d)\n", num_online_cpus());
2640 #else
2641 	seq_puts(m, ")\n");
2642 #endif
2643 	seq_puts(m, "#    -----------------\n");
2644 	seq_printf(m, "#    | task: %.16s-%d "
2645 		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2646 		   data->comm, data->pid,
2647 		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2648 		   data->policy, data->rt_priority);
2649 	seq_puts(m, "#    -----------------\n");
2650 
2651 	if (data->critical_start) {
2652 		seq_puts(m, "#  => started at: ");
2653 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2654 		trace_print_seq(m, &iter->seq);
2655 		seq_puts(m, "\n#  => ended at:   ");
2656 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2657 		trace_print_seq(m, &iter->seq);
2658 		seq_puts(m, "\n#\n");
2659 	}
2660 
2661 	seq_puts(m, "#\n");
2662 }
2663 
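/*
 * When buffer annotations are enabled (e.g. after an overrun), emit a
 * one-time "CPU N buffer started" marker the first time entries from a
 * given CPU appear in the output.
 */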
2664 static void test_cpu_buff_start(struct trace_iterator *iter)
2665 {
2666 	struct trace_seq *s = &iter->seq;
2667 
2668 	if (!(trace_flags & TRACE_ITER_ANNOTATE))
2669 		return;
2670 
2671 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2672 		return;
2673 
2674 	if (cpumask_test_cpu(iter->cpu, iter->started))
2675 		return;
2676 
2677 	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2678 		return;
2679 
2680 	cpumask_set_cpu(iter->cpu, iter->started);
2681 
2682 	/* Don't print started cpu buffer for the first entry of the trace */
2683 	if (iter->idx > 1)
2684 		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2685 				iter->cpu);
2686 }
2687 
2688 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2689 {
2690 	struct trace_seq *s = &iter->seq;
2691 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2692 	struct trace_entry *entry;
2693 	struct trace_event *event;
2694 
2695 	entry = iter->ent;
2696 
2697 	test_cpu_buff_start(iter);
2698 
2699 	event = ftrace_find_event(entry->type);
2700 
2701 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2702 		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2703 			trace_print_lat_context(iter);
2704 		else
2705 			trace_print_context(iter);
2706 	}
2707 
2708 	if (trace_seq_has_overflowed(s))
2709 		return TRACE_TYPE_PARTIAL_LINE;
2710 
2711 	if (event)
2712 		return event->funcs->trace(iter, sym_flags, event);
2713 
2714 	trace_seq_printf(s, "Unknown type %d\n", entry->type);
2715 
2716 	return trace_handle_return(s);
2717 }
2718 
2719 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2720 {
2721 	struct trace_seq *s = &iter->seq;
2722 	struct trace_entry *entry;
2723 	struct trace_event *event;
2724 
2725 	entry = iter->ent;
2726 
2727 	if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2728 		trace_seq_printf(s, "%d %d %llu ",
2729 				 entry->pid, iter->cpu, iter->ts);
2730 
2731 	if (trace_seq_has_overflowed(s))
2732 		return TRACE_TYPE_PARTIAL_LINE;
2733 
2734 	event = ftrace_find_event(entry->type);
2735 	if (event)
2736 		return event->funcs->raw(iter, 0, event);
2737 
2738 	trace_seq_printf(s, "%d ?\n", entry->type);
2739 
2740 	return trace_handle_return(s);
2741 }
2742 
2743 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2744 {
2745 	struct trace_seq *s = &iter->seq;
2746 	unsigned char newline = '\n';
2747 	struct trace_entry *entry;
2748 	struct trace_event *event;
2749 
2750 	entry = iter->ent;
2751 
2752 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2753 		SEQ_PUT_HEX_FIELD(s, entry->pid);
2754 		SEQ_PUT_HEX_FIELD(s, iter->cpu);
2755 		SEQ_PUT_HEX_FIELD(s, iter->ts);
2756 		if (trace_seq_has_overflowed(s))
2757 			return TRACE_TYPE_PARTIAL_LINE;
2758 	}
2759 
2760 	event = ftrace_find_event(entry->type);
2761 	if (event) {
2762 		enum print_line_t ret = event->funcs->hex(iter, 0, event);
2763 		if (ret != TRACE_TYPE_HANDLED)
2764 			return ret;
2765 	}
2766 
2767 	SEQ_PUT_FIELD(s, newline);
2768 
2769 	return trace_handle_return(s);
2770 }
2771 
2772 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2773 {
2774 	struct trace_seq *s = &iter->seq;
2775 	struct trace_entry *entry;
2776 	struct trace_event *event;
2777 
2778 	entry = iter->ent;
2779 
2780 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2781 		SEQ_PUT_FIELD(s, entry->pid);
2782 		SEQ_PUT_FIELD(s, iter->cpu);
2783 		SEQ_PUT_FIELD(s, iter->ts);
2784 		if (trace_seq_has_overflowed(s))
2785 			return TRACE_TYPE_PARTIAL_LINE;
2786 	}
2787 
2788 	event = ftrace_find_event(entry->type);
2789 	return event ? event->funcs->binary(iter, 0, event) :
2790 		TRACE_TYPE_HANDLED;
2791 }
2792 
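/*
 * Return 1 if there is nothing left to read for this iterator: either
 * the single CPU buffer it is bound to is empty, or all CPU buffers
 * are empty when reading every CPU.
 */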
2793 int trace_empty(struct trace_iterator *iter)
2794 {
2795 	struct ring_buffer_iter *buf_iter;
2796 	int cpu;
2797 
2798 	/* If we are looking at one CPU buffer, only check that one */
2799 	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2800 		cpu = iter->cpu_file;
2801 		buf_iter = trace_buffer_iter(iter, cpu);
2802 		if (buf_iter) {
2803 			if (!ring_buffer_iter_empty(buf_iter))
2804 				return 0;
2805 		} else {
2806 			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2807 				return 0;
2808 		}
2809 		return 1;
2810 	}
2811 
2812 	for_each_tracing_cpu(cpu) {
2813 		buf_iter = trace_buffer_iter(iter, cpu);
2814 		if (buf_iter) {
2815 			if (!ring_buffer_iter_empty(buf_iter))
2816 				return 0;
2817 		} else {
2818 			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2819 				return 0;
2820 		}
2821 	}
2822 
2823 	return 1;
2824 }
2825 
2826 /*  Called with trace_event_read_lock() held. */
2827 enum print_line_t print_trace_line(struct trace_iterator *iter)
2828 {
2829 	enum print_line_t ret;
2830 
2831 	if (iter->lost_events) {
2832 		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2833 				 iter->cpu, iter->lost_events);
2834 		if (trace_seq_has_overflowed(&iter->seq))
2835 			return TRACE_TYPE_PARTIAL_LINE;
2836 	}
2837 
2838 	if (iter->trace && iter->trace->print_line) {
2839 		ret = iter->trace->print_line(iter);
2840 		if (ret != TRACE_TYPE_UNHANDLED)
2841 			return ret;
2842 	}
2843 
2844 	if (iter->ent->type == TRACE_BPUTS &&
2845 			trace_flags & TRACE_ITER_PRINTK &&
2846 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2847 		return trace_print_bputs_msg_only(iter);
2848 
2849 	if (iter->ent->type == TRACE_BPRINT &&
2850 			trace_flags & TRACE_ITER_PRINTK &&
2851 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2852 		return trace_print_bprintk_msg_only(iter);
2853 
2854 	if (iter->ent->type == TRACE_PRINT &&
2855 			trace_flags & TRACE_ITER_PRINTK &&
2856 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2857 		return trace_print_printk_msg_only(iter);
2858 
2859 	if (trace_flags & TRACE_ITER_BIN)
2860 		return print_bin_fmt(iter);
2861 
2862 	if (trace_flags & TRACE_ITER_HEX)
2863 		return print_hex_fmt(iter);
2864 
2865 	if (trace_flags & TRACE_ITER_RAW)
2866 		return print_raw_fmt(iter);
2867 
2868 	return print_trace_fmt(iter);
2869 }
2870 
2871 void trace_latency_header(struct seq_file *m)
2872 {
2873 	struct trace_iterator *iter = m->private;
2874 
2875 	/* print nothing if the buffers are empty */
2876 	if (trace_empty(iter))
2877 		return;
2878 
2879 	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2880 		print_trace_header(m, iter);
2881 
2882 	if (!(trace_flags & TRACE_ITER_VERBOSE))
2883 		print_lat_help_header(m);
2884 }
2885 
2886 void trace_default_header(struct seq_file *m)
2887 {
2888 	struct trace_iterator *iter = m->private;
2889 
2890 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2891 		return;
2892 
2893 	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2894 		/* print nothing if the buffers are empty */
2895 		if (trace_empty(iter))
2896 			return;
2897 		print_trace_header(m, iter);
2898 		if (!(trace_flags & TRACE_ITER_VERBOSE))
2899 			print_lat_help_header(m);
2900 	} else {
2901 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2902 			if (trace_flags & TRACE_ITER_IRQ_INFO)
2903 				print_func_help_header_irq(iter->trace_buffer, m);
2904 			else
2905 				print_func_help_header(iter->trace_buffer, m);
2906 		}
2907 	}
2908 }
2909 
2910 static void test_ftrace_alive(struct seq_file *m)
2911 {
2912 	if (!ftrace_is_dead())
2913 		return;
2914 	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2915 		    "#          MAY BE MISSING FUNCTION EVENTS\n");
2916 }
2917 
2918 #ifdef CONFIG_TRACER_MAX_TRACE
2919 static void show_snapshot_main_help(struct seq_file *m)
2920 {
2921 	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2922 		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2923 		    "#                      Takes a snapshot of the main buffer.\n"
2924 		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2925 		    "#                      (Doesn't have to be '2'; works with any number that\n"
2926 		    "#                       is not a '0' or '1')\n");
2927 }
2928 
2929 static void show_snapshot_percpu_help(struct seq_file *m)
2930 {
2931 	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2932 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2933 	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2934 		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
2935 #else
2936 	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2937 		    "#                     Must use main snapshot file to allocate.\n");
2938 #endif
2939 	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2940 		    "#                      (Doesn't have to be '2'; works with any number that\n"
2941 		    "#                       is not a '0' or '1')\n");
2942 }
2943 
2944 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2945 {
2946 	if (iter->tr->allocated_snapshot)
2947 		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2948 	else
2949 		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2950 
2951 	seq_puts(m, "# Snapshot commands:\n");
2952 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2953 		show_snapshot_main_help(m);
2954 	else
2955 		show_snapshot_percpu_help(m);
2956 }
2957 #else
2958 /* Should never be called */
2959 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2960 #endif
2961 
2962 static int s_show(struct seq_file *m, void *v)
2963 {
2964 	struct trace_iterator *iter = v;
2965 	int ret;
2966 
2967 	if (iter->ent == NULL) {
2968 		if (iter->tr) {
2969 			seq_printf(m, "# tracer: %s\n", iter->trace->name);
2970 			seq_puts(m, "#\n");
2971 			test_ftrace_alive(m);
2972 		}
2973 		if (iter->snapshot && trace_empty(iter))
2974 			print_snapshot_help(m, iter);
2975 		else if (iter->trace && iter->trace->print_header)
2976 			iter->trace->print_header(m);
2977 		else
2978 			trace_default_header(m);
2979 
2980 	} else if (iter->leftover) {
2981 		/*
2982 		 * If we filled the seq_file buffer earlier, we
2983 		 * want to just show it now.
2984 		 */
2985 		ret = trace_print_seq(m, &iter->seq);
2986 
2987 		/* ret should this time be zero, but you never know */
2988 		iter->leftover = ret;
2989 
2990 	} else {
2991 		print_trace_line(iter);
2992 		ret = trace_print_seq(m, &iter->seq);
2993 		/*
2994 		 * If we overflow the seq_file buffer, then it will
2995 		 * ask us for this data again at start up.
2996 		 * Use that instead.
2997 		 *  ret is 0 if seq_file write succeeded.
2998 		 *        -1 otherwise.
2999 		 */
3000 		iter->leftover = ret;
3001 	}
3002 
3003 	return 0;
3004 }
3005 
3006 /*
3007  * Should be used after trace_array_get(); trace_types_lock
3008  * ensures that i_cdev was already initialized.
3009  */
3010 static inline int tracing_get_cpu(struct inode *inode)
3011 {
3012 	if (inode->i_cdev) /* See trace_create_cpu_file() */
3013 		return (long)inode->i_cdev - 1;
3014 	return RING_BUFFER_ALL_CPUS;
3015 }
3016 
3017 static const struct seq_operations tracer_seq_ops = {
3018 	.start		= s_start,
3019 	.next		= s_next,
3020 	.stop		= s_stop,
3021 	.show		= s_show,
3022 };
3023 
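/*
 * Set up a trace_iterator for reading the trace (or snapshot) file:
 * copy the current tracer, pick the main or max/snapshot buffer,
 * allocate per-CPU buffer iterators, stop tracing unless a snapshot is
 * being opened, and prime every iterator.
 */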
3024 static struct trace_iterator *
3025 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3026 {
3027 	struct trace_array *tr = inode->i_private;
3028 	struct trace_iterator *iter;
3029 	int cpu;
3030 
3031 	if (tracing_disabled)
3032 		return ERR_PTR(-ENODEV);
3033 
3034 	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3035 	if (!iter)
3036 		return ERR_PTR(-ENOMEM);
3037 
3038 	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
3039 				    GFP_KERNEL);
3040 	if (!iter->buffer_iter)
3041 		goto release;
3042 
3043 	/*
3044 	 * We make a copy of the current tracer to avoid concurrent
3045 	 * changes to it while we are reading.
3046 	 */
3047 	mutex_lock(&trace_types_lock);
3048 	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3049 	if (!iter->trace)
3050 		goto fail;
3051 
3052 	*iter->trace = *tr->current_trace;
3053 
3054 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3055 		goto fail;
3056 
3057 	iter->tr = tr;
3058 
3059 #ifdef CONFIG_TRACER_MAX_TRACE
3060 	/* Currently only the top directory has a snapshot */
3061 	if (tr->current_trace->print_max || snapshot)
3062 		iter->trace_buffer = &tr->max_buffer;
3063 	else
3064 #endif
3065 		iter->trace_buffer = &tr->trace_buffer;
3066 	iter->snapshot = snapshot;
3067 	iter->pos = -1;
3068 	iter->cpu_file = tracing_get_cpu(inode);
3069 	mutex_init(&iter->mutex);
3070 
3071 	/* Notify the tracer early, before we stop tracing. */
3072 	if (iter->trace && iter->trace->open)
3073 		iter->trace->open(iter);
3074 
3075 	/* Annotate start of buffers if we had overruns */
3076 	if (ring_buffer_overruns(iter->trace_buffer->buffer))
3077 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
3078 
3079 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
3080 	if (trace_clocks[tr->clock_id].in_ns)
3081 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3082 
3083 	/* stop the trace while dumping if we are not opening "snapshot" */
3084 	if (!iter->snapshot)
3085 		tracing_stop_tr(tr);
3086 
3087 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3088 		for_each_tracing_cpu(cpu) {
3089 			iter->buffer_iter[cpu] =
3090 				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3091 		}
3092 		ring_buffer_read_prepare_sync();
3093 		for_each_tracing_cpu(cpu) {
3094 			ring_buffer_read_start(iter->buffer_iter[cpu]);
3095 			tracing_iter_reset(iter, cpu);
3096 		}
3097 	} else {
3098 		cpu = iter->cpu_file;
3099 		iter->buffer_iter[cpu] =
3100 			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3101 		ring_buffer_read_prepare_sync();
3102 		ring_buffer_read_start(iter->buffer_iter[cpu]);
3103 		tracing_iter_reset(iter, cpu);
3104 	}
3105 
3106 	mutex_unlock(&trace_types_lock);
3107 
3108 	return iter;
3109 
3110  fail:
3111 	mutex_unlock(&trace_types_lock);
3112 	kfree(iter->trace);
3113 	kfree(iter->buffer_iter);
3114 release:
3115 	seq_release_private(inode, file);
3116 	return ERR_PTR(-ENOMEM);
3117 }
3118 
3119 int tracing_open_generic(struct inode *inode, struct file *filp)
3120 {
3121 	if (tracing_disabled)
3122 		return -ENODEV;
3123 
3124 	filp->private_data = inode->i_private;
3125 	return 0;
3126 }
3127 
3128 bool tracing_is_disabled(void)
3129 {
3130 	return tracing_disabled ? true : false;
3131 }
3132 
3133 /*
3134  * Open and update trace_array ref count.
3135  * Must have the current trace_array passed to it.
3136  */
3137 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3138 {
3139 	struct trace_array *tr = inode->i_private;
3140 
3141 	if (tracing_disabled)
3142 		return -ENODEV;
3143 
3144 	if (trace_array_get(tr) < 0)
3145 		return -ENODEV;
3146 
3147 	filp->private_data = inode->i_private;
3148 
3149 	return 0;
3150 }
3151 
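/*
 * Tear down what __tracing_open() set up: finish the per-CPU buffer
 * iterators, let the tracer close, restart tracing if it was stopped,
 * drop the trace_array reference and free the iterator.
 */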
3152 static int tracing_release(struct inode *inode, struct file *file)
3153 {
3154 	struct trace_array *tr = inode->i_private;
3155 	struct seq_file *m = file->private_data;
3156 	struct trace_iterator *iter;
3157 	int cpu;
3158 
3159 	if (!(file->f_mode & FMODE_READ)) {
3160 		trace_array_put(tr);
3161 		return 0;
3162 	}
3163 
3164 	/* Writes do not use seq_file */
3165 	iter = m->private;
3166 	mutex_lock(&trace_types_lock);
3167 
3168 	for_each_tracing_cpu(cpu) {
3169 		if (iter->buffer_iter[cpu])
3170 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
3171 	}
3172 
3173 	if (iter->trace && iter->trace->close)
3174 		iter->trace->close(iter);
3175 
3176 	if (!iter->snapshot)
3177 		/* reenable tracing if it was previously enabled */
3178 		tracing_start_tr(tr);
3179 
3180 	__trace_array_put(tr);
3181 
3182 	mutex_unlock(&trace_types_lock);
3183 
3184 	mutex_destroy(&iter->mutex);
3185 	free_cpumask_var(iter->started);
3186 	kfree(iter->trace);
3187 	kfree(iter->buffer_iter);
3188 	seq_release_private(inode, file);
3189 
3190 	return 0;
3191 }
3192 
3193 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3194 {
3195 	struct trace_array *tr = inode->i_private;
3196 
3197 	trace_array_put(tr);
3198 	return 0;
3199 }
3200 
3201 static int tracing_single_release_tr(struct inode *inode, struct file *file)
3202 {
3203 	struct trace_array *tr = inode->i_private;
3204 
3205 	trace_array_put(tr);
3206 
3207 	return single_release(inode, file);
3208 }
3209 
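/*
 * Open handler for the "trace" file. Opening for write with O_TRUNC
 * clears the buffer(s); opening for read builds a full iterator via
 * __tracing_open().
 */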
3210 static int tracing_open(struct inode *inode, struct file *file)
3211 {
3212 	struct trace_array *tr = inode->i_private;
3213 	struct trace_iterator *iter;
3214 	int ret = 0;
3215 
3216 	if (trace_array_get(tr) < 0)
3217 		return -ENODEV;
3218 
3219 	/* If this file was open for write, then erase contents */
3220 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3221 		int cpu = tracing_get_cpu(inode);
3222 
3223 		if (cpu == RING_BUFFER_ALL_CPUS)
3224 			tracing_reset_online_cpus(&tr->trace_buffer);
3225 		else
3226 			tracing_reset(&tr->trace_buffer, cpu);
3227 	}
3228 
3229 	if (file->f_mode & FMODE_READ) {
3230 		iter = __tracing_open(inode, file, false);
3231 		if (IS_ERR(iter))
3232 			ret = PTR_ERR(iter);
3233 		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3234 			iter->iter_flags |= TRACE_FILE_LAT_FMT;
3235 	}
3236 
3237 	if (ret < 0)
3238 		trace_array_put(tr);
3239 
3240 	return ret;
3241 }
3242 
3243 /*
3244  * Some tracers are not suitable for instance buffers.
3245  * A tracer is always available for the global array (toplevel)
3246  * or if it explicitly states that it is.
3247  */
3248 static bool
3249 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3250 {
3251 	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3252 }
3253 
3254 /* Find the next tracer that this trace array may use */
3255 static struct tracer *
3256 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3257 {
3258 	while (t && !trace_ok_for_array(t, tr))
3259 		t = t->next;
3260 
3261 	return t;
3262 }
3263 
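/*
 * seq_file iterator behind "available_tracers": walk the registered
 * tracers, showing only those this trace array is allowed to use.
 */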
3264 static void *
3265 t_next(struct seq_file *m, void *v, loff_t *pos)
3266 {
3267 	struct trace_array *tr = m->private;
3268 	struct tracer *t = v;
3269 
3270 	(*pos)++;
3271 
3272 	if (t)
3273 		t = get_tracer_for_array(tr, t->next);
3274 
3275 	return t;
3276 }
3277 
3278 static void *t_start(struct seq_file *m, loff_t *pos)
3279 {
3280 	struct trace_array *tr = m->private;
3281 	struct tracer *t;
3282 	loff_t l = 0;
3283 
3284 	mutex_lock(&trace_types_lock);
3285 
3286 	t = get_tracer_for_array(tr, trace_types);
3287 	for (; t && l < *pos; t = t_next(m, t, &l))
3288 			;
3289 
3290 	return t;
3291 }
3292 
3293 static void t_stop(struct seq_file *m, void *p)
3294 {
3295 	mutex_unlock(&trace_types_lock);
3296 }
3297 
3298 static int t_show(struct seq_file *m, void *v)
3299 {
3300 	struct tracer *t = v;
3301 
3302 	if (!t)
3303 		return 0;
3304 
3305 	seq_puts(m, t->name);
3306 	if (t->next)
3307 		seq_putc(m, ' ');
3308 	else
3309 		seq_putc(m, '\n');
3310 
3311 	return 0;
3312 }
3313 
3314 static const struct seq_operations show_traces_seq_ops = {
3315 	.start		= t_start,
3316 	.next		= t_next,
3317 	.stop		= t_stop,
3318 	.show		= t_show,
3319 };
3320 
3321 static int show_traces_open(struct inode *inode, struct file *file)
3322 {
3323 	struct trace_array *tr = inode->i_private;
3324 	struct seq_file *m;
3325 	int ret;
3326 
3327 	if (tracing_disabled)
3328 		return -ENODEV;
3329 
3330 	ret = seq_open(file, &show_traces_seq_ops);
3331 	if (ret)
3332 		return ret;
3333 
3334 	m = file->private_data;
3335 	m->private = tr;
3336 
3337 	return 0;
3338 }
3339 
3340 static ssize_t
3341 tracing_write_stub(struct file *filp, const char __user *ubuf,
3342 		   size_t count, loff_t *ppos)
3343 {
3344 	return count;
3345 }
3346 
3347 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3348 {
3349 	int ret;
3350 
3351 	if (file->f_mode & FMODE_READ)
3352 		ret = seq_lseek(file, offset, whence);
3353 	else
3354 		file->f_pos = ret = 0;
3355 
3356 	return ret;
3357 }
3358 
3359 static const struct file_operations tracing_fops = {
3360 	.open		= tracing_open,
3361 	.read		= seq_read,
3362 	.write		= tracing_write_stub,
3363 	.llseek		= tracing_lseek,
3364 	.release	= tracing_release,
3365 };
3366 
3367 static const struct file_operations show_traces_fops = {
3368 	.open		= show_traces_open,
3369 	.read		= seq_read,
3370 	.release	= seq_release,
3371 	.llseek		= seq_lseek,
3372 };
3373 
3374 /*
3375  * The tracer itself will not take this lock, but we still want
3376  * to provide a consistent cpumask to user-space:
3377  */
3378 static DEFINE_MUTEX(tracing_cpumask_update_lock);
3379 
3380 /*
3381  * Temporary storage for the character representation of the
3382  * CPU bitmask (and one more byte for the newline):
3383  */
3384 static char mask_str[NR_CPUS + 1];
3385 
3386 static ssize_t
3387 tracing_cpumask_read(struct file *filp, char __user *ubuf,
3388 		     size_t count, loff_t *ppos)
3389 {
3390 	struct trace_array *tr = file_inode(filp)->i_private;
3391 	int len;
3392 
3393 	mutex_lock(&tracing_cpumask_update_lock);
3394 
3395 	len = snprintf(mask_str, count, "%*pb\n",
3396 		       cpumask_pr_args(tr->tracing_cpumask));
3397 	if (len >= count) {
3398 		count = -EINVAL;
3399 		goto out_err;
3400 	}
3401 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3402 
3403 out_err:
3404 	mutex_unlock(&tracing_cpumask_update_lock);
3405 
3406 	return count;
3407 }
3408 
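/*
 * Update tracing_cpumask from user space. For every CPU whose bit is
 * flipped, the per-CPU disabled counter and the ring buffer record
 * state are adjusted so tracing stops or resumes on that CPU.
 */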
3409 static ssize_t
3410 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3411 		      size_t count, loff_t *ppos)
3412 {
3413 	struct trace_array *tr = file_inode(filp)->i_private;
3414 	cpumask_var_t tracing_cpumask_new;
3415 	int err, cpu;
3416 
3417 	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3418 		return -ENOMEM;
3419 
3420 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3421 	if (err)
3422 		goto err_unlock;
3423 
3424 	mutex_lock(&tracing_cpumask_update_lock);
3425 
3426 	local_irq_disable();
3427 	arch_spin_lock(&tr->max_lock);
3428 	for_each_tracing_cpu(cpu) {
3429 		/*
3430 		 * Increase/decrease the disabled counter if we are
3431 		 * about to flip a bit in the cpumask:
3432 		 */
3433 		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3434 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3435 			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3436 			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3437 		}
3438 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3439 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3440 			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3441 			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3442 		}
3443 	}
3444 	arch_spin_unlock(&tr->max_lock);
3445 	local_irq_enable();
3446 
3447 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3448 
3449 	mutex_unlock(&tracing_cpumask_update_lock);
3450 	free_cpumask_var(tracing_cpumask_new);
3451 
3452 	return count;
3453 
3454 err_unlock:
3455 	free_cpumask_var(tracing_cpumask_new);
3456 
3457 	return err;
3458 }
3459 
3460 static const struct file_operations tracing_cpumask_fops = {
3461 	.open		= tracing_open_generic_tr,
3462 	.read		= tracing_cpumask_read,
3463 	.write		= tracing_cpumask_write,
3464 	.release	= tracing_release_generic_tr,
3465 	.llseek		= generic_file_llseek,
3466 };
3467 
3468 static int tracing_trace_options_show(struct seq_file *m, void *v)
3469 {
3470 	struct tracer_opt *trace_opts;
3471 	struct trace_array *tr = m->private;
3472 	u32 tracer_flags;
3473 	int i;
3474 
3475 	mutex_lock(&trace_types_lock);
3476 	tracer_flags = tr->current_trace->flags->val;
3477 	trace_opts = tr->current_trace->flags->opts;
3478 
3479 	for (i = 0; trace_options[i]; i++) {
3480 		if (trace_flags & (1 << i))
3481 			seq_printf(m, "%s\n", trace_options[i]);
3482 		else
3483 			seq_printf(m, "no%s\n", trace_options[i]);
3484 	}
3485 
3486 	for (i = 0; trace_opts[i].name; i++) {
3487 		if (tracer_flags & trace_opts[i].bit)
3488 			seq_printf(m, "%s\n", trace_opts[i].name);
3489 		else
3490 			seq_printf(m, "no%s\n", trace_opts[i].name);
3491 	}
3492 	mutex_unlock(&trace_types_lock);
3493 
3494 	return 0;
3495 }
3496 
3497 static int __set_tracer_option(struct trace_array *tr,
3498 			       struct tracer_flags *tracer_flags,
3499 			       struct tracer_opt *opts, int neg)
3500 {
3501 	struct tracer *trace = tr->current_trace;
3502 	int ret;
3503 
3504 	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3505 	if (ret)
3506 		return ret;
3507 
3508 	if (neg)
3509 		tracer_flags->val &= ~opts->bit;
3510 	else
3511 		tracer_flags->val |= opts->bit;
3512 	return 0;
3513 }
3514 
3515 /* Try to assign a tracer specific option */
3516 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3517 {
3518 	struct tracer *trace = tr->current_trace;
3519 	struct tracer_flags *tracer_flags = trace->flags;
3520 	struct tracer_opt *opts = NULL;
3521 	int i;
3522 
3523 	for (i = 0; tracer_flags->opts[i].name; i++) {
3524 		opts = &tracer_flags->opts[i];
3525 
3526 		if (strcmp(cmp, opts->name) == 0)
3527 			return __set_tracer_option(tr, trace->flags, opts, neg);
3528 	}
3529 
3530 	return -EINVAL;
3531 }
3532 
3533 /* Some tracers require overwrite to stay enabled */
3534 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3535 {
3536 	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3537 		return -1;
3538 
3539 	return 0;
3540 }
3541 
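/*
 * Set or clear one of the global trace_flags bits. The current tracer
 * may veto the change, and side effects such as cmdline recording,
 * ring buffer overwrite mode and trace_printk comm recording are
 * propagated here.
 */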
3542 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3543 {
3544 	/* do nothing if flag is already set */
3545 	if (!!(trace_flags & mask) == !!enabled)
3546 		return 0;
3547 
3548 	/* Give the tracer a chance to approve the change */
3549 	if (tr->current_trace->flag_changed)
3550 		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3551 			return -EINVAL;
3552 
3553 	if (enabled)
3554 		trace_flags |= mask;
3555 	else
3556 		trace_flags &= ~mask;
3557 
3558 	if (mask == TRACE_ITER_RECORD_CMD)
3559 		trace_event_enable_cmd_record(enabled);
3560 
3561 	if (mask == TRACE_ITER_OVERWRITE) {
3562 		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3563 #ifdef CONFIG_TRACER_MAX_TRACE
3564 		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3565 #endif
3566 	}
3567 
3568 	if (mask == TRACE_ITER_PRINTK)
3569 		trace_printk_start_stop_comm(enabled);
3570 
3571 	return 0;
3572 }
3573 
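/*
 * Parse a single option string written to trace_options: a "no" prefix
 * negates the option; core options are tried first, then the current
 * tracer's own options.
 */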
3574 static int trace_set_options(struct trace_array *tr, char *option)
3575 {
3576 	char *cmp;
3577 	int neg = 0;
3578 	int ret = -ENODEV;
3579 	int i;
3580 
3581 	cmp = strstrip(option);
3582 
3583 	if (strncmp(cmp, "no", 2) == 0) {
3584 		neg = 1;
3585 		cmp += 2;
3586 	}
3587 
3588 	mutex_lock(&trace_types_lock);
3589 
3590 	for (i = 0; trace_options[i]; i++) {
3591 		if (strcmp(cmp, trace_options[i]) == 0) {
3592 			ret = set_tracer_flag(tr, 1 << i, !neg);
3593 			break;
3594 		}
3595 	}
3596 
3597 	/* If no option could be set, test the specific tracer options */
3598 	if (!trace_options[i])
3599 		ret = set_tracer_option(tr, cmp, neg);
3600 
3601 	mutex_unlock(&trace_types_lock);
3602 
3603 	return ret;
3604 }
3605 
3606 static ssize_t
3607 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3608 			size_t cnt, loff_t *ppos)
3609 {
3610 	struct seq_file *m = filp->private_data;
3611 	struct trace_array *tr = m->private;
3612 	char buf[64];
3613 	int ret;
3614 
3615 	if (cnt >= sizeof(buf))
3616 		return -EINVAL;
3617 
3618 	if (copy_from_user(&buf, ubuf, cnt))
3619 		return -EFAULT;
3620 
3621 	buf[cnt] = 0;
3622 
3623 	ret = trace_set_options(tr, buf);
3624 	if (ret < 0)
3625 		return ret;
3626 
3627 	*ppos += cnt;
3628 
3629 	return cnt;
3630 }
3631 
3632 static int tracing_trace_options_open(struct inode *inode, struct file *file)
3633 {
3634 	struct trace_array *tr = inode->i_private;
3635 	int ret;
3636 
3637 	if (tracing_disabled)
3638 		return -ENODEV;
3639 
3640 	if (trace_array_get(tr) < 0)
3641 		return -ENODEV;
3642 
3643 	ret = single_open(file, tracing_trace_options_show, inode->i_private);
3644 	if (ret < 0)
3645 		trace_array_put(tr);
3646 
3647 	return ret;
3648 }
3649 
3650 static const struct file_operations tracing_iter_fops = {
3651 	.open		= tracing_trace_options_open,
3652 	.read		= seq_read,
3653 	.llseek		= seq_lseek,
3654 	.release	= tracing_single_release_tr,
3655 	.write		= tracing_trace_options_write,
3656 };
3657 
3658 static const char readme_msg[] =
3659 	"tracing mini-HOWTO:\n\n"
3660 	"# echo 0 > tracing_on : quick way to disable tracing\n"
3661 	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3662 	" Important files:\n"
3663 	"  trace\t\t\t- The static contents of the buffer\n"
3664 	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
3665 	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3666 	"  current_tracer\t- function and latency tracers\n"
3667 	"  available_tracers\t- list of configured tracers for current_tracer\n"
3668 	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
3669 	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
3670 	"  trace_clock\t\t-change the clock used to order events\n"
3671 	"       local:   Per cpu clock but may not be synced across CPUs\n"
3672 	"      global:   Synced across CPUs but slows tracing down.\n"
3673 	"     counter:   Not a clock, but just an increment\n"
3674 	"      uptime:   Jiffy counter from time of boot\n"
3675 	"        perf:   Same clock that perf events use\n"
3676 #ifdef CONFIG_X86_64
3677 	"     x86-tsc:   TSC cycle counter\n"
3678 #endif
3679 	"\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3680 	"  tracing_cpumask\t- Limit which CPUs to trace\n"
3681 	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3682 	"\t\t\t  Remove sub-buffer with rmdir\n"
3683 	"  trace_options\t\t- Set format or modify how tracing happens\n"
3684 	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"
3685 	"\t\t\t  option name\n"
3686 	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3687 #ifdef CONFIG_DYNAMIC_FTRACE
3688 	"\n  available_filter_functions - list of functions that can be filtered on\n"
3689 	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
3690 	"\t\t\t  functions\n"
3691 	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3692 	"\t     modules: Can select a group via module\n"
3693 	"\t      Format: :mod:<module-name>\n"
3694 	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
3695 	"\t    triggers: a command to perform when function is hit\n"
3696 	"\t      Format: <function>:<trigger>[:count]\n"
3697 	"\t     trigger: traceon, traceoff\n"
3698 	"\t\t      enable_event:<system>:<event>\n"
3699 	"\t\t      disable_event:<system>:<event>\n"
3700 #ifdef CONFIG_STACKTRACE
3701 	"\t\t      stacktrace\n"
3702 #endif
3703 #ifdef CONFIG_TRACER_SNAPSHOT
3704 	"\t\t      snapshot\n"
3705 #endif
3706 	"\t\t      dump\n"
3707 	"\t\t      cpudump\n"
3708 	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
3709 	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
3710 	"\t     The first one will disable tracing every time do_fault is hit\n"
3711 	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
3712 	"\t       The first time do trap is hit and it disables tracing, the\n"
3713 	"\t       counter will decrement to 2. If tracing is already disabled,\n"
3714 	"\t       the counter will not decrement. It only decrements when the\n"
3715 	"\t       trigger did work\n"
3716 	"\t     To remove trigger without count:\n"
3717 	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
3718 	"\t     To remove trigger with a count:\n"
3719 	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3720 	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
3721 	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3722 	"\t    modules: Can select a group via module command :mod:\n"
3723 	"\t    Does not accept triggers\n"
3724 #endif /* CONFIG_DYNAMIC_FTRACE */
3725 #ifdef CONFIG_FUNCTION_TRACER
3726 	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3727 	"\t\t    (function)\n"
3728 #endif
3729 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3730 	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3731 	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3732 	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3733 #endif
3734 #ifdef CONFIG_TRACER_SNAPSHOT
3735 	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
3736 	"\t\t\t  snapshot buffer. Read the contents for more\n"
3737 	"\t\t\t  information\n"
3738 #endif
3739 #ifdef CONFIG_STACK_TRACER
3740 	"  stack_trace\t\t- Shows the max stack trace when active\n"
3741 	"  stack_max_size\t- Shows current max stack size that was traced\n"
3742 	"\t\t\t  Write into this file to reset the max size (trigger a\n"
3743 	"\t\t\t  new trace)\n"
3744 #ifdef CONFIG_DYNAMIC_FTRACE
3745 	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3746 	"\t\t\t  traces\n"
3747 #endif
3748 #endif /* CONFIG_STACK_TRACER */
3749 	"  events/\t\t- Directory containing all trace event subsystems:\n"
3750 	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3751 	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
3752 	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3753 	"\t\t\t  events\n"
3754 	"      filter\t\t- If set, only events passing filter are traced\n"
3755 	"  events/<system>/<event>/\t- Directory containing control files for\n"
3756 	"\t\t\t  <event>:\n"
3757 	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3758 	"      filter\t\t- If set, only events passing filter are traced\n"
3759 	"      trigger\t\t- If set, a command to perform when event is hit\n"
3760 	"\t    Format: <trigger>[:count][if <filter>]\n"
3761 	"\t   trigger: traceon, traceoff\n"
3762 	"\t            enable_event:<system>:<event>\n"
3763 	"\t            disable_event:<system>:<event>\n"
3764 #ifdef CONFIG_STACKTRACE
3765 	"\t\t    stacktrace\n"
3766 #endif
3767 #ifdef CONFIG_TRACER_SNAPSHOT
3768 	"\t\t    snapshot\n"
3769 #endif
3770 	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
3771 	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
3772 	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3773 	"\t                  events/block/block_unplug/trigger\n"
3774 	"\t   The first disables tracing every time block_unplug is hit.\n"
3775 	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
3776 	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
3777 	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3778 	"\t   Like function triggers, the counter is only decremented if it\n"
3779 	"\t    enabled or disabled tracing.\n"
3780 	"\t   To remove a trigger without a count:\n"
3781 	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
3782 	"\t   To remove a trigger with a count:\n"
3783 	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
3784 	"\t   Filters can be ignored when removing a trigger.\n"
3785 ;
3786 
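/*
 * readme_msg above is exposed read-only through the "README" file in the
 * tracing directory (typically /sys/kernel/tracing or
 * /sys/kernel/debug/tracing), e.g.:
 *
 *   cat /sys/kernel/tracing/README
 *
 * simple_read_from_buffer() does the offset/count bookkeeping, so userspace
 * can read the help text in arbitrary chunks.
 */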
3787 static ssize_t
3788 tracing_readme_read(struct file *filp, char __user *ubuf,
3789 		       size_t cnt, loff_t *ppos)
3790 {
3791 	return simple_read_from_buffer(ubuf, cnt, ppos,
3792 					readme_msg, strlen(readme_msg));
3793 }
3794 
3795 static const struct file_operations tracing_readme_fops = {
3796 	.open		= tracing_open_generic,
3797 	.read		= tracing_readme_read,
3798 	.llseek		= generic_file_llseek,
3799 };
3800 
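/*
 * The seq_file iterator below backs the "saved_cmdlines" file. It walks
 * savedcmd->map_cmdline_to_pid[] and prints one "<pid> <comm>" pair per
 * line, skipping unused slots. Example (output is illustrative):
 *
 *   cat /sys/kernel/tracing/saved_cmdlines
 *   1 systemd
 *   42 kworker/0:1
 */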
3801 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3802 {
3803 	unsigned int *ptr = v;
3804 
3805 	if (*pos || m->count)
3806 		ptr++;
3807 
3808 	(*pos)++;
3809 
3810 	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3811 	     ptr++) {
3812 		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3813 			continue;
3814 
3815 		return ptr;
3816 	}
3817 
3818 	return NULL;
3819 }
3820 
3821 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3822 {
3823 	void *v;
3824 	loff_t l = 0;
3825 
3826 	preempt_disable();
3827 	arch_spin_lock(&trace_cmdline_lock);
3828 
3829 	v = &savedcmd->map_cmdline_to_pid[0];
3830 	while (l <= *pos) {
3831 		v = saved_cmdlines_next(m, v, &l);
3832 		if (!v)
3833 			return NULL;
3834 	}
3835 
3836 	return v;
3837 }
3838 
3839 static void saved_cmdlines_stop(struct seq_file *m, void *v)
3840 {
3841 	arch_spin_unlock(&trace_cmdline_lock);
3842 	preempt_enable();
3843 }
3844 
3845 static int saved_cmdlines_show(struct seq_file *m, void *v)
3846 {
3847 	char buf[TASK_COMM_LEN];
3848 	unsigned int *pid = v;
3849 
3850 	__trace_find_cmdline(*pid, buf);
3851 	seq_printf(m, "%d %s\n", *pid, buf);
3852 	return 0;
3853 }
3854 
3855 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3856 	.start		= saved_cmdlines_start,
3857 	.next		= saved_cmdlines_next,
3858 	.stop		= saved_cmdlines_stop,
3859 	.show		= saved_cmdlines_show,
3860 };
3861 
3862 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3863 {
3864 	if (tracing_disabled)
3865 		return -ENODEV;
3866 
3867 	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3868 }
3869 
3870 static const struct file_operations tracing_saved_cmdlines_fops = {
3871 	.open		= tracing_saved_cmdlines_open,
3872 	.read		= seq_read,
3873 	.llseek		= seq_lseek,
3874 	.release	= seq_release,
3875 };
3876 
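/*
 * "saved_cmdlines_size" reports how many pid->comm entries the cache can
 * hold and, on write, resizes it. A new buffer is allocated first and then
 * swapped in under trace_cmdline_lock, so previously recorded comms are
 * dropped but tracing never sees a half-initialized cache.
 * Example (illustrative):
 *
 *   echo 1024 > /sys/kernel/tracing/saved_cmdlines_size
 */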
3877 static ssize_t
3878 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3879 				 size_t cnt, loff_t *ppos)
3880 {
3881 	char buf[64];
3882 	int r;
3883 
3884 	arch_spin_lock(&trace_cmdline_lock);
3885 	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3886 	arch_spin_unlock(&trace_cmdline_lock);
3887 
3888 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3889 }
3890 
3891 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3892 {
3893 	kfree(s->saved_cmdlines);
3894 	kfree(s->map_cmdline_to_pid);
3895 	kfree(s);
3896 }
3897 
3898 static int tracing_resize_saved_cmdlines(unsigned int val)
3899 {
3900 	struct saved_cmdlines_buffer *s, *savedcmd_temp;
3901 
3902 	s = kmalloc(sizeof(*s), GFP_KERNEL);
3903 	if (!s)
3904 		return -ENOMEM;
3905 
3906 	if (allocate_cmdlines_buffer(val, s) < 0) {
3907 		kfree(s);
3908 		return -ENOMEM;
3909 	}
3910 
3911 	arch_spin_lock(&trace_cmdline_lock);
3912 	savedcmd_temp = savedcmd;
3913 	savedcmd = s;
3914 	arch_spin_unlock(&trace_cmdline_lock);
3915 	free_saved_cmdlines_buffer(savedcmd_temp);
3916 
3917 	return 0;
3918 }
3919 
3920 static ssize_t
3921 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3922 				  size_t cnt, loff_t *ppos)
3923 {
3924 	unsigned long val;
3925 	int ret;
3926 
3927 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3928 	if (ret)
3929 		return ret;
3930 
3931 	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3932 	if (!val || val > PID_MAX_DEFAULT)
3933 		return -EINVAL;
3934 
3935 	ret = tracing_resize_saved_cmdlines((unsigned int)val);
3936 	if (ret < 0)
3937 		return ret;
3938 
3939 	*ppos += cnt;
3940 
3941 	return cnt;
3942 }
3943 
3944 static const struct file_operations tracing_saved_cmdlines_size_fops = {
3945 	.open		= tracing_open_generic,
3946 	.read		= tracing_saved_cmdlines_size_read,
3947 	.write		= tracing_saved_cmdlines_size_write,
3948 };
3949 
3950 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
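/*
 * When CONFIG_TRACE_ENUM_MAP_FILE is set, an "enum_map" file is created
 * that lists every enum name used in trace event formats together with its
 * numeric value and the event system it came from, one
 * "<name> <value> (<system>)" entry per line (see enum_map_show() below).
 */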
3951 static union trace_enum_map_item *
3952 update_enum_map(union trace_enum_map_item *ptr)
3953 {
3954 	if (!ptr->map.enum_string) {
3955 		if (ptr->tail.next) {
3956 			ptr = ptr->tail.next;
3957 			/* Set ptr to the next real item (skip head) */
3958 			ptr++;
3959 		} else
3960 			return NULL;
3961 	}
3962 	return ptr;
3963 }
3964 
3965 static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
3966 {
3967 	union trace_enum_map_item *ptr = v;
3968 
3969 	/*
3970 	 * Paranoid! If ptr points to end, we don't want to increment past it.
3971 	 * This really should never happen.
3972 	 */
3973 	ptr = update_enum_map(ptr);
3974 	if (WARN_ON_ONCE(!ptr))
3975 		return NULL;
3976 
3977 	ptr++;
3978 
3979 	(*pos)++;
3980 
3981 	ptr = update_enum_map(ptr);
3982 
3983 	return ptr;
3984 }
3985 
3986 static void *enum_map_start(struct seq_file *m, loff_t *pos)
3987 {
3988 	union trace_enum_map_item *v;
3989 	loff_t l = 0;
3990 
3991 	mutex_lock(&trace_enum_mutex);
3992 
3993 	v = trace_enum_maps;
3994 	if (v)
3995 		v++;
3996 
3997 	while (v && l < *pos) {
3998 		v = enum_map_next(m, v, &l);
3999 	}
4000 
4001 	return v;
4002 }
4003 
4004 static void enum_map_stop(struct seq_file *m, void *v)
4005 {
4006 	mutex_unlock(&trace_enum_mutex);
4007 }
4008 
4009 static int enum_map_show(struct seq_file *m, void *v)
4010 {
4011 	union trace_enum_map_item *ptr = v;
4012 
4013 	seq_printf(m, "%s %ld (%s)\n",
4014 		   ptr->map.enum_string, ptr->map.enum_value,
4015 		   ptr->map.system);
4016 
4017 	return 0;
4018 }
4019 
4020 static const struct seq_operations tracing_enum_map_seq_ops = {
4021 	.start		= enum_map_start,
4022 	.next		= enum_map_next,
4023 	.stop		= enum_map_stop,
4024 	.show		= enum_map_show,
4025 };
4026 
4027 static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4028 {
4029 	if (tracing_disabled)
4030 		return -ENODEV;
4031 
4032 	return seq_open(filp, &tracing_enum_map_seq_ops);
4033 }
4034 
4035 static const struct file_operations tracing_enum_map_fops = {
4036 	.open		= tracing_enum_map_open,
4037 	.read		= seq_read,
4038 	.llseek		= seq_lseek,
4039 	.release	= seq_release,
4040 };
4041 
4042 static inline union trace_enum_map_item *
4043 trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4044 {
4045 	/* Return tail of array given the head */
4046 	return ptr + ptr->head.length + 1;
4047 }
4048 
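/*
 * Each module (and the core kernel) contributes one chunk to the enum map
 * list. The chunk layout built below is, roughly:
 *
 *   [ head: mod + length ][ map 0 ] ... [ map len-1 ][ tail: next chunk ]
 *
 * trace_enum_jmp_to_tail() uses head.length to hop straight from a chunk's
 * head to its tail when chaining a new chunk onto the list.
 */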
4049 static void
4050 trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4051 			   int len)
4052 {
4053 	struct trace_enum_map **stop;
4054 	struct trace_enum_map **map;
4055 	union trace_enum_map_item *map_array;
4056 	union trace_enum_map_item *ptr;
4057 
4058 	stop = start + len;
4059 
4060 	/*
4061 	 * The trace_enum_maps array contains the maps plus a head and tail
4062 	 * item, where the head holds the module and the length of the array,
4063 	 * and the tail holds a pointer to the next array in the list.
4064 	 */
4065 	map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4066 	if (!map_array) {
4067 		pr_warning("Unable to allocate trace enum mapping\n");
4068 		return;
4069 	}
4070 
4071 	mutex_lock(&trace_enum_mutex);
4072 
4073 	if (!trace_enum_maps)
4074 		trace_enum_maps = map_array;
4075 	else {
4076 		ptr = trace_enum_maps;
4077 		for (;;) {
4078 			ptr = trace_enum_jmp_to_tail(ptr);
4079 			if (!ptr->tail.next)
4080 				break;
4081 			ptr = ptr->tail.next;
4082 
4083 		}
4084 		ptr->tail.next = map_array;
4085 	}
4086 	map_array->head.mod = mod;
4087 	map_array->head.length = len;
4088 	map_array++;
4089 
4090 	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4091 		map_array->map = **map;
4092 		map_array++;
4093 	}
4094 	memset(map_array, 0, sizeof(*map_array));
4095 
4096 	mutex_unlock(&trace_enum_mutex);
4097 }
4098 
4099 static void trace_create_enum_file(struct dentry *d_tracer)
4100 {
4101 	trace_create_file("enum_map", 0444, d_tracer,
4102 			  NULL, &tracing_enum_map_fops);
4103 }
4104 
4105 #else /* CONFIG_TRACE_ENUM_MAP_FILE */
4106 static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4107 static inline void trace_insert_enum_map_file(struct module *mod,
4108 			      struct trace_enum_map **start, int len) { }
4109 #endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4110 
4111 static void trace_insert_enum_map(struct module *mod,
4112 				  struct trace_enum_map **start, int len)
4113 {
4114 	struct trace_enum_map **map;
4115 
4116 	if (len <= 0)
4117 		return;
4118 
4119 	map = start;
4120 
4121 	trace_event_enum_update(map, len);
4122 
4123 	trace_insert_enum_map_file(mod, start, len);
4124 }
4125 
4126 static ssize_t
4127 tracing_set_trace_read(struct file *filp, char __user *ubuf,
4128 		       size_t cnt, loff_t *ppos)
4129 {
4130 	struct trace_array *tr = filp->private_data;
4131 	char buf[MAX_TRACER_SIZE+2];
4132 	int r;
4133 
4134 	mutex_lock(&trace_types_lock);
4135 	r = sprintf(buf, "%s\n", tr->current_trace->name);
4136 	mutex_unlock(&trace_types_lock);
4137 
4138 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4139 }
4140 
4141 int tracer_init(struct tracer *t, struct trace_array *tr)
4142 {
4143 	tracing_reset_online_cpus(&tr->trace_buffer);
4144 	return t->init(tr);
4145 }
4146 
4147 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
4148 {
4149 	int cpu;
4150 
4151 	for_each_tracing_cpu(cpu)
4152 		per_cpu_ptr(buf->data, cpu)->entries = val;
4153 }
4154 
4155 #ifdef CONFIG_TRACER_MAX_TRACE
4156 /* resize @trace_buf's buffer to the size of @size_buf's entries */
4157 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4158 					struct trace_buffer *size_buf, int cpu_id)
4159 {
4160 	int cpu, ret = 0;
4161 
4162 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
4163 		for_each_tracing_cpu(cpu) {
4164 			ret = ring_buffer_resize(trace_buf->buffer,
4165 				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
4166 			if (ret < 0)
4167 				break;
4168 			per_cpu_ptr(trace_buf->data, cpu)->entries =
4169 				per_cpu_ptr(size_buf->data, cpu)->entries;
4170 		}
4171 	} else {
4172 		ret = ring_buffer_resize(trace_buf->buffer,
4173 				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
4174 		if (ret == 0)
4175 			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4176 				per_cpu_ptr(size_buf->data, cpu_id)->entries;
4177 	}
4178 
4179 	return ret;
4180 }
4181 #endif /* CONFIG_TRACER_MAX_TRACE */
4182 
4183 static int __tracing_resize_ring_buffer(struct trace_array *tr,
4184 					unsigned long size, int cpu)
4185 {
4186 	int ret;
4187 
4188 	/*
4189 	 * If kernel or user changes the size of the ring buffer
4190 	 * we use the size that was given, and we can forget about
4191 	 * expanding it later.
4192 	 */
4193 	ring_buffer_expanded = true;
4194 
4195 	/* May be called before buffers are initialized */
4196 	if (!tr->trace_buffer.buffer)
4197 		return 0;
4198 
4199 	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
4200 	if (ret < 0)
4201 		return ret;
4202 
4203 #ifdef CONFIG_TRACER_MAX_TRACE
4204 	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4205 	    !tr->current_trace->use_max_tr)
4206 		goto out;
4207 
4208 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
4209 	if (ret < 0) {
4210 		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4211 						     &tr->trace_buffer, cpu);
4212 		if (r < 0) {
4213 			/*
4214 			 * AARGH! We are left with different
4215 			 * size max buffer!!!!
4216 			 * The max buffer is our "snapshot" buffer.
4217 			 * When a tracer needs a snapshot (one of the
4218 			 * latency tracers), it swaps the max buffer
4219 			 * with the saved snapshot. We succeeded in updating
4220 			 * the size of the main buffer, but failed to
4221 			 * update the size of the max buffer. But when we tried
4222 			 * to reset the main buffer to the original size, we
4223 			 * failed there too. This is very unlikely to
4224 			 * happen, but if it does, warn and kill all
4225 			 * tracing.
4226 			 */
4227 			WARN_ON(1);
4228 			tracing_disabled = 1;
4229 		}
4230 		return ret;
4231 	}
4232 
4233 	if (cpu == RING_BUFFER_ALL_CPUS)
4234 		set_buffer_entries(&tr->max_buffer, size);
4235 	else
4236 		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4237 
4238  out:
4239 #endif /* CONFIG_TRACER_MAX_TRACE */
4240 
4241 	if (cpu == RING_BUFFER_ALL_CPUS)
4242 		set_buffer_entries(&tr->trace_buffer, size);
4243 	else
4244 		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4245 
4246 	return ret;
4247 }
4248 
4249 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4250 					  unsigned long size, int cpu_id)
4251 {
4252 	int ret = size;
4253 
4254 	mutex_lock(&trace_types_lock);
4255 
4256 	if (cpu_id != RING_BUFFER_ALL_CPUS) {
4257 		/* make sure this cpu is enabled in the mask */
4258 		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4259 			ret = -EINVAL;
4260 			goto out;
4261 		}
4262 	}
4263 
4264 	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4265 	if (ret < 0)
4266 		ret = -ENOMEM;
4267 
4268 out:
4269 	mutex_unlock(&trace_types_lock);
4270 
4271 	return ret;
4272 }
4273 
4274 
4275 /**
4276  * tracing_update_buffers - used by tracing facility to expand ring buffers
4277  *
4278  * To save memory when tracing is never used on a system that has it
4279  * configured in, the ring buffers are set to a minimum size. Once a
4280  * user starts to use the tracing facility, the buffers need to grow
4281  * to their default size.
4282  *
4283  * This function is to be called when a tracer is about to be used.
4284  */
4285 int tracing_update_buffers(void)
4286 {
4287 	int ret = 0;
4288 
4289 	mutex_lock(&trace_types_lock);
4290 	if (!ring_buffer_expanded)
4291 		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4292 						RING_BUFFER_ALL_CPUS);
4293 	mutex_unlock(&trace_types_lock);
4294 
4295 	return ret;
4296 }
4297 
4298 struct trace_option_dentry;
4299 
4300 static struct trace_option_dentry *
4301 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4302 
4303 static void
4304 destroy_trace_option_files(struct trace_option_dentry *topts);
4305 
4306 /*
4307  * Used to clear out the tracer before deletion of an instance.
4308  * Must have trace_types_lock held.
4309  */
4310 static void tracing_set_nop(struct trace_array *tr)
4311 {
4312 	if (tr->current_trace == &nop_trace)
4313 		return;
4314 
4315 	tr->current_trace->enabled--;
4316 
4317 	if (tr->current_trace->reset)
4318 		tr->current_trace->reset(tr);
4319 
4320 	tr->current_trace = &nop_trace;
4321 }
4322 
4323 static void update_tracer_options(struct trace_array *tr, struct tracer *t)
4324 {
4325 	static struct trace_option_dentry *topts;
4326 
4327 	/* Only enable if the directory has been created already. */
4328 	if (!tr->dir)
4329 		return;
4330 
4331 	/* Currently, only the top instance has options */
4332 	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
4333 		return;
4334 
4335 	destroy_trace_option_files(topts);
4336 	topts = create_trace_option_files(tr, t);
4337 }
4338 
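/*
 * tracing_set_tracer() is the workhorse behind writes to the
 * "current_tracer" file: it tears down the old tracer (reset and drop its
 * enabled count), parks the instance on nop_trace, adjusts the snapshot
 * buffer if the new tracer needs (or no longer needs) it, and only then
 * initializes and installs the new tracer. Typical usage (illustrative):
 *
 *   echo function_graph > /sys/kernel/tracing/current_tracer
 */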
4339 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4340 {
4341 	struct tracer *t;
4342 #ifdef CONFIG_TRACER_MAX_TRACE
4343 	bool had_max_tr;
4344 #endif
4345 	int ret = 0;
4346 
4347 	mutex_lock(&trace_types_lock);
4348 
4349 	if (!ring_buffer_expanded) {
4350 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4351 						RING_BUFFER_ALL_CPUS);
4352 		if (ret < 0)
4353 			goto out;
4354 		ret = 0;
4355 	}
4356 
4357 	for (t = trace_types; t; t = t->next) {
4358 		if (strcmp(t->name, buf) == 0)
4359 			break;
4360 	}
4361 	if (!t) {
4362 		ret = -EINVAL;
4363 		goto out;
4364 	}
4365 	if (t == tr->current_trace)
4366 		goto out;
4367 
4368 	/* Some tracers are only allowed for the top level buffer */
4369 	if (!trace_ok_for_array(t, tr)) {
4370 		ret = -EINVAL;
4371 		goto out;
4372 	}
4373 
4374 	/* If trace pipe files are being read, we can't change the tracer */
4375 	if (tr->current_trace->ref) {
4376 		ret = -EBUSY;
4377 		goto out;
4378 	}
4379 
4380 	trace_branch_disable();
4381 
4382 	tr->current_trace->enabled--;
4383 
4384 	if (tr->current_trace->reset)
4385 		tr->current_trace->reset(tr);
4386 
4387 	/* Current trace needs to be nop_trace before synchronize_sched */
4388 	tr->current_trace = &nop_trace;
4389 
4390 #ifdef CONFIG_TRACER_MAX_TRACE
4391 	had_max_tr = tr->allocated_snapshot;
4392 
4393 	if (had_max_tr && !t->use_max_tr) {
4394 		/*
4395 		 * We need to make sure that the update_max_tr sees that
4396 		 * current_trace changed to nop_trace to keep it from
4397 		 * swapping the buffers after we resize it.
4398 		 * update_max_tr() is called with interrupts disabled,
4399 		 * so a synchronize_sched() is sufficient.
4400 		 */
4401 		synchronize_sched();
4402 		free_snapshot(tr);
4403 	}
4404 #endif
4405 	update_tracer_options(tr, t);
4406 
4407 #ifdef CONFIG_TRACER_MAX_TRACE
4408 	if (t->use_max_tr && !had_max_tr) {
4409 		ret = alloc_snapshot(tr);
4410 		if (ret < 0)
4411 			goto out;
4412 	}
4413 #endif
4414 
4415 	if (t->init) {
4416 		ret = tracer_init(t, tr);
4417 		if (ret)
4418 			goto out;
4419 	}
4420 
4421 	tr->current_trace = t;
4422 	tr->current_trace->enabled++;
4423 	trace_branch_enable(tr);
4424  out:
4425 	mutex_unlock(&trace_types_lock);
4426 
4427 	return ret;
4428 }
4429 
4430 static ssize_t
4431 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4432 			size_t cnt, loff_t *ppos)
4433 {
4434 	struct trace_array *tr = filp->private_data;
4435 	char buf[MAX_TRACER_SIZE+1];
4436 	int i;
4437 	size_t ret;
4438 	int err;
4439 
4440 	ret = cnt;
4441 
4442 	if (cnt > MAX_TRACER_SIZE)
4443 		cnt = MAX_TRACER_SIZE;
4444 
4445 	if (copy_from_user(&buf, ubuf, cnt))
4446 		return -EFAULT;
4447 
4448 	buf[cnt] = 0;
4449 
4450 	/* strip trailing whitespace. */
4451 	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4452 		buf[i] = 0;
4453 
4454 	err = tracing_set_tracer(tr, buf);
4455 	if (err)
4456 		return err;
4457 
4458 	*ppos += ret;
4459 
4460 	return ret;
4461 }
4462 
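/*
 * Helpers for the latency control files (tracing_thresh and
 * tracing_max_latency): the values are kept in nanoseconds internally but
 * presented to userspace in microseconds, so reads go through
 * nsecs_to_usecs() and writes are multiplied by 1000. A stored value of
 * -1 ("unset") is shown as -1 rather than being converted.
 */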
4463 static ssize_t
4464 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4465 		   size_t cnt, loff_t *ppos)
4466 {
4467 	char buf[64];
4468 	int r;
4469 
4470 	r = snprintf(buf, sizeof(buf), "%ld\n",
4471 		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4472 	if (r > sizeof(buf))
4473 		r = sizeof(buf);
4474 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4475 }
4476 
4477 static ssize_t
4478 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4479 		    size_t cnt, loff_t *ppos)
4480 {
4481 	unsigned long val;
4482 	int ret;
4483 
4484 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4485 	if (ret)
4486 		return ret;
4487 
4488 	*ptr = val * 1000;
4489 
4490 	return cnt;
4491 }
4492 
4493 static ssize_t
4494 tracing_thresh_read(struct file *filp, char __user *ubuf,
4495 		    size_t cnt, loff_t *ppos)
4496 {
4497 	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4498 }
4499 
4500 static ssize_t
4501 tracing_thresh_write(struct file *filp, const char __user *ubuf,
4502 		     size_t cnt, loff_t *ppos)
4503 {
4504 	struct trace_array *tr = filp->private_data;
4505 	int ret;
4506 
4507 	mutex_lock(&trace_types_lock);
4508 	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4509 	if (ret < 0)
4510 		goto out;
4511 
4512 	if (tr->current_trace->update_thresh) {
4513 		ret = tr->current_trace->update_thresh(tr);
4514 		if (ret < 0)
4515 			goto out;
4516 	}
4517 
4518 	ret = cnt;
4519 out:
4520 	mutex_unlock(&trace_types_lock);
4521 
4522 	return ret;
4523 }
4524 
4525 static ssize_t
4526 tracing_max_lat_read(struct file *filp, char __user *ubuf,
4527 		     size_t cnt, loff_t *ppos)
4528 {
4529 	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4530 }
4531 
4532 static ssize_t
4533 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4534 		      size_t cnt, loff_t *ppos)
4535 {
4536 	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4537 }
4538 
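/*
 * "trace_pipe" is the consuming, blocking reader of the ring buffer: each
 * open gets its own trace_iterator, reads remove the entries they return,
 * and while a pipe file is open the current tracer cannot be changed (see
 * the ->ref accounting checked in tracing_set_tracer()).
 * Example (illustrative):
 *
 *   cat /sys/kernel/tracing/trace_pipe
 */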
4539 static int tracing_open_pipe(struct inode *inode, struct file *filp)
4540 {
4541 	struct trace_array *tr = inode->i_private;
4542 	struct trace_iterator *iter;
4543 	int ret = 0;
4544 
4545 	if (tracing_disabled)
4546 		return -ENODEV;
4547 
4548 	if (trace_array_get(tr) < 0)
4549 		return -ENODEV;
4550 
4551 	mutex_lock(&trace_types_lock);
4552 
4553 	/* create a buffer to store the information to pass to userspace */
4554 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4555 	if (!iter) {
4556 		ret = -ENOMEM;
4557 		__trace_array_put(tr);
4558 		goto out;
4559 	}
4560 
4561 	trace_seq_init(&iter->seq);
4562 	iter->trace = tr->current_trace;
4563 
4564 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4565 		ret = -ENOMEM;
4566 		goto fail;
4567 	}
4568 
4569 	/* trace pipe does not show start of buffer */
4570 	cpumask_setall(iter->started);
4571 
4572 	if (trace_flags & TRACE_ITER_LATENCY_FMT)
4573 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
4574 
4575 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
4576 	if (trace_clocks[tr->clock_id].in_ns)
4577 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4578 
4579 	iter->tr = tr;
4580 	iter->trace_buffer = &tr->trace_buffer;
4581 	iter->cpu_file = tracing_get_cpu(inode);
4582 	mutex_init(&iter->mutex);
4583 	filp->private_data = iter;
4584 
4585 	if (iter->trace->pipe_open)
4586 		iter->trace->pipe_open(iter);
4587 
4588 	nonseekable_open(inode, filp);
4589 
4590 	tr->current_trace->ref++;
4591 out:
4592 	mutex_unlock(&trace_types_lock);
4593 	return ret;
4594 
4595 fail:
4596 	kfree(iter->trace);
4597 	kfree(iter);
4598 	__trace_array_put(tr);
4599 	mutex_unlock(&trace_types_lock);
4600 	return ret;
4601 }
4602 
4603 static int tracing_release_pipe(struct inode *inode, struct file *file)
4604 {
4605 	struct trace_iterator *iter = file->private_data;
4606 	struct trace_array *tr = inode->i_private;
4607 
4608 	mutex_lock(&trace_types_lock);
4609 
4610 	tr->current_trace->ref--;
4611 
4612 	if (iter->trace->pipe_close)
4613 		iter->trace->pipe_close(iter);
4614 
4615 	mutex_unlock(&trace_types_lock);
4616 
4617 	free_cpumask_var(iter->started);
4618 	mutex_destroy(&iter->mutex);
4619 	kfree(iter);
4620 
4621 	trace_array_put(tr);
4622 
4623 	return 0;
4624 }
4625 
4626 static unsigned int
4627 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4628 {
4629 	/* Iterators are static; they should be either filled or empty */
4630 	if (trace_buffer_iter(iter, iter->cpu_file))
4631 		return POLLIN | POLLRDNORM;
4632 
4633 	if (trace_flags & TRACE_ITER_BLOCK)
4634 		/*
4635 		 * Always select as readable when in blocking mode
4636 		 */
4637 		return POLLIN | POLLRDNORM;
4638 	else
4639 		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4640 					     filp, poll_table);
4641 }
4642 
4643 static unsigned int
4644 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4645 {
4646 	struct trace_iterator *iter = filp->private_data;
4647 
4648 	return trace_poll(iter, filp, poll_table);
4649 }
4650 
4651 /* Must be called with iter->mutex held. */
4652 static int tracing_wait_pipe(struct file *filp)
4653 {
4654 	struct trace_iterator *iter = filp->private_data;
4655 	int ret;
4656 
4657 	while (trace_empty(iter)) {
4658 
4659 		if ((filp->f_flags & O_NONBLOCK)) {
4660 			return -EAGAIN;
4661 		}
4662 
4663 		/*
4664 		 * We only return EOF once we have read something and tracing
4665 		 * has been disabled. If tracing is disabled but nothing has
4666 		 * been read yet, we keep blocking. This allows a user to cat
4667 		 * this file and then enable tracing. After we have read
4668 		 * something, we give an EOF when tracing is disabled again.
4669 		 *
4670 		 * iter->pos will be 0 if we haven't read anything.
4671 		 */
4672 		if (!tracing_is_on() && iter->pos)
4673 			break;
4674 
4675 		mutex_unlock(&iter->mutex);
4676 
4677 		ret = wait_on_pipe(iter, false);
4678 
4679 		mutex_lock(&iter->mutex);
4680 
4681 		if (ret)
4682 			return ret;
4683 	}
4684 
4685 	return 1;
4686 }
4687 
4688 /*
4689  * Consumer reader.
4690  */
4691 static ssize_t
4692 tracing_read_pipe(struct file *filp, char __user *ubuf,
4693 		  size_t cnt, loff_t *ppos)
4694 {
4695 	struct trace_iterator *iter = filp->private_data;
4696 	ssize_t sret;
4697 
4698 	/* return any leftover data */
4699 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4700 	if (sret != -EBUSY)
4701 		return sret;
4702 
4703 	trace_seq_init(&iter->seq);
4704 
4705 	/*
4706 	 * Avoid more than one consumer on a single file descriptor.
4707 	 * This is just a matter of trace coherency; the ring buffer itself
4708 	 * is protected.
4709 	 */
4710 	mutex_lock(&iter->mutex);
4711 	if (iter->trace->read) {
4712 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4713 		if (sret)
4714 			goto out;
4715 	}
4716 
4717 waitagain:
4718 	sret = tracing_wait_pipe(filp);
4719 	if (sret <= 0)
4720 		goto out;
4721 
4722 	/* stop when tracing is finished */
4723 	if (trace_empty(iter)) {
4724 		sret = 0;
4725 		goto out;
4726 	}
4727 
4728 	if (cnt >= PAGE_SIZE)
4729 		cnt = PAGE_SIZE - 1;
4730 
4731 	/* reset all but tr, trace, and overruns */
4732 	memset(&iter->seq, 0,
4733 	       sizeof(struct trace_iterator) -
4734 	       offsetof(struct trace_iterator, seq));
4735 	cpumask_clear(iter->started);
4736 	iter->pos = -1;
4737 
4738 	trace_event_read_lock();
4739 	trace_access_lock(iter->cpu_file);
4740 	while (trace_find_next_entry_inc(iter) != NULL) {
4741 		enum print_line_t ret;
4742 		int save_len = iter->seq.seq.len;
4743 
4744 		ret = print_trace_line(iter);
4745 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4746 			/* don't print partial lines */
4747 			iter->seq.seq.len = save_len;
4748 			break;
4749 		}
4750 		if (ret != TRACE_TYPE_NO_CONSUME)
4751 			trace_consume(iter);
4752 
4753 		if (trace_seq_used(&iter->seq) >= cnt)
4754 			break;
4755 
4756 		/*
4757 		 * Setting the full flag means we reached the trace_seq buffer
4758 		 * size and should have left via the partial-line condition above.
4759 		 * One of the trace_seq_* functions is not being used properly.
4760 		 */
4761 		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4762 			  iter->ent->type);
4763 	}
4764 	trace_access_unlock(iter->cpu_file);
4765 	trace_event_read_unlock();
4766 
4767 	/* Now copy what we have to the user */
4768 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4769 	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4770 		trace_seq_init(&iter->seq);
4771 
4772 	/*
4773 	 * If there was nothing to send to user, in spite of consuming trace
4774 	 * entries, go back to wait for more entries.
4775 	 */
4776 	if (sret == -EBUSY)
4777 		goto waitagain;
4778 
4779 out:
4780 	mutex_unlock(&iter->mutex);
4781 
4782 	return sret;
4783 }
4784 
4785 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4786 				     unsigned int idx)
4787 {
4788 	__free_page(spd->pages[idx]);
4789 }
4790 
4791 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4792 	.can_merge		= 0,
4793 	.confirm		= generic_pipe_buf_confirm,
4794 	.release		= generic_pipe_buf_release,
4795 	.steal			= generic_pipe_buf_steal,
4796 	.get			= generic_pipe_buf_get,
4797 };
4798 
4799 static size_t
4800 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4801 {
4802 	size_t count;
4803 	int save_len;
4804 	int ret;
4805 
4806 	/* Seq buffer is page-sized, exactly what we need. */
4807 	for (;;) {
4808 		save_len = iter->seq.seq.len;
4809 		ret = print_trace_line(iter);
4810 
4811 		if (trace_seq_has_overflowed(&iter->seq)) {
4812 			iter->seq.seq.len = save_len;
4813 			break;
4814 		}
4815 
4816 		/*
4817 		 * This should not be hit, because a partial line should only
4818 		 * be returned if iter->seq overflowed, which was checked
4819 		 * above. But check it anyway to be safe.
4820 		 */
4821 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4822 			iter->seq.seq.len = save_len;
4823 			break;
4824 		}
4825 
4826 		count = trace_seq_used(&iter->seq) - save_len;
4827 		if (rem < count) {
4828 			rem = 0;
4829 			iter->seq.seq.len = save_len;
4830 			break;
4831 		}
4832 
4833 		if (ret != TRACE_TYPE_NO_CONSUME)
4834 			trace_consume(iter);
4835 		rem -= count;
4836 		if (!trace_find_next_entry_inc(iter))	{
4837 			rem = 0;
4838 			iter->ent = NULL;
4839 			break;
4840 		}
4841 	}
4842 
4843 	return rem;
4844 }
4845 
4846 static ssize_t tracing_splice_read_pipe(struct file *filp,
4847 					loff_t *ppos,
4848 					struct pipe_inode_info *pipe,
4849 					size_t len,
4850 					unsigned int flags)
4851 {
4852 	struct page *pages_def[PIPE_DEF_BUFFERS];
4853 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
4854 	struct trace_iterator *iter = filp->private_data;
4855 	struct splice_pipe_desc spd = {
4856 		.pages		= pages_def,
4857 		.partial	= partial_def,
4858 		.nr_pages	= 0, /* This gets updated below. */
4859 		.nr_pages_max	= PIPE_DEF_BUFFERS,
4860 		.flags		= flags,
4861 		.ops		= &tracing_pipe_buf_ops,
4862 		.spd_release	= tracing_spd_release_pipe,
4863 	};
4864 	ssize_t ret;
4865 	size_t rem;
4866 	unsigned int i;
4867 
4868 	if (splice_grow_spd(pipe, &spd))
4869 		return -ENOMEM;
4870 
4871 	mutex_lock(&iter->mutex);
4872 
4873 	if (iter->trace->splice_read) {
4874 		ret = iter->trace->splice_read(iter, filp,
4875 					       ppos, pipe, len, flags);
4876 		if (ret)
4877 			goto out_err;
4878 	}
4879 
4880 	ret = tracing_wait_pipe(filp);
4881 	if (ret <= 0)
4882 		goto out_err;
4883 
4884 	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4885 		ret = -EFAULT;
4886 		goto out_err;
4887 	}
4888 
4889 	trace_event_read_lock();
4890 	trace_access_lock(iter->cpu_file);
4891 
4892 	/* Fill as many pages as possible. */
4893 	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4894 		spd.pages[i] = alloc_page(GFP_KERNEL);
4895 		if (!spd.pages[i])
4896 			break;
4897 
4898 		rem = tracing_fill_pipe_page(rem, iter);
4899 
4900 		/* Copy the data into the page, so we can start over. */
4901 		ret = trace_seq_to_buffer(&iter->seq,
4902 					  page_address(spd.pages[i]),
4903 					  trace_seq_used(&iter->seq));
4904 		if (ret < 0) {
4905 			__free_page(spd.pages[i]);
4906 			break;
4907 		}
4908 		spd.partial[i].offset = 0;
4909 		spd.partial[i].len = trace_seq_used(&iter->seq);
4910 
4911 		trace_seq_init(&iter->seq);
4912 	}
4913 
4914 	trace_access_unlock(iter->cpu_file);
4915 	trace_event_read_unlock();
4916 	mutex_unlock(&iter->mutex);
4917 
4918 	spd.nr_pages = i;
4919 
4920 	ret = splice_to_pipe(pipe, &spd);
4921 out:
4922 	splice_shrink_spd(&spd);
4923 	return ret;
4924 
4925 out_err:
4926 	mutex_unlock(&iter->mutex);
4927 	goto out;
4928 }
4929 
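/*
 * "buffer_size_kb" (global or under per_cpu/cpuN/) reports and sets the
 * ring buffer size in kilobytes. The read side prints "X" when the per-cpu
 * sizes differ and appends "(expanded: N)" while the buffer is still at its
 * boot-time minimum; the write side shifts the value by 10 and resizes via
 * tracing_resize_ring_buffer(). Example (illustrative):
 *
 *   echo 4096 > /sys/kernel/tracing/buffer_size_kb
 */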
4930 static ssize_t
4931 tracing_entries_read(struct file *filp, char __user *ubuf,
4932 		     size_t cnt, loff_t *ppos)
4933 {
4934 	struct inode *inode = file_inode(filp);
4935 	struct trace_array *tr = inode->i_private;
4936 	int cpu = tracing_get_cpu(inode);
4937 	char buf[64];
4938 	int r = 0;
4939 	ssize_t ret;
4940 
4941 	mutex_lock(&trace_types_lock);
4942 
4943 	if (cpu == RING_BUFFER_ALL_CPUS) {
4944 		int cpu, buf_size_same;
4945 		unsigned long size;
4946 
4947 		size = 0;
4948 		buf_size_same = 1;
4949 		/* check if all cpu sizes are same */
4950 		for_each_tracing_cpu(cpu) {
4951 			/* fill in the size from first enabled cpu */
4952 			if (size == 0)
4953 				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4954 			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4955 				buf_size_same = 0;
4956 				break;
4957 			}
4958 		}
4959 
4960 		if (buf_size_same) {
4961 			if (!ring_buffer_expanded)
4962 				r = sprintf(buf, "%lu (expanded: %lu)\n",
4963 					    size >> 10,
4964 					    trace_buf_size >> 10);
4965 			else
4966 				r = sprintf(buf, "%lu\n", size >> 10);
4967 		} else
4968 			r = sprintf(buf, "X\n");
4969 	} else
4970 		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4971 
4972 	mutex_unlock(&trace_types_lock);
4973 
4974 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4975 	return ret;
4976 }
4977 
4978 static ssize_t
4979 tracing_entries_write(struct file *filp, const char __user *ubuf,
4980 		      size_t cnt, loff_t *ppos)
4981 {
4982 	struct inode *inode = file_inode(filp);
4983 	struct trace_array *tr = inode->i_private;
4984 	unsigned long val;
4985 	int ret;
4986 
4987 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4988 	if (ret)
4989 		return ret;
4990 
4991 	/* must have at least 1 entry */
4992 	if (!val)
4993 		return -EINVAL;
4994 
4995 	/* value is in KB */
4996 	val <<= 10;
4997 	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4998 	if (ret < 0)
4999 		return ret;
5000 
5001 	*ppos += cnt;
5002 
5003 	return cnt;
5004 }
5005 
5006 static ssize_t
5007 tracing_total_entries_read(struct file *filp, char __user *ubuf,
5008 				size_t cnt, loff_t *ppos)
5009 {
5010 	struct trace_array *tr = filp->private_data;
5011 	char buf[64];
5012 	int r, cpu;
5013 	unsigned long size = 0, expanded_size = 0;
5014 
5015 	mutex_lock(&trace_types_lock);
5016 	for_each_tracing_cpu(cpu) {
5017 		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5018 		if (!ring_buffer_expanded)
5019 			expanded_size += trace_buf_size >> 10;
5020 	}
5021 	if (ring_buffer_expanded)
5022 		r = sprintf(buf, "%lu\n", size);
5023 	else
5024 		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5025 	mutex_unlock(&trace_types_lock);
5026 
5027 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5028 }
5029 
5030 static ssize_t
5031 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5032 			  size_t cnt, loff_t *ppos)
5033 {
5034 	/*
5035 	 * There is no need to read what the user has written; this function
5036 	 * exists only so that using "echo" on the file does not return an error
5037 	 */
5038 
5039 	*ppos += cnt;
5040 
5041 	return cnt;
5042 }
5043 
5044 static int
5045 tracing_free_buffer_release(struct inode *inode, struct file *filp)
5046 {
5047 	struct trace_array *tr = inode->i_private;
5048 
5049 	/* disable tracing ? */
5050 	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
5051 		tracer_tracing_off(tr);
5052 	/* resize the ring buffer to 0 */
5053 	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5054 
5055 	trace_array_put(tr);
5056 
5057 	return 0;
5058 }
5059 
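/*
 * "trace_marker" lets userspace inject its own annotations into the trace
 * as TRACE_PRINT events. The write below maps the user pages directly and
 * copies the string straight into a reserved ring buffer event, so no
 * intermediate kernel buffer is needed. Example (illustrative):
 *
 *   echo "hit the interesting code path" > /sys/kernel/tracing/trace_marker
 */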
5060 static ssize_t
5061 tracing_mark_write(struct file *filp, const char __user *ubuf,
5062 					size_t cnt, loff_t *fpos)
5063 {
5064 	unsigned long addr = (unsigned long)ubuf;
5065 	struct trace_array *tr = filp->private_data;
5066 	struct ring_buffer_event *event;
5067 	struct ring_buffer *buffer;
5068 	struct print_entry *entry;
5069 	unsigned long irq_flags;
5070 	struct page *pages[2];
5071 	void *map_page[2];
5072 	int nr_pages = 1;
5073 	ssize_t written;
5074 	int offset;
5075 	int size;
5076 	int len;
5077 	int ret;
5078 	int i;
5079 
5080 	if (tracing_disabled)
5081 		return -EINVAL;
5082 
5083 	if (!(trace_flags & TRACE_ITER_MARKERS))
5084 		return -EINVAL;
5085 
5086 	if (cnt > TRACE_BUF_SIZE)
5087 		cnt = TRACE_BUF_SIZE;
5088 
5089 	/*
5090 	 * Userspace is injecting traces into the kernel trace buffer.
5091 	 * We want to be as non-intrusive as possible.
5092 	 * To do so, we do not want to allocate any special buffers
5093 	 * or take any locks, but instead write the userspace data
5094 	 * straight into the ring buffer.
5095 	 *
5096 	 * First we need to pin the userspace buffer into memory. It most
5097 	 * likely already is, because userspace just referenced it, but
5098 	 * there's no guarantee that it is. By using get_user_pages_fast()
5099 	 * and kmap_atomic/kunmap_atomic() we can get access to the
5100 	 * pages directly. We then write the data directly into the
5101 	 * ring buffer.
5102 	 */
5103 	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5104 
5105 	/* check if we cross pages */
5106 	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5107 		nr_pages = 2;
5108 
5109 	offset = addr & (PAGE_SIZE - 1);
5110 	addr &= PAGE_MASK;
5111 
5112 	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5113 	if (ret < nr_pages) {
5114 		while (--ret >= 0)
5115 			put_page(pages[ret]);
5116 		written = -EFAULT;
5117 		goto out;
5118 	}
5119 
5120 	for (i = 0; i < nr_pages; i++)
5121 		map_page[i] = kmap_atomic(pages[i]);
5122 
5123 	local_save_flags(irq_flags);
5124 	size = sizeof(*entry) + cnt + 2; /* possible \n added */
5125 	buffer = tr->trace_buffer.buffer;
5126 	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5127 					  irq_flags, preempt_count());
5128 	if (!event) {
5129 		/* Ring buffer disabled, return as if not open for write */
5130 		written = -EBADF;
5131 		goto out_unlock;
5132 	}
5133 
5134 	entry = ring_buffer_event_data(event);
5135 	entry->ip = _THIS_IP_;
5136 
5137 	if (nr_pages == 2) {
5138 		len = PAGE_SIZE - offset;
5139 		memcpy(&entry->buf, map_page[0] + offset, len);
5140 		memcpy(&entry->buf[len], map_page[1], cnt - len);
5141 	} else
5142 		memcpy(&entry->buf, map_page[0] + offset, cnt);
5143 
5144 	if (entry->buf[cnt - 1] != '\n') {
5145 		entry->buf[cnt] = '\n';
5146 		entry->buf[cnt + 1] = '\0';
5147 	} else
5148 		entry->buf[cnt] = '\0';
5149 
5150 	__buffer_unlock_commit(buffer, event);
5151 
5152 	written = cnt;
5153 
5154 	*fpos += written;
5155 
5156  out_unlock:
5157 	for (i = nr_pages - 1; i >= 0; i--) {
5158 		kunmap_atomic(map_page[i]);
5159 		put_page(pages[i]);
5160 	}
5161  out:
5162 	return written;
5163 }
5164 
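/*
 * "trace_clock" selects which clock timestamps the ring buffer. Reading it
 * lists the available clocks with the current one in brackets; writing a
 * clock name switches to it and resets the buffers, since timestamps from
 * different clocks are not comparable. Example (illustrative):
 *
 *   cat /sys/kernel/tracing/trace_clock
 *   [local] global counter ...
 *   echo global > /sys/kernel/tracing/trace_clock
 */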
5165 static int tracing_clock_show(struct seq_file *m, void *v)
5166 {
5167 	struct trace_array *tr = m->private;
5168 	int i;
5169 
5170 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5171 		seq_printf(m,
5172 			"%s%s%s%s", i ? " " : "",
5173 			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5174 			i == tr->clock_id ? "]" : "");
5175 	seq_putc(m, '\n');
5176 
5177 	return 0;
5178 }
5179 
5180 static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5181 {
5182 	int i;
5183 
5184 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5185 		if (strcmp(trace_clocks[i].name, clockstr) == 0)
5186 			break;
5187 	}
5188 	if (i == ARRAY_SIZE(trace_clocks))
5189 		return -EINVAL;
5190 
5191 	mutex_lock(&trace_types_lock);
5192 
5193 	tr->clock_id = i;
5194 
5195 	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5196 
5197 	/*
5198 	 * New clock may not be consistent with the previous clock.
5199 	 * Reset the buffer so that it doesn't have incomparable timestamps.
5200 	 */
5201 	tracing_reset_online_cpus(&tr->trace_buffer);
5202 
5203 #ifdef CONFIG_TRACER_MAX_TRACE
5204 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5205 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
5206 	tracing_reset_online_cpus(&tr->max_buffer);
5207 #endif
5208 
5209 	mutex_unlock(&trace_types_lock);
5210 
5211 	return 0;
5212 }
5213 
5214 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5215 				   size_t cnt, loff_t *fpos)
5216 {
5217 	struct seq_file *m = filp->private_data;
5218 	struct trace_array *tr = m->private;
5219 	char buf[64];
5220 	const char *clockstr;
5221 	int ret;
5222 
5223 	if (cnt >= sizeof(buf))
5224 		return -EINVAL;
5225 
5226 	if (copy_from_user(&buf, ubuf, cnt))
5227 		return -EFAULT;
5228 
5229 	buf[cnt] = 0;
5230 
5231 	clockstr = strstrip(buf);
5232 
5233 	ret = tracing_set_clock(tr, clockstr);
5234 	if (ret)
5235 		return ret;
5236 
5237 	*fpos += cnt;
5238 
5239 	return cnt;
5240 }
5241 
5242 static int tracing_clock_open(struct inode *inode, struct file *file)
5243 {
5244 	struct trace_array *tr = inode->i_private;
5245 	int ret;
5246 
5247 	if (tracing_disabled)
5248 		return -ENODEV;
5249 
5250 	if (trace_array_get(tr))
5251 		return -ENODEV;
5252 
5253 	ret = single_open(file, tracing_clock_show, inode->i_private);
5254 	if (ret < 0)
5255 		trace_array_put(tr);
5256 
5257 	return ret;
5258 }
5259 
5260 struct ftrace_buffer_info {
5261 	struct trace_iterator	iter;
5262 	void			*spare;
5263 	unsigned int		read;
5264 };
5265 
5266 #ifdef CONFIG_TRACER_SNAPSHOT
5267 static int tracing_snapshot_open(struct inode *inode, struct file *file)
5268 {
5269 	struct trace_array *tr = inode->i_private;
5270 	struct trace_iterator *iter;
5271 	struct seq_file *m;
5272 	int ret = 0;
5273 
5274 	if (trace_array_get(tr) < 0)
5275 		return -ENODEV;
5276 
5277 	if (file->f_mode & FMODE_READ) {
5278 		iter = __tracing_open(inode, file, true);
5279 		if (IS_ERR(iter))
5280 			ret = PTR_ERR(iter);
5281 	} else {
5282 		/* Writes still need the seq_file to hold the private data */
5283 		ret = -ENOMEM;
5284 		m = kzalloc(sizeof(*m), GFP_KERNEL);
5285 		if (!m)
5286 			goto out;
5287 		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5288 		if (!iter) {
5289 			kfree(m);
5290 			goto out;
5291 		}
5292 		ret = 0;
5293 
5294 		iter->tr = tr;
5295 		iter->trace_buffer = &tr->max_buffer;
5296 		iter->cpu_file = tracing_get_cpu(inode);
5297 		m->private = iter;
5298 		file->private_data = m;
5299 	}
5300 out:
5301 	if (ret < 0)
5302 		trace_array_put(tr);
5303 
5304 	return ret;
5305 }
5306 
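/*
 * Writes to the "snapshot" file are interpreted by the switch below:
 * 0 frees the snapshot buffer, 1 allocates it (if needed) and swaps it with
 * the live buffer, and any other value just clears the snapshot buffer.
 * Per-cpu swaps are only honoured when the ring buffer supports them.
 * Example (illustrative):
 *
 *   echo 1 > /sys/kernel/tracing/snapshot
 *   cat /sys/kernel/tracing/snapshot
 */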
5307 static ssize_t
5308 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5309 		       loff_t *ppos)
5310 {
5311 	struct seq_file *m = filp->private_data;
5312 	struct trace_iterator *iter = m->private;
5313 	struct trace_array *tr = iter->tr;
5314 	unsigned long val;
5315 	int ret;
5316 
5317 	ret = tracing_update_buffers();
5318 	if (ret < 0)
5319 		return ret;
5320 
5321 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5322 	if (ret)
5323 		return ret;
5324 
5325 	mutex_lock(&trace_types_lock);
5326 
5327 	if (tr->current_trace->use_max_tr) {
5328 		ret = -EBUSY;
5329 		goto out;
5330 	}
5331 
5332 	switch (val) {
5333 	case 0:
5334 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5335 			ret = -EINVAL;
5336 			break;
5337 		}
5338 		if (tr->allocated_snapshot)
5339 			free_snapshot(tr);
5340 		break;
5341 	case 1:
5342 /* Only allow per-cpu swap if the ring buffer supports it */
5343 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5344 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5345 			ret = -EINVAL;
5346 			break;
5347 		}
5348 #endif
5349 		if (!tr->allocated_snapshot) {
5350 			ret = alloc_snapshot(tr);
5351 			if (ret < 0)
5352 				break;
5353 		}
5354 		local_irq_disable();
5355 		/* Now, we're going to swap */
5356 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5357 			update_max_tr(tr, current, smp_processor_id());
5358 		else
5359 			update_max_tr_single(tr, current, iter->cpu_file);
5360 		local_irq_enable();
5361 		break;
5362 	default:
5363 		if (tr->allocated_snapshot) {
5364 			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5365 				tracing_reset_online_cpus(&tr->max_buffer);
5366 			else
5367 				tracing_reset(&tr->max_buffer, iter->cpu_file);
5368 		}
5369 		break;
5370 	}
5371 
5372 	if (ret >= 0) {
5373 		*ppos += cnt;
5374 		ret = cnt;
5375 	}
5376 out:
5377 	mutex_unlock(&trace_types_lock);
5378 	return ret;
5379 }
5380 
5381 static int tracing_snapshot_release(struct inode *inode, struct file *file)
5382 {
5383 	struct seq_file *m = file->private_data;
5384 	int ret;
5385 
5386 	ret = tracing_release(inode, file);
5387 
5388 	if (file->f_mode & FMODE_READ)
5389 		return ret;
5390 
5391 	/* If write only, the seq_file is just a stub */
5392 	if (m)
5393 		kfree(m->private);
5394 	kfree(m);
5395 
5396 	return 0;
5397 }
5398 
5399 static int tracing_buffers_open(struct inode *inode, struct file *filp);
5400 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5401 				    size_t count, loff_t *ppos);
5402 static int tracing_buffers_release(struct inode *inode, struct file *file);
5403 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5404 		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5405 
5406 static int snapshot_raw_open(struct inode *inode, struct file *filp)
5407 {
5408 	struct ftrace_buffer_info *info;
5409 	int ret;
5410 
5411 	ret = tracing_buffers_open(inode, filp);
5412 	if (ret < 0)
5413 		return ret;
5414 
5415 	info = filp->private_data;
5416 
5417 	if (info->iter.trace->use_max_tr) {
5418 		tracing_buffers_release(inode, filp);
5419 		return -EBUSY;
5420 	}
5421 
5422 	info->iter.snapshot = true;
5423 	info->iter.trace_buffer = &info->iter.tr->max_buffer;
5424 
5425 	return ret;
5426 }
5427 
5428 #endif /* CONFIG_TRACER_SNAPSHOT */
5429 
5430 
5431 static const struct file_operations tracing_thresh_fops = {
5432 	.open		= tracing_open_generic,
5433 	.read		= tracing_thresh_read,
5434 	.write		= tracing_thresh_write,
5435 	.llseek		= generic_file_llseek,
5436 };
5437 
5438 static const struct file_operations tracing_max_lat_fops = {
5439 	.open		= tracing_open_generic,
5440 	.read		= tracing_max_lat_read,
5441 	.write		= tracing_max_lat_write,
5442 	.llseek		= generic_file_llseek,
5443 };
5444 
5445 static const struct file_operations set_tracer_fops = {
5446 	.open		= tracing_open_generic,
5447 	.read		= tracing_set_trace_read,
5448 	.write		= tracing_set_trace_write,
5449 	.llseek		= generic_file_llseek,
5450 };
5451 
5452 static const struct file_operations tracing_pipe_fops = {
5453 	.open		= tracing_open_pipe,
5454 	.poll		= tracing_poll_pipe,
5455 	.read		= tracing_read_pipe,
5456 	.splice_read	= tracing_splice_read_pipe,
5457 	.release	= tracing_release_pipe,
5458 	.llseek		= no_llseek,
5459 };
5460 
5461 static const struct file_operations tracing_entries_fops = {
5462 	.open		= tracing_open_generic_tr,
5463 	.read		= tracing_entries_read,
5464 	.write		= tracing_entries_write,
5465 	.llseek		= generic_file_llseek,
5466 	.release	= tracing_release_generic_tr,
5467 };
5468 
5469 static const struct file_operations tracing_total_entries_fops = {
5470 	.open		= tracing_open_generic_tr,
5471 	.read		= tracing_total_entries_read,
5472 	.llseek		= generic_file_llseek,
5473 	.release	= tracing_release_generic_tr,
5474 };
5475 
5476 static const struct file_operations tracing_free_buffer_fops = {
5477 	.open		= tracing_open_generic_tr,
5478 	.write		= tracing_free_buffer_write,
5479 	.release	= tracing_free_buffer_release,
5480 };
5481 
5482 static const struct file_operations tracing_mark_fops = {
5483 	.open		= tracing_open_generic_tr,
5484 	.write		= tracing_mark_write,
5485 	.llseek		= generic_file_llseek,
5486 	.release	= tracing_release_generic_tr,
5487 };
5488 
5489 static const struct file_operations trace_clock_fops = {
5490 	.open		= tracing_clock_open,
5491 	.read		= seq_read,
5492 	.llseek		= seq_lseek,
5493 	.release	= tracing_single_release_tr,
5494 	.write		= tracing_clock_write,
5495 };
5496 
5497 #ifdef CONFIG_TRACER_SNAPSHOT
5498 static const struct file_operations snapshot_fops = {
5499 	.open		= tracing_snapshot_open,
5500 	.read		= seq_read,
5501 	.write		= tracing_snapshot_write,
5502 	.llseek		= tracing_lseek,
5503 	.release	= tracing_snapshot_release,
5504 };
5505 
5506 static const struct file_operations snapshot_raw_fops = {
5507 	.open		= snapshot_raw_open,
5508 	.read		= tracing_buffers_read,
5509 	.release	= tracing_buffers_release,
5510 	.splice_read	= tracing_buffers_splice_read,
5511 	.llseek		= no_llseek,
5512 };
5513 
5514 #endif /* CONFIG_TRACER_SNAPSHOT */
5515 
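/*
 * The buffers interface below (per_cpu/cpuN/trace_pipe_raw) hands whole
 * ring buffer pages to userspace in their binary form, without any text
 * formatting. Readers either copy a page through a spare page (read) or
 * splice the page itself into a pipe (splice_read) for zero-copy transfer.
 */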
5516 static int tracing_buffers_open(struct inode *inode, struct file *filp)
5517 {
5518 	struct trace_array *tr = inode->i_private;
5519 	struct ftrace_buffer_info *info;
5520 	int ret;
5521 
5522 	if (tracing_disabled)
5523 		return -ENODEV;
5524 
5525 	if (trace_array_get(tr) < 0)
5526 		return -ENODEV;
5527 
5528 	info = kzalloc(sizeof(*info), GFP_KERNEL);
5529 	if (!info) {
5530 		trace_array_put(tr);
5531 		return -ENOMEM;
5532 	}
5533 
5534 	mutex_lock(&trace_types_lock);
5535 
5536 	info->iter.tr		= tr;
5537 	info->iter.cpu_file	= tracing_get_cpu(inode);
5538 	info->iter.trace	= tr->current_trace;
5539 	info->iter.trace_buffer = &tr->trace_buffer;
5540 	info->spare		= NULL;
5541 	/* Force reading ring buffer for first read */
5542 	info->read		= (unsigned int)-1;
5543 
5544 	filp->private_data = info;
5545 
5546 	tr->current_trace->ref++;
5547 
5548 	mutex_unlock(&trace_types_lock);
5549 
5550 	ret = nonseekable_open(inode, filp);
5551 	if (ret < 0)
5552 		trace_array_put(tr);
5553 
5554 	return ret;
5555 }
5556 
5557 static unsigned int
5558 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5559 {
5560 	struct ftrace_buffer_info *info = filp->private_data;
5561 	struct trace_iterator *iter = &info->iter;
5562 
5563 	return trace_poll(iter, filp, poll_table);
5564 }
5565 
5566 static ssize_t
5567 tracing_buffers_read(struct file *filp, char __user *ubuf,
5568 		     size_t count, loff_t *ppos)
5569 {
5570 	struct ftrace_buffer_info *info = filp->private_data;
5571 	struct trace_iterator *iter = &info->iter;
5572 	ssize_t ret;
5573 	ssize_t size;
5574 
5575 	if (!count)
5576 		return 0;
5577 
5578 #ifdef CONFIG_TRACER_MAX_TRACE
5579 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5580 		return -EBUSY;
5581 #endif
5582 
5583 	if (!info->spare)
5584 		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5585 							  iter->cpu_file);
5586 	if (!info->spare)
5587 		return -ENOMEM;
5588 
5589 	/* Do we have previous read data to read? */
5590 	if (info->read < PAGE_SIZE)
5591 		goto read;
5592 
5593  again:
5594 	trace_access_lock(iter->cpu_file);
5595 	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5596 				    &info->spare,
5597 				    count,
5598 				    iter->cpu_file, 0);
5599 	trace_access_unlock(iter->cpu_file);
5600 
5601 	if (ret < 0) {
5602 		if (trace_empty(iter)) {
5603 			if ((filp->f_flags & O_NONBLOCK))
5604 				return -EAGAIN;
5605 
5606 			ret = wait_on_pipe(iter, false);
5607 			if (ret)
5608 				return ret;
5609 
5610 			goto again;
5611 		}
5612 		return 0;
5613 	}
5614 
5615 	info->read = 0;
5616  read:
5617 	size = PAGE_SIZE - info->read;
5618 	if (size > count)
5619 		size = count;
5620 
5621 	ret = copy_to_user(ubuf, info->spare + info->read, size);
5622 	if (ret == size)
5623 		return -EFAULT;
5624 
5625 	size -= ret;
5626 
5627 	*ppos += size;
5628 	info->read += size;
5629 
5630 	return size;
5631 }
5632 
5633 static int tracing_buffers_release(struct inode *inode, struct file *file)
5634 {
5635 	struct ftrace_buffer_info *info = file->private_data;
5636 	struct trace_iterator *iter = &info->iter;
5637 
5638 	mutex_lock(&trace_types_lock);
5639 
5640 	iter->tr->current_trace->ref--;
5641 
5642 	__trace_array_put(iter->tr);
5643 
5644 	if (info->spare)
5645 		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5646 	kfree(info);
5647 
5648 	mutex_unlock(&trace_types_lock);
5649 
5650 	return 0;
5651 }
5652 
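/*
 * A buffer_ref pins one ring buffer page that has been handed to a pipe.
 * ref starts at 1 when the page is queued in tracing_buffers_splice_read()
 * and is bumped by buffer_pipe_buf_get() whenever the pipe takes another
 * reference; the page is returned to the ring buffer via
 * ring_buffer_free_read_page() once the last reference is dropped.
 */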
5653 struct buffer_ref {
5654 	struct ring_buffer	*buffer;
5655 	void			*page;
5656 	int			ref;
5657 };
5658 
5659 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5660 				    struct pipe_buffer *buf)
5661 {
5662 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5663 
5664 	if (--ref->ref)
5665 		return;
5666 
5667 	ring_buffer_free_read_page(ref->buffer, ref->page);
5668 	kfree(ref);
5669 	buf->private = 0;
5670 }
5671 
5672 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5673 				struct pipe_buffer *buf)
5674 {
5675 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5676 
5677 	ref->ref++;
5678 }
5679 
5680 /* Pipe buffer operations for a buffer. */
5681 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5682 	.can_merge		= 0,
5683 	.confirm		= generic_pipe_buf_confirm,
5684 	.release		= buffer_pipe_buf_release,
5685 	.steal			= generic_pipe_buf_steal,
5686 	.get			= buffer_pipe_buf_get,
5687 };
5688 
5689 /*
5690  * Callback from splice_to_pipe(), used to release pages left at the
5691  * end of the spd in case we errored out while filling the pipe.
5692  */
5693 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5694 {
5695 	struct buffer_ref *ref =
5696 		(struct buffer_ref *)spd->partial[i].private;
5697 
5698 	if (--ref->ref)
5699 		return;
5700 
5701 	ring_buffer_free_read_page(ref->buffer, ref->page);
5702 	kfree(ref);
5703 	spd->partial[i].private = 0;
5704 }
5705 
5706 static ssize_t
5707 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5708 			    struct pipe_inode_info *pipe, size_t len,
5709 			    unsigned int flags)
5710 {
5711 	struct ftrace_buffer_info *info = file->private_data;
5712 	struct trace_iterator *iter = &info->iter;
5713 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
5714 	struct page *pages_def[PIPE_DEF_BUFFERS];
5715 	struct splice_pipe_desc spd = {
5716 		.pages		= pages_def,
5717 		.partial	= partial_def,
5718 		.nr_pages_max	= PIPE_DEF_BUFFERS,
5719 		.flags		= flags,
5720 		.ops		= &buffer_pipe_buf_ops,
5721 		.spd_release	= buffer_spd_release,
5722 	};
5723 	struct buffer_ref *ref;
5724 	int entries, size, i;
5725 	ssize_t ret = 0;
5726 
5727 #ifdef CONFIG_TRACER_MAX_TRACE
5728 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5729 		return -EBUSY;
5730 #endif
5731 
5732 	if (splice_grow_spd(pipe, &spd))
5733 		return -ENOMEM;
5734 
5735 	if (*ppos & (PAGE_SIZE - 1))
5736 		return -EINVAL;
5737 
5738 	if (len & (PAGE_SIZE - 1)) {
5739 		if (len < PAGE_SIZE)
5740 			return -EINVAL;
5741 		len &= PAGE_MASK;
5742 	}
5743 
5744  again:
5745 	trace_access_lock(iter->cpu_file);
5746 	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5747 
5748 	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5749 		struct page *page;
5750 		int r;
5751 
5752 		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5753 		if (!ref) {
5754 			ret = -ENOMEM;
5755 			break;
5756 		}
5757 
5758 		ref->ref = 1;
5759 		ref->buffer = iter->trace_buffer->buffer;
5760 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5761 		if (!ref->page) {
5762 			ret = -ENOMEM;
5763 			kfree(ref);
5764 			break;
5765 		}
5766 
5767 		r = ring_buffer_read_page(ref->buffer, &ref->page,
5768 					  len, iter->cpu_file, 1);
5769 		if (r < 0) {
5770 			ring_buffer_free_read_page(ref->buffer, ref->page);
5771 			kfree(ref);
5772 			break;
5773 		}
5774 
5775 		/*
5776 		 * Zero out any leftover data; this is going out to
5777 		 * user land.
5778 		 */
5779 		size = ring_buffer_page_len(ref->page);
5780 		if (size < PAGE_SIZE)
5781 			memset(ref->page + size, 0, PAGE_SIZE - size);
5782 
5783 		page = virt_to_page(ref->page);
5784 
5785 		spd.pages[i] = page;
5786 		spd.partial[i].len = PAGE_SIZE;
5787 		spd.partial[i].offset = 0;
5788 		spd.partial[i].private = (unsigned long)ref;
5789 		spd.nr_pages++;
5790 		*ppos += PAGE_SIZE;
5791 
5792 		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5793 	}
5794 
5795 	trace_access_unlock(iter->cpu_file);
5796 	spd.nr_pages = i;
5797 
5798 	/* did we read anything? */
5799 	if (!spd.nr_pages) {
5800 		if (ret)
5801 			return ret;
5802 
5803 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5804 			return -EAGAIN;
5805 
5806 		ret = wait_on_pipe(iter, true);
5807 		if (ret)
5808 			return ret;
5809 
5810 		goto again;
5811 	}
5812 
5813 	ret = splice_to_pipe(pipe, &spd);
5814 	splice_shrink_spd(&spd);
5815 
5816 	return ret;
5817 }
5818 
5819 static const struct file_operations tracing_buffers_fops = {
5820 	.open		= tracing_buffers_open,
5821 	.read		= tracing_buffers_read,
5822 	.poll		= tracing_buffers_poll,
5823 	.release	= tracing_buffers_release,
5824 	.splice_read	= tracing_buffers_splice_read,
5825 	.llseek		= no_llseek,
5826 };
5827 
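/*
 * per_cpu/cpuN/stats: dumps per-cpu ring buffer statistics (entries,
 * overruns, bytes, oldest/now timestamps, dropped and read events) as a
 * small text report built in a temporary trace_seq.
 */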
5828 static ssize_t
5829 tracing_stats_read(struct file *filp, char __user *ubuf,
5830 		   size_t count, loff_t *ppos)
5831 {
5832 	struct inode *inode = file_inode(filp);
5833 	struct trace_array *tr = inode->i_private;
5834 	struct trace_buffer *trace_buf = &tr->trace_buffer;
5835 	int cpu = tracing_get_cpu(inode);
5836 	struct trace_seq *s;
5837 	unsigned long cnt;
5838 	unsigned long long t;
5839 	unsigned long usec_rem;
5840 
5841 	s = kmalloc(sizeof(*s), GFP_KERNEL);
5842 	if (!s)
5843 		return -ENOMEM;
5844 
5845 	trace_seq_init(s);
5846 
5847 	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5848 	trace_seq_printf(s, "entries: %ld\n", cnt);
5849 
5850 	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5851 	trace_seq_printf(s, "overrun: %ld\n", cnt);
5852 
5853 	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5854 	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5855 
5856 	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5857 	trace_seq_printf(s, "bytes: %ld\n", cnt);
5858 
5859 	if (trace_clocks[tr->clock_id].in_ns) {
5860 		/* local or global for trace_clock */
5861 		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5862 		usec_rem = do_div(t, USEC_PER_SEC);
5863 		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5864 								t, usec_rem);
5865 
5866 		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5867 		usec_rem = do_div(t, USEC_PER_SEC);
5868 		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5869 	} else {
5870 		/* counter or tsc mode for trace_clock */
5871 		trace_seq_printf(s, "oldest event ts: %llu\n",
5872 				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5873 
5874 		trace_seq_printf(s, "now ts: %llu\n",
5875 				ring_buffer_time_stamp(trace_buf->buffer, cpu));
5876 	}
5877 
5878 	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5879 	trace_seq_printf(s, "dropped events: %ld\n", cnt);
5880 
5881 	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5882 	trace_seq_printf(s, "read events: %ld\n", cnt);
5883 
5884 	count = simple_read_from_buffer(ubuf, count, ppos,
5885 					s->buffer, trace_seq_used(s));
5886 
5887 	kfree(s);
5888 
5889 	return count;
5890 }
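/*
 * The stats file written above is plain text, one "name: value" pair
 * per line.  Example output (the values are illustrative only):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 6724
 *	oldest event ts:  2725.291661
 *	now ts:  2727.829093
 *	dropped events: 0
 *	read events: 129
 */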
5891 
5892 static const struct file_operations tracing_stats_fops = {
5893 	.open		= tracing_open_generic_tr,
5894 	.read		= tracing_stats_read,
5895 	.llseek		= generic_file_llseek,
5896 	.release	= tracing_release_generic_tr,
5897 };
5898 
5899 #ifdef CONFIG_DYNAMIC_FTRACE
5900 
5901 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5902 {
5903 	return 0;
5904 }
5905 
5906 static ssize_t
5907 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5908 		  size_t cnt, loff_t *ppos)
5909 {
5910 	static char ftrace_dyn_info_buffer[1024];
5911 	static DEFINE_MUTEX(dyn_info_mutex);
5912 	unsigned long *p = filp->private_data;
5913 	char *buf = ftrace_dyn_info_buffer;
5914 	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5915 	int r;
5916 
5917 	mutex_lock(&dyn_info_mutex);
5918 	r = sprintf(buf, "%ld ", *p);
5919 
5920 	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5921 	buf[r++] = '\n';
5922 
5923 	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5924 
5925 	mutex_unlock(&dyn_info_mutex);
5926 
5927 	return r;
5928 }
5929 
5930 static const struct file_operations tracing_dyn_info_fops = {
5931 	.open		= tracing_open_generic,
5932 	.read		= tracing_read_dyn_info,
5933 	.llseek		= generic_file_llseek,
5934 };
5935 #endif /* CONFIG_DYNAMIC_FTRACE */
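/*
 * dyn_ftrace_total_info (created later in tracer_init_tracefs() with
 * &ftrace_update_tot_cnt as its data) prints the number of functions
 * recorded by dynamic ftrace, optionally followed by arch-specific
 * details from ftrace_arch_read_dyn_info().  Example (the count shown
 * is illustrative):
 *
 *	# cat /sys/kernel/tracing/dyn_ftrace_total_info
 *	53489
 */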
5936 
5937 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5938 static void
5939 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5940 {
5941 	tracing_snapshot();
5942 }
5943 
5944 static void
5945 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5946 {
5947 	unsigned long *count = (unsigned long *)data;
5948 
5949 	if (!*count)
5950 		return;
5951 
5952 	if (*count != -1)
5953 		(*count)--;
5954 
5955 	tracing_snapshot();
5956 }
5957 
5958 static int
5959 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5960 		      struct ftrace_probe_ops *ops, void *data)
5961 {
5962 	long count = (long)data;
5963 
5964 	seq_printf(m, "%ps:", (void *)ip);
5965 
5966 	seq_puts(m, "snapshot");
5967 
5968 	if (count == -1)
5969 		seq_puts(m, ":unlimited\n");
5970 	else
5971 		seq_printf(m, ":count=%ld\n", count);
5972 
5973 	return 0;
5974 }
5975 
5976 static struct ftrace_probe_ops snapshot_probe_ops = {
5977 	.func			= ftrace_snapshot,
5978 	.print			= ftrace_snapshot_print,
5979 };
5980 
5981 static struct ftrace_probe_ops snapshot_count_probe_ops = {
5982 	.func			= ftrace_count_snapshot,
5983 	.print			= ftrace_snapshot_print,
5984 };
5985 
5986 static int
5987 ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5988 			       char *glob, char *cmd, char *param, int enable)
5989 {
5990 	struct ftrace_probe_ops *ops;
5991 	void *count = (void *)-1;
5992 	char *number;
5993 	int ret;
5994 
5995 	/* hash funcs only work with set_ftrace_filter */
5996 	if (!enable)
5997 		return -EINVAL;
5998 
5999 	ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;
6000 
6001 	if (glob[0] == '!') {
6002 		unregister_ftrace_function_probe_func(glob+1, ops);
6003 		return 0;
6004 	}
6005 
6006 	if (!param)
6007 		goto out_reg;
6008 
6009 	number = strsep(&param, ":");
6010 
6011 	if (!strlen(number))
6012 		goto out_reg;
6013 
6014 	/*
6015 	 * We use the callback data field (which is a pointer)
6016 	 * as our counter.
6017 	 */
6018 	ret = kstrtoul(number, 0, (unsigned long *)&count);
6019 	if (ret)
6020 		return ret;
6021 
6022  out_reg:
6023 	ret = register_ftrace_function_probe(glob, ops, count);
6024 
6025 	if (ret >= 0)
6026 		alloc_snapshot(&global_trace);
6027 
6028 	return ret < 0 ? ret : 0;
6029 }
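/*
 * The callback above implements the "snapshot" command of
 * set_ftrace_filter.  Typical usage, assuming tracefs is mounted at
 * /sys/kernel/tracing and the function is listed in
 * available_filter_functions:
 *
 *	# echo 'schedule:snapshot' > set_ftrace_filter
 *	# echo 'schedule:snapshot:5' > set_ftrace_filter  (trigger at most 5 times)
 *	# echo '!schedule:snapshot' > set_ftrace_filter   (remove the probe)
 *
 * Registering the probe also allocates the snapshot buffer via
 * alloc_snapshot(&global_trace).
 */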
6030 
6031 static struct ftrace_func_command ftrace_snapshot_cmd = {
6032 	.name			= "snapshot",
6033 	.func			= ftrace_trace_snapshot_callback,
6034 };
6035 
6036 static __init int register_snapshot_cmd(void)
6037 {
6038 	return register_ftrace_command(&ftrace_snapshot_cmd);
6039 }
6040 #else
6041 static inline __init int register_snapshot_cmd(void) { return 0; }
6042 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
6043 
6044 static struct dentry *tracing_get_dentry(struct trace_array *tr)
6045 {
6046 	if (WARN_ON(!tr->dir))
6047 		return ERR_PTR(-ENODEV);
6048 
6049 	/* Top directory uses NULL as the parent */
6050 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6051 		return NULL;
6052 
6053 	/* All sub buffers have a descriptor */
6054 	return tr->dir;
6055 }
6056 
6057 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6058 {
6059 	struct dentry *d_tracer;
6060 
6061 	if (tr->percpu_dir)
6062 		return tr->percpu_dir;
6063 
6064 	d_tracer = tracing_get_dentry(tr);
6065 	if (IS_ERR(d_tracer))
6066 		return NULL;
6067 
6068 	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
6069 
6070 	WARN_ONCE(!tr->percpu_dir,
6071 		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
6072 
6073 	return tr->percpu_dir;
6074 }
6075 
6076 static struct dentry *
6077 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6078 		      void *data, long cpu, const struct file_operations *fops)
6079 {
6080 	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6081 
6082 	if (ret) /* See tracing_get_cpu() */
6083 		d_inode(ret)->i_cdev = (void *)(cpu + 1);
6084 	return ret;
6085 }
6086 
6087 static void
6088 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
6089 {
6090 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
6091 	struct dentry *d_cpu;
6092 	char cpu_dir[30]; /* 30 characters should be more than enough */
6093 
6094 	if (!d_percpu)
6095 		return;
6096 
6097 	snprintf(cpu_dir, 30, "cpu%ld", cpu);
6098 	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
6099 	if (!d_cpu) {
6100 		pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
6101 		return;
6102 	}
6103 
6104 	/* per cpu trace_pipe */
6105 	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
6106 				tr, cpu, &tracing_pipe_fops);
6107 
6108 	/* per cpu trace */
6109 	trace_create_cpu_file("trace", 0644, d_cpu,
6110 				tr, cpu, &tracing_fops);
6111 
6112 	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
6113 				tr, cpu, &tracing_buffers_fops);
6114 
6115 	trace_create_cpu_file("stats", 0444, d_cpu,
6116 				tr, cpu, &tracing_stats_fops);
6117 
6118 	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
6119 				tr, cpu, &tracing_entries_fops);
6120 
6121 #ifdef CONFIG_TRACER_SNAPSHOT
6122 	trace_create_cpu_file("snapshot", 0644, d_cpu,
6123 				tr, cpu, &snapshot_fops);
6124 
6125 	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
6126 				tr, cpu, &snapshot_raw_fops);
6127 #endif
6128 }
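/*
 * The function above gives each tracing CPU its own directory with
 * the following files (paths relative to the tracefs mount point):
 *
 *	per_cpu/cpuN/trace_pipe
 *	per_cpu/cpuN/trace
 *	per_cpu/cpuN/trace_pipe_raw
 *	per_cpu/cpuN/stats
 *	per_cpu/cpuN/buffer_size_kb
 *	per_cpu/cpuN/snapshot      (CONFIG_TRACER_SNAPSHOT only)
 *	per_cpu/cpuN/snapshot_raw  (CONFIG_TRACER_SNAPSHOT only)
 */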
6129 
6130 #ifdef CONFIG_FTRACE_SELFTEST
6131 /* Let selftest have access to static functions in this file */
6132 #include "trace_selftest.c"
6133 #endif
6134 
6135 struct trace_option_dentry {
6136 	struct tracer_opt		*opt;
6137 	struct tracer_flags		*flags;
6138 	struct trace_array		*tr;
6139 	struct dentry			*entry;
6140 };
6141 
6142 static ssize_t
6143 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6144 			loff_t *ppos)
6145 {
6146 	struct trace_option_dentry *topt = filp->private_data;
6147 	char *buf;
6148 
6149 	if (topt->flags->val & topt->opt->bit)
6150 		buf = "1\n";
6151 	else
6152 		buf = "0\n";
6153 
6154 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6155 }
6156 
6157 static ssize_t
6158 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6159 			 loff_t *ppos)
6160 {
6161 	struct trace_option_dentry *topt = filp->private_data;
6162 	unsigned long val;
6163 	int ret;
6164 
6165 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6166 	if (ret)
6167 		return ret;
6168 
6169 	if (val != 0 && val != 1)
6170 		return -EINVAL;
6171 
6172 	if (!!(topt->flags->val & topt->opt->bit) != val) {
6173 		mutex_lock(&trace_types_lock);
6174 		ret = __set_tracer_option(topt->tr, topt->flags,
6175 					  topt->opt, !val);
6176 		mutex_unlock(&trace_types_lock);
6177 		if (ret)
6178 			return ret;
6179 	}
6180 
6181 	*ppos += cnt;
6182 
6183 	return cnt;
6184 }
6185 
6186 
6187 static const struct file_operations trace_options_fops = {
6188 	.open = tracing_open_generic,
6189 	.read = trace_options_read,
6190 	.write = trace_options_write,
6191 	.llseek	= generic_file_llseek,
6192 };
6193 
6194 static ssize_t
6195 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6196 			loff_t *ppos)
6197 {
6198 	long index = (long)filp->private_data;
6199 	char *buf;
6200 
6201 	if (trace_flags & (1 << index))
6202 		buf = "1\n";
6203 	else
6204 		buf = "0\n";
6205 
6206 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6207 }
6208 
6209 static ssize_t
6210 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6211 			 loff_t *ppos)
6212 {
6213 	struct trace_array *tr = &global_trace;
6214 	long index = (long)filp->private_data;
6215 	unsigned long val;
6216 	int ret;
6217 
6218 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6219 	if (ret)
6220 		return ret;
6221 
6222 	if (val != 0 && val != 1)
6223 		return -EINVAL;
6224 
6225 	mutex_lock(&trace_types_lock);
6226 	ret = set_tracer_flag(tr, 1 << index, val);
6227 	mutex_unlock(&trace_types_lock);
6228 
6229 	if (ret < 0)
6230 		return ret;
6231 
6232 	*ppos += cnt;
6233 
6234 	return cnt;
6235 }
6236 
6237 static const struct file_operations trace_options_core_fops = {
6238 	.open = tracing_open_generic,
6239 	.read = trace_options_core_read,
6240 	.write = trace_options_core_write,
6241 	.llseek = generic_file_llseek,
6242 };
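/*
 * Both sets of fops above back the files in the options/ directory:
 * core trace_flags bits use trace_options_core_fops, while per-tracer
 * flags use trace_options_fops.  Each file reads as "0" or "1" and is
 * toggled with a plain write, e.g. (relative to the tracefs mount):
 *
 *	# cat options/sym-offset
 *	0
 *	# echo 1 > options/sym-offset
 */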
6243 
6244 struct dentry *trace_create_file(const char *name,
6245 				 umode_t mode,
6246 				 struct dentry *parent,
6247 				 void *data,
6248 				 const struct file_operations *fops)
6249 {
6250 	struct dentry *ret;
6251 
6252 	ret = tracefs_create_file(name, mode, parent, data, fops);
6253 	if (!ret)
6254 		pr_warning("Could not create tracefs '%s' entry\n", name);
6255 
6256 	return ret;
6257 }
6258 
6259 
6260 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6261 {
6262 	struct dentry *d_tracer;
6263 
6264 	if (tr->options)
6265 		return tr->options;
6266 
6267 	d_tracer = tracing_get_dentry(tr);
6268 	if (IS_ERR(d_tracer))
6269 		return NULL;
6270 
6271 	tr->options = tracefs_create_dir("options", d_tracer);
6272 	if (!tr->options) {
6273 		pr_warning("Could not create tracefs directory 'options'\n");
6274 		return NULL;
6275 	}
6276 
6277 	return tr->options;
6278 }
6279 
6280 static void
6281 create_trace_option_file(struct trace_array *tr,
6282 			 struct trace_option_dentry *topt,
6283 			 struct tracer_flags *flags,
6284 			 struct tracer_opt *opt)
6285 {
6286 	struct dentry *t_options;
6287 
6288 	t_options = trace_options_init_dentry(tr);
6289 	if (!t_options)
6290 		return;
6291 
6292 	topt->flags = flags;
6293 	topt->opt = opt;
6294 	topt->tr = tr;
6295 
6296 	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6297 				    &trace_options_fops);
6298 
6299 }
6300 
6301 static struct trace_option_dentry *
6302 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6303 {
6304 	struct trace_option_dentry *topts;
6305 	struct tracer_flags *flags;
6306 	struct tracer_opt *opts;
6307 	int cnt;
6308 
6309 	if (!tracer)
6310 		return NULL;
6311 
6312 	flags = tracer->flags;
6313 
6314 	if (!flags || !flags->opts)
6315 		return NULL;
6316 
6317 	opts = flags->opts;
6318 
6319 	for (cnt = 0; opts[cnt].name; cnt++)
6320 		;
6321 
6322 	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6323 	if (!topts)
6324 		return NULL;
6325 
6326 	for (cnt = 0; opts[cnt].name; cnt++)
6327 		create_trace_option_file(tr, &topts[cnt], flags,
6328 					 &opts[cnt]);
6329 
6330 	return topts;
6331 }
6332 
6333 static void
6334 destroy_trace_option_files(struct trace_option_dentry *topts)
6335 {
6336 	int cnt;
6337 
6338 	if (!topts)
6339 		return;
6340 
6341 	for (cnt = 0; topts[cnt].opt; cnt++)
6342 		tracefs_remove(topts[cnt].entry);
6343 
6344 	kfree(topts);
6345 }
6346 
6347 static struct dentry *
6348 create_trace_option_core_file(struct trace_array *tr,
6349 			      const char *option, long index)
6350 {
6351 	struct dentry *t_options;
6352 
6353 	t_options = trace_options_init_dentry(tr);
6354 	if (!t_options)
6355 		return NULL;
6356 
6357 	return trace_create_file(option, 0644, t_options, (void *)index,
6358 				    &trace_options_core_fops);
6359 }
6360 
6361 static __init void create_trace_options_dir(struct trace_array *tr)
6362 {
6363 	struct dentry *t_options;
6364 	int i;
6365 
6366 	t_options = trace_options_init_dentry(tr);
6367 	if (!t_options)
6368 		return;
6369 
6370 	for (i = 0; trace_options[i]; i++)
6371 		create_trace_option_core_file(tr, trace_options[i], i);
6372 }
6373 
6374 static ssize_t
6375 rb_simple_read(struct file *filp, char __user *ubuf,
6376 	       size_t cnt, loff_t *ppos)
6377 {
6378 	struct trace_array *tr = filp->private_data;
6379 	char buf[64];
6380 	int r;
6381 
6382 	r = tracer_tracing_is_on(tr);
6383 	r = sprintf(buf, "%d\n", r);
6384 
6385 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6386 }
6387 
6388 static ssize_t
6389 rb_simple_write(struct file *filp, const char __user *ubuf,
6390 		size_t cnt, loff_t *ppos)
6391 {
6392 	struct trace_array *tr = filp->private_data;
6393 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
6394 	unsigned long val;
6395 	int ret;
6396 
6397 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6398 	if (ret)
6399 		return ret;
6400 
6401 	if (buffer) {
6402 		mutex_lock(&trace_types_lock);
6403 		if (val) {
6404 			tracer_tracing_on(tr);
6405 			if (tr->current_trace->start)
6406 				tr->current_trace->start(tr);
6407 		} else {
6408 			tracer_tracing_off(tr);
6409 			if (tr->current_trace->stop)
6410 				tr->current_trace->stop(tr);
6411 		}
6412 		mutex_unlock(&trace_types_lock);
6413 	}
6414 
6415 	(*ppos)++;
6416 
6417 	return cnt;
6418 }
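/*
 * rb_simple_read()/rb_simple_write() above back the per-instance
 * "tracing_on" file:
 *
 *	# echo 0 > tracing_on    (stop recording, keep the buffer)
 *	# echo 1 > tracing_on    (resume recording)
 *
 * This only gates writes to the ring buffer; it does not change the
 * current tracer or free any memory.
 */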
6419 
6420 static const struct file_operations rb_simple_fops = {
6421 	.open		= tracing_open_generic_tr,
6422 	.read		= rb_simple_read,
6423 	.write		= rb_simple_write,
6424 	.release	= tracing_release_generic_tr,
6425 	.llseek		= default_llseek,
6426 };
6427 
6428 struct dentry *trace_instance_dir;
6429 
6430 static void
6431 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
6432 
6433 static int
6434 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6435 {
6436 	enum ring_buffer_flags rb_flags;
6437 
6438 	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6439 
6440 	buf->tr = tr;
6441 
6442 	buf->buffer = ring_buffer_alloc(size, rb_flags);
6443 	if (!buf->buffer)
6444 		return -ENOMEM;
6445 
6446 	buf->data = alloc_percpu(struct trace_array_cpu);
6447 	if (!buf->data) {
6448 		ring_buffer_free(buf->buffer);
6449 		return -ENOMEM;
6450 	}
6451 
6452 	/* Allocate the first page for all buffers */
6453 	set_buffer_entries(&tr->trace_buffer,
6454 			   ring_buffer_size(tr->trace_buffer.buffer, 0));
6455 
6456 	return 0;
6457 }
6458 
6459 static int allocate_trace_buffers(struct trace_array *tr, int size)
6460 {
6461 	int ret;
6462 
6463 	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6464 	if (ret)
6465 		return ret;
6466 
6467 #ifdef CONFIG_TRACER_MAX_TRACE
6468 	ret = allocate_trace_buffer(tr, &tr->max_buffer,
6469 				    allocate_snapshot ? size : 1);
6470 	if (WARN_ON(ret)) {
6471 		ring_buffer_free(tr->trace_buffer.buffer);
6472 		free_percpu(tr->trace_buffer.data);
6473 		return -ENOMEM;
6474 	}
6475 	tr->allocated_snapshot = allocate_snapshot;
6476 
6477 	/*
6478 	 * Only the top level trace array gets its snapshot allocated
6479 	 * from the kernel command line.
6480 	 */
6481 	allocate_snapshot = false;
6482 #endif
6483 	return 0;
6484 }
6485 
6486 static void free_trace_buffer(struct trace_buffer *buf)
6487 {
6488 	if (buf->buffer) {
6489 		ring_buffer_free(buf->buffer);
6490 		buf->buffer = NULL;
6491 		free_percpu(buf->data);
6492 		buf->data = NULL;
6493 	}
6494 }
6495 
6496 static void free_trace_buffers(struct trace_array *tr)
6497 {
6498 	if (!tr)
6499 		return;
6500 
6501 	free_trace_buffer(&tr->trace_buffer);
6502 
6503 #ifdef CONFIG_TRACER_MAX_TRACE
6504 	free_trace_buffer(&tr->max_buffer);
6505 #endif
6506 }
6507 
6508 static int instance_mkdir(const char *name)
6509 {
6510 	struct trace_array *tr;
6511 	int ret;
6512 
6513 	mutex_lock(&trace_types_lock);
6514 
6515 	ret = -EEXIST;
6516 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6517 		if (tr->name && strcmp(tr->name, name) == 0)
6518 			goto out_unlock;
6519 	}
6520 
6521 	ret = -ENOMEM;
6522 	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6523 	if (!tr)
6524 		goto out_unlock;
6525 
6526 	tr->name = kstrdup(name, GFP_KERNEL);
6527 	if (!tr->name)
6528 		goto out_free_tr;
6529 
6530 	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6531 		goto out_free_tr;
6532 
6533 	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6534 
6535 	raw_spin_lock_init(&tr->start_lock);
6536 
6537 	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6538 
6539 	tr->current_trace = &nop_trace;
6540 
6541 	INIT_LIST_HEAD(&tr->systems);
6542 	INIT_LIST_HEAD(&tr->events);
6543 
6544 	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6545 		goto out_free_tr;
6546 
6547 	tr->dir = tracefs_create_dir(name, trace_instance_dir);
6548 	if (!tr->dir)
6549 		goto out_free_tr;
6550 
6551 	ret = event_trace_add_tracer(tr->dir, tr);
6552 	if (ret) {
6553 		tracefs_remove_recursive(tr->dir);
6554 		goto out_free_tr;
6555 	}
6556 
6557 	init_tracer_tracefs(tr, tr->dir);
6558 
6559 	list_add(&tr->list, &ftrace_trace_arrays);
6560 
6561 	mutex_unlock(&trace_types_lock);
6562 
6563 	return 0;
6564 
6565  out_free_tr:
6566 	free_trace_buffers(tr);
6567 	free_cpumask_var(tr->tracing_cpumask);
6568 	kfree(tr->name);
6569 	kfree(tr);
6570 
6571  out_unlock:
6572 	mutex_unlock(&trace_types_lock);
6573 
6574 	return ret;
6575 
6576 }
6577 
6578 static int instance_rmdir(const char *name)
6579 {
6580 	struct trace_array *tr;
6581 	int found = 0;
6582 	int ret;
6583 
6584 	mutex_lock(&trace_types_lock);
6585 
6586 	ret = -ENODEV;
6587 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6588 		if (tr->name && strcmp(tr->name, name) == 0) {
6589 			found = 1;
6590 			break;
6591 		}
6592 	}
6593 	if (!found)
6594 		goto out_unlock;
6595 
6596 	ret = -EBUSY;
6597 	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6598 		goto out_unlock;
6599 
6600 	list_del(&tr->list);
6601 
6602 	tracing_set_nop(tr);
6603 	event_trace_del_tracer(tr);
6604 	ftrace_destroy_function_files(tr);
6605 	tracefs_remove_recursive(tr->dir);
6606 	free_trace_buffers(tr);
6607 
6608 	kfree(tr->name);
6609 	kfree(tr);
6610 
6611 	ret = 0;
6612 
6613  out_unlock:
6614 	mutex_unlock(&trace_types_lock);
6615 
6616 	return ret;
6617 }
6618 
6619 static __init void create_trace_instances(struct dentry *d_tracer)
6620 {
6621 	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6622 							 instance_mkdir,
6623 							 instance_rmdir);
6624 	if (WARN_ON(!trace_instance_dir))
6625 		return;
6626 }
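/*
 * instance_mkdir()/instance_rmdir() above are called by tracefs when
 * a directory is created or removed under instances/, giving each
 * instance its own buffers and event files, e.g.:
 *
 *	# mkdir /sys/kernel/tracing/instances/foo
 *	# echo 1 > /sys/kernel/tracing/instances/foo/events/sched/enable
 *	# rmdir /sys/kernel/tracing/instances/foo
 *
 * The rmdir fails with -EBUSY while the instance is still referenced,
 * for example by an open trace_pipe reader.
 */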
6627 
6628 static void
6629 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6630 {
6631 	int cpu;
6632 
6633 	trace_create_file("available_tracers", 0444, d_tracer,
6634 			tr, &show_traces_fops);
6635 
6636 	trace_create_file("current_tracer", 0644, d_tracer,
6637 			tr, &set_tracer_fops);
6638 
6639 	trace_create_file("tracing_cpumask", 0644, d_tracer,
6640 			  tr, &tracing_cpumask_fops);
6641 
6642 	trace_create_file("trace_options", 0644, d_tracer,
6643 			  tr, &tracing_iter_fops);
6644 
6645 	trace_create_file("trace", 0644, d_tracer,
6646 			  tr, &tracing_fops);
6647 
6648 	trace_create_file("trace_pipe", 0444, d_tracer,
6649 			  tr, &tracing_pipe_fops);
6650 
6651 	trace_create_file("buffer_size_kb", 0644, d_tracer,
6652 			  tr, &tracing_entries_fops);
6653 
6654 	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6655 			  tr, &tracing_total_entries_fops);
6656 
6657 	trace_create_file("free_buffer", 0200, d_tracer,
6658 			  tr, &tracing_free_buffer_fops);
6659 
6660 	trace_create_file("trace_marker", 0220, d_tracer,
6661 			  tr, &tracing_mark_fops);
6662 
6663 	trace_create_file("trace_clock", 0644, d_tracer, tr,
6664 			  &trace_clock_fops);
6665 
6666 	trace_create_file("tracing_on", 0644, d_tracer,
6667 			  tr, &rb_simple_fops);
6668 
6669 #ifdef CONFIG_TRACER_MAX_TRACE
6670 	trace_create_file("tracing_max_latency", 0644, d_tracer,
6671 			&tr->max_latency, &tracing_max_lat_fops);
6672 #endif
6673 
6674 	if (ftrace_create_function_files(tr, d_tracer))
6675 		WARN(1, "Could not allocate function filter files");
6676 
6677 #ifdef CONFIG_TRACER_SNAPSHOT
6678 	trace_create_file("snapshot", 0644, d_tracer,
6679 			  tr, &snapshot_fops);
6680 #endif
6681 
6682 	for_each_tracing_cpu(cpu)
6683 		tracing_init_tracefs_percpu(tr, cpu);
6684 
6685 }
6686 
6687 static struct vfsmount *trace_automount(void *ignore)
6688 {
6689 	struct vfsmount *mnt;
6690 	struct file_system_type *type;
6691 
6692 	/*
6693 	 * To maintain backward compatibility for tools that mount
6694 	 * debugfs to get to the tracing facility, tracefs is automatically
6695 	 * mounted to the debugfs/tracing directory.
6696 	 */
6697 	type = get_fs_type("tracefs");
6698 	if (!type)
6699 		return NULL;
6700 	mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6701 	put_filesystem(type);
6702 	if (IS_ERR(mnt))
6703 		return NULL;
6704 	mntget(mnt);
6705 
6706 	return mnt;
6707 }
6708 
6709 /**
6710  * tracing_init_dentry - initialize top level trace array
6711  *
6712  * This is called when creating files or directories in the tracing
6713  * directory. It is called via fs_initcall() by any of the boot-up code
6714  * and is expected to return the dentry of the top level tracing directory.
6715  */
6716 struct dentry *tracing_init_dentry(void)
6717 {
6718 	struct trace_array *tr = &global_trace;
6719 
6720 	/* The top level trace array uses NULL as parent */
6721 	if (tr->dir)
6722 		return NULL;
6723 
6724 	if (WARN_ON(!debugfs_initialized()))
6725 		return ERR_PTR(-ENODEV);
6726 
6727 	/*
6728 	 * As there may still be users that expect the tracing
6729 	 * files to exist in debugfs/tracing, we must automount
6730 	 * the tracefs file system there, so older tools still
6731 	 * work with the newer kerenl.
6732 	 * work with the newer kernel.
6733 	tr->dir = debugfs_create_automount("tracing", NULL,
6734 					   trace_automount, NULL);
6735 	if (!tr->dir) {
6736 		pr_warn_once("Could not create debugfs directory 'tracing'\n");
6737 		return ERR_PTR(-ENOMEM);
6738 	}
6739 
6740 	return NULL;
6741 }
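/*
 * With the automount set up above, both of these paths typically
 * reach the same tracefs files (exact mount points depend on the
 * system configuration):
 *
 *	/sys/kernel/tracing/           (tracefs mounted directly)
 *	/sys/kernel/debug/tracing/     (automounted below debugfs)
 */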
6742 
6743 extern struct trace_enum_map *__start_ftrace_enum_maps[];
6744 extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6745 
6746 static void __init trace_enum_init(void)
6747 {
6748 	int len;
6749 
6750 	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6751 	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
6752 }
6753 
6754 #ifdef CONFIG_MODULES
6755 static void trace_module_add_enums(struct module *mod)
6756 {
6757 	if (!mod->num_trace_enums)
6758 		return;
6759 
6760 	/*
6761 	 * Modules with bad taint do not have events created;
6762 	 * do not bother with enums either.
6763 	 */
6764 	if (trace_module_has_bad_taint(mod))
6765 		return;
6766 
6767 	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
6768 }
6769 
6770 #ifdef CONFIG_TRACE_ENUM_MAP_FILE
6771 static void trace_module_remove_enums(struct module *mod)
6772 {
6773 	union trace_enum_map_item *map;
6774 	union trace_enum_map_item **last = &trace_enum_maps;
6775 
6776 	if (!mod->num_trace_enums)
6777 		return;
6778 
6779 	mutex_lock(&trace_enum_mutex);
6780 
6781 	map = trace_enum_maps;
6782 
6783 	while (map) {
6784 		if (map->head.mod == mod)
6785 			break;
6786 		map = trace_enum_jmp_to_tail(map);
6787 		last = &map->tail.next;
6788 		map = map->tail.next;
6789 	}
6790 	if (!map)
6791 		goto out;
6792 
6793 	*last = trace_enum_jmp_to_tail(map)->tail.next;
6794 	kfree(map);
6795  out:
6796 	mutex_unlock(&trace_enum_mutex);
6797 }
6798 #else
6799 static inline void trace_module_remove_enums(struct module *mod) { }
6800 #endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6801 
6802 static int trace_module_notify(struct notifier_block *self,
6803 			       unsigned long val, void *data)
6804 {
6805 	struct module *mod = data;
6806 
6807 	switch (val) {
6808 	case MODULE_STATE_COMING:
6809 		trace_module_add_enums(mod);
6810 		break;
6811 	case MODULE_STATE_GOING:
6812 		trace_module_remove_enums(mod);
6813 		break;
6814 	}
6815 
6816 	return 0;
6817 }
6818 
6819 static struct notifier_block trace_module_nb = {
6820 	.notifier_call = trace_module_notify,
6821 	.priority = 0,
6822 };
6823 #endif /* CONFIG_MODULES */
6824 
6825 static __init int tracer_init_tracefs(void)
6826 {
6827 	struct dentry *d_tracer;
6828 
6829 	trace_access_lock_init();
6830 
6831 	d_tracer = tracing_init_dentry();
6832 	if (IS_ERR(d_tracer))
6833 		return 0;
6834 
6835 	init_tracer_tracefs(&global_trace, d_tracer);
6836 
6837 	trace_create_file("tracing_thresh", 0644, d_tracer,
6838 			&global_trace, &tracing_thresh_fops);
6839 
6840 	trace_create_file("README", 0444, d_tracer,
6841 			NULL, &tracing_readme_fops);
6842 
6843 	trace_create_file("saved_cmdlines", 0444, d_tracer,
6844 			NULL, &tracing_saved_cmdlines_fops);
6845 
6846 	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6847 			  NULL, &tracing_saved_cmdlines_size_fops);
6848 
6849 	trace_enum_init();
6850 
6851 	trace_create_enum_file(d_tracer);
6852 
6853 #ifdef CONFIG_MODULES
6854 	register_module_notifier(&trace_module_nb);
6855 #endif
6856 
6857 #ifdef CONFIG_DYNAMIC_FTRACE
6858 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6859 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6860 #endif
6861 
6862 	create_trace_instances(d_tracer);
6863 
6864 	create_trace_options_dir(&global_trace);
6865 
6866 	/* If the tracer was started via cmdline, create options for it here */
6867 	if (global_trace.current_trace != &nop_trace)
6868 		update_tracer_options(&global_trace, global_trace.current_trace);
6869 
6870 	return 0;
6871 }
6872 
6873 static int trace_panic_handler(struct notifier_block *this,
6874 			       unsigned long event, void *unused)
6875 {
6876 	if (ftrace_dump_on_oops)
6877 		ftrace_dump(ftrace_dump_on_oops);
6878 	return NOTIFY_OK;
6879 }
6880 
6881 static struct notifier_block trace_panic_notifier = {
6882 	.notifier_call  = trace_panic_handler,
6883 	.next           = NULL,
6884 	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
6885 };
6886 
6887 static int trace_die_handler(struct notifier_block *self,
6888 			     unsigned long val,
6889 			     void *data)
6890 {
6891 	switch (val) {
6892 	case DIE_OOPS:
6893 		if (ftrace_dump_on_oops)
6894 			ftrace_dump(ftrace_dump_on_oops);
6895 		break;
6896 	default:
6897 		break;
6898 	}
6899 	return NOTIFY_OK;
6900 }
6901 
6902 static struct notifier_block trace_die_notifier = {
6903 	.notifier_call = trace_die_handler,
6904 	.priority = 200
6905 };
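/*
 * The two notifiers above implement the ftrace_dump_on_oops behavior.
 * It can be enabled with the "ftrace_dump_on_oops" boot parameter
 * (optionally "=orig_cpu" to dump only the faulting CPU's buffer) or
 * at run time, e.g.:
 *
 *	# echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */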
6906 
6907 /*
6908  * printk is set to a max of 1024; we really don't need it that big.
6909  * Nothing should be printing 1000 characters anyway.
6910  */
6911 #define TRACE_MAX_PRINT		1000
6912 
6913 /*
6914  * Define here KERN_TRACE so that we have one place to modify
6915  * it if we decide to change what log level the ftrace dump
6916  * should be at.
6917  */
6918 #define KERN_TRACE		KERN_EMERG
6919 
6920 void
6921 trace_printk_seq(struct trace_seq *s)
6922 {
6923 	/* Probably should print a warning here. */
6924 	if (s->seq.len >= TRACE_MAX_PRINT)
6925 		s->seq.len = TRACE_MAX_PRINT;
6926 
6927 	/*
6928 	 * More paranoid code. Although the buffer size is set to
6929 	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6930 	 * an extra layer of protection.
6931 	 */
6932 	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6933 		s->seq.len = s->seq.size - 1;
6934 
6935 	/* should be zero ended, but we are paranoid. */
6936 	/* should be zero terminated, but we are paranoid. */
6937 
6938 	printk(KERN_TRACE "%s", s->buffer);
6939 
6940 	trace_seq_init(s);
6941 }
6942 
6943 void trace_init_global_iter(struct trace_iterator *iter)
6944 {
6945 	iter->tr = &global_trace;
6946 	iter->trace = iter->tr->current_trace;
6947 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
6948 	iter->trace_buffer = &global_trace.trace_buffer;
6949 
6950 	if (iter->trace && iter->trace->open)
6951 		iter->trace->open(iter);
6952 
6953 	/* Annotate start of buffers if we had overruns */
6954 	if (ring_buffer_overruns(iter->trace_buffer->buffer))
6955 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
6956 
6957 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
6958 	if (trace_clocks[iter->tr->clock_id].in_ns)
6959 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6960 }
6961 
6962 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6963 {
6964 	/* use static because iter can be a bit big for the stack */
6965 	static struct trace_iterator iter;
6966 	static atomic_t dump_running;
6967 	unsigned int old_userobj;
6968 	unsigned long flags;
6969 	int cnt = 0, cpu;
6970 
6971 	/* Only allow one dump user at a time. */
6972 	if (atomic_inc_return(&dump_running) != 1) {
6973 		atomic_dec(&dump_running);
6974 		return;
6975 	}
6976 
6977 	/*
6978 	 * Always turn off tracing when we dump.
6979 	 * We don't need to show trace output of what happens
6980 	 * between multiple crashes.
6981 	 *
6982 	 * If the user does a sysrq-z, then they can re-enable
6983 	 * tracing with echo 1 > tracing_on.
6984 	 */
6985 	tracing_off();
6986 
6987 	local_irq_save(flags);
6988 
6989 	/* Simulate the iterator */
6990 	trace_init_global_iter(&iter);
6991 
6992 	for_each_tracing_cpu(cpu) {
6993 		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6994 	}
6995 
6996 	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6997 
6998 	/* don't look at user memory in panic mode */
6999 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7000 
7001 	switch (oops_dump_mode) {
7002 	case DUMP_ALL:
7003 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
7004 		break;
7005 	case DUMP_ORIG:
7006 		iter.cpu_file = raw_smp_processor_id();
7007 		break;
7008 	case DUMP_NONE:
7009 		goto out_enable;
7010 	default:
7011 		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
7012 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
7013 	}
7014 
7015 	printk(KERN_TRACE "Dumping ftrace buffer:\n");
7016 
7017 	/* Did function tracer already get disabled? */
7018 	if (ftrace_is_dead()) {
7019 		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7020 		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
7021 	}
7022 
7023 	/*
7024 	 * We need to stop all tracing on all CPUs to read
7025 	 * the next buffer. This is a bit expensive, but is
7026 	 * not done often. We read all that we can,
7027 	 * and then release the locks again.
7028 	 */
7029 
7030 	while (!trace_empty(&iter)) {
7031 
7032 		if (!cnt)
7033 			printk(KERN_TRACE "---------------------------------\n");
7034 
7035 		cnt++;
7036 
7037 		/* reset all but tr, trace, and overruns */
7038 		memset(&iter.seq, 0,
7039 		       sizeof(struct trace_iterator) -
7040 		       offsetof(struct trace_iterator, seq));
7041 		iter.iter_flags |= TRACE_FILE_LAT_FMT;
7042 		iter.pos = -1;
7043 
7044 		if (trace_find_next_entry_inc(&iter) != NULL) {
7045 			int ret;
7046 
7047 			ret = print_trace_line(&iter);
7048 			if (ret != TRACE_TYPE_NO_CONSUME)
7049 				trace_consume(&iter);
7050 		}
7051 		touch_nmi_watchdog();
7052 
7053 		trace_printk_seq(&iter.seq);
7054 	}
7055 
7056 	if (!cnt)
7057 		printk(KERN_TRACE "   (ftrace buffer empty)\n");
7058 	else
7059 		printk(KERN_TRACE "---------------------------------\n");
7060 
7061  out_enable:
7062 	trace_flags |= old_userobj;
7063 
7064 	for_each_tracing_cpu(cpu) {
7065 		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7066 	}
7067 	atomic_dec(&dump_running);
7068 	local_irq_restore(flags);
7069 }
7070 EXPORT_SYMBOL_GPL(ftrace_dump);
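/*
 * Besides the oops and panic notifiers, ftrace_dump() can be invoked
 * manually through sysrq, e.g.:
 *
 *	# echo z > /proc/sysrq-trigger
 *
 * Since the dump turns tracing off, re-enable it afterwards with
 * "echo 1 > tracing_on" if more tracing is wanted.
 */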
7071 
7072 __init static int tracer_alloc_buffers(void)
7073 {
7074 	int ring_buf_size;
7075 	int ret = -ENOMEM;
7076 
7077 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7078 		goto out;
7079 
7080 	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
7081 		goto out_free_buffer_mask;
7082 
7083 	/* Only allocate trace_printk buffers if a trace_printk exists */
7084 	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
7085 		/* Must be called before global_trace.buffer is allocated */
7086 		trace_printk_init_buffers();
7087 
7088 	/* To save memory, keep the ring buffer size to its minimum */
7089 	if (ring_buffer_expanded)
7090 		ring_buf_size = trace_buf_size;
7091 	else
7092 		ring_buf_size = 1;
7093 
7094 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
7095 	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
7096 
7097 	raw_spin_lock_init(&global_trace.start_lock);
7098 
7099 	/* Used for event triggers */
7100 	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7101 	if (!temp_buffer)
7102 		goto out_free_cpumask;
7103 
7104 	if (trace_create_savedcmd() < 0)
7105 		goto out_free_temp_buffer;
7106 
7107 	/* TODO: make the number of buffers hot pluggable with CPUS */
7108 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
7109 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7110 		WARN_ON(1);
7111 		goto out_free_savedcmd;
7112 	}
7113 
7114 	if (global_trace.buffer_disabled)
7115 		tracing_off();
7116 
7117 	if (trace_boot_clock) {
7118 		ret = tracing_set_clock(&global_trace, trace_boot_clock);
7119 		if (ret < 0)
7120 			pr_warning("Trace clock %s not defined, going back to default\n",
7121 				   trace_boot_clock);
7122 	}
7123 
7124 	/*
7125 	 * register_tracer() might reference current_trace, so it
7126 	 * needs to be set before we register anything. This is
7127 	 * just a bootstrap of current_trace anyway.
7128 	 */
7129 	global_trace.current_trace = &nop_trace;
7130 
7131 	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7132 
7133 	ftrace_init_global_array_ops(&global_trace);
7134 
7135 	register_tracer(&nop_trace);
7136 
7137 	/* All seems OK, enable tracing */
7138 	tracing_disabled = 0;
7139 
7140 	atomic_notifier_chain_register(&panic_notifier_list,
7141 				       &trace_panic_notifier);
7142 
7143 	register_die_notifier(&trace_die_notifier);
7144 
7145 	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7146 
7147 	INIT_LIST_HEAD(&global_trace.systems);
7148 	INIT_LIST_HEAD(&global_trace.events);
7149 	list_add(&global_trace.list, &ftrace_trace_arrays);
7150 
7151 	while (trace_boot_options) {
7152 		char *option;
7153 
7154 		option = strsep(&trace_boot_options, ",");
7155 		trace_set_options(&global_trace, option);
7156 	}
7157 
7158 	register_snapshot_cmd();
7159 
7160 	return 0;
7161 
7162 out_free_savedcmd:
7163 	free_saved_cmdlines_buffer(savedcmd);
7164 out_free_temp_buffer:
7165 	ring_buffer_free(temp_buffer);
7166 out_free_cpumask:
7167 	free_cpumask_var(global_trace.tracing_cpumask);
7168 out_free_buffer_mask:
7169 	free_cpumask_var(tracing_buffer_mask);
7170 out:
7171 	return ret;
7172 }
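/*
 * Several of the values consumed above come from kernel command-line
 * parameters, for example:
 *
 *	trace_buf_size=10M  ftrace=function  trace_options=sym-offset
 *	trace_clock=global  ftrace_dump_on_oops  tp_printk
 *
 * Exact availability depends on the kernel configuration; see
 * Documentation/kernel-parameters.txt for the authoritative list.
 */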
7173 
7174 void __init trace_init(void)
7175 {
7176 	if (tracepoint_printk) {
7177 		tracepoint_print_iter =
7178 			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7179 		if (WARN_ON(!tracepoint_print_iter))
7180 			tracepoint_printk = 0;
7181 	}
7182 	tracer_alloc_buffers();
7183 	trace_event_init();
7184 }
7185 
7186 __init static int clear_boot_tracer(void)
7187 {
7188 	/*
7189 	 * The default bootup tracer buffer is in an init section.
7190 	 * This function is called in lateinit. If we did not
7191 	 * find the boot tracer, then clear it out, to prevent
7192 	 * later registration from accessing the buffer that is
7193 	 * about to be freed.
7194 	 */
7195 	if (!default_bootup_tracer)
7196 		return 0;
7197 
7198 	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7199 	       default_bootup_tracer);
7200 	default_bootup_tracer = NULL;
7201 
7202 	return 0;
7203 }
7204 
7205 fs_initcall(tracer_init_tracefs);
7206 late_initcall(clear_boot_tracer);
7207