/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/writeback.h>

#include <linux/stacktrace.h>
#include <linux/ring_buffer.h>
#include <linux/irqflags.h>

#include "trace.h"

#define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)

unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly	tracing_thresh;

static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);

static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
}

static inline void ftrace_enable_cpu(void)
{
	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
	preempt_enable();
}

static cpumask_t __read_mostly		tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu_mask(cpu, tracing_buffer_mask)

static int tracing_disabled = 1;

long
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

cycle_t ftrace_now(int cpu)
{
	u64 ts = ring_buffer_time_stamp(cpu);
	ring_buffer_normalize_time_stamp(cpu, &ts);
	return ts;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of each page in memory is used to hold this
 * linked list, by linking the lru item in the page descriptor
 * to each of the pages in the buffer for that CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset for
 * the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int			tracer_enabled = 1;

/* function tracing enabled */
int				ftrace_function_enabled;

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * up to a multiple of the page size.
 *
 * This number is purposely set low (16384 entries).
 * If a dump-on-oops happens, not having to wait for all that
 * output is much appreciated. In any case, this is configurable
 * at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly;

/*
 * max_tracer_type_len is used to simplify allocating buffers
 * for reading userspace tracer names. We keep track of
 * the longest registered tracer name.
 */
static int			max_tracer_type_len;

/*
 * trace_types_lock is used to protect the trace_types list.
 * This lock is also used to keep user access serialized.
 * Accesses from userspace will grab this lock while userspace
 * activities happen inside the kernel.
 */
static DEFINE_MUTEX(trace_types_lock);

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds iter_ctrl options */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
	/*
	 * The runqueue_is_locked() check can fail, but this is the best we
	 * have for now:
	 */
	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
		wake_up(&trace_wait);
}

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;
	int ret;

	if (!str)
		return 0;
	ret = strict_strtoul(str, 0, &buf_size);
	/* nr_entries cannot be zero */
	if (ret < 0 || buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
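
/*
 * Example: the default buffer size can be overridden on the kernel
 * command line, e.g.
 *
 *	trace_buf_size=1048576
 *
 * which requests roughly 1 MB (rounded up to page size) for each
 * per-CPU ring buffer allocated at boot.
 */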

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"sched-tree",
	"ftrace_printk",
	NULL
};

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as a raw_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 */
static raw_spinlock_t ftrace_max_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (This way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace.)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}
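
/*
 * Example (sketch): a tracer's print_line() callback typically builds
 * its output with the trace_seq helpers defined here, e.g.
 *
 *	static enum print_line_t my_print_line(struct trace_iterator *iter)
 *	{
 *		struct trace_seq *s = &iter->seq;
 *
 *		if (!trace_seq_printf(s, "pid=%d\n", iter->ent->pid))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *		return TRACE_TYPE_HANDLED;
 *	}
 *
 * my_print_line() is hypothetical; the helpers and the iterator
 * fields it uses are the ones used throughout this file.
 */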

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
static int
trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

static int
trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->len >= (PAGE_SIZE - 1))
		return 0;

	s->buffer[s->len++] = c;

	return 1;
}

static int
trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
{
	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

#define MAX_MEMHEX_BYTES	8
#define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)

static int
trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	unsigned char *data = mem;
	int i, j;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}
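
/*
 * For example, on a little-endian machine a u32 holding 0x12345678 is
 * laid out in memory as 78 56 34 12; trace_seq_putmem_hex() walks the
 * bytes from the highest address down, so it emits "12345678 " in the
 * usual most-significant-first order.
 */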

static void
trace_seq_reset(struct trace_seq *s)
{
	s->len = 0;
	s->readpos = 0;
}

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret)
		return -EFAULT;

	/* only advance past what was actually copied */
	s->readpos += cnt;
	return cnt;
}
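
/*
 * Example (sketch): a read() handler can drain an already-filled
 * trace_seq with
 *
 *	ret = trace_seq_to_user(s, ubuf, cnt);
 *	if (ret == -EBUSY)
 *		return 0;
 *	return ret;
 *
 * where -EBUSY only means everything between readpos and len has
 * already been consumed; the caller decides whether to refill @s or
 * report end of file.
 */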

static void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

	s->buffer[len] = 0;
	seq_puts(m, s->buffer);

	trace_seq_reset(s);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	ftrace_disable_cpu();
	ring_buffer_reset(tr->buffer);
	ftrace_enable_cpu();

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	ring_buffer_reset(max_tr.buffer);
	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	ftrace_enable_cpu();

	WARN_ON_ONCE(ret);

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;
		int saved_ctrl = tr->ctrl;
		int i;
		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		for_each_tracing_cpu(i) {
			tracing_reset(tr, i);
		}
		current_trace = type;
		tr->ctrl = 0;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		tr->ctrl = saved_ctrl;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		for_each_tracing_cpu(i) {
			tracing_reset(tr, i);
		}
		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
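
/*
 * Example (sketch): a minimal tracer plugin registers itself from an
 * initcall. The callback names here are hypothetical; their exact
 * signatures live in trace.h.
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my-tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static int __init register_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	device_initcall(register_my_tracer);
 *
 * Once registered, the name appears in the list produced by
 * show_traces_open() below and the tracer can be selected as the
 * current_trace.
 */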

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}

void tracing_reset(struct trace_array *tr, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(tr->buffer, cpu);
	ftrace_enable_cpu();
}

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);

/* temporarily disable recording */
atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;
		/* remember which pid owns this slot, for later eviction */
		map_cmdline_to_pid[idx] = tsk->pid;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

static char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count		= pc & 0xff;
	entry->pid			= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
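
/*
 * For example, an event recorded from a hard interrupt handler with
 * interrupts disabled and a reschedule pending ends up with
 * TRACE_FLAG_IRQS_OFF | TRACE_FLAG_HARDIRQ | TRACE_FLAG_NEED_RESCHED
 * set, which the latency format renders as "dNh" via
 * lat_print_generic() below.
 */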

void
trace_function(struct trace_array *tr, struct trace_array_cpu *data,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long irq_flags;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type			= TRACE_FN;
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, data, ip, parent_ip, flags, pc);
}

static void ftrace_trace_stack(struct trace_array *tr,
			       struct trace_array_cpu *data,
			       unsigned long flags,
			       int skip, int pc)
{
#ifdef CONFIG_STACKTRACE
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	unsigned long irq_flags;

	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type		= TRACE_STACK;

	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= skip;
	trace.entries		= entry->caller;

	save_stack_trace(&trace);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
#endif
}

void __trace_stack(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long flags,
		   int skip)
{
	ftrace_trace_stack(tr, data, flags, skip, preempt_count());
}

static void
ftrace_trace_special(void *__tr, void *__data,
		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
		     int pc)
{
	struct ring_buffer_event *event;
	struct trace_array_cpu *data = __data;
	struct trace_array *tr = __tr;
	struct special_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, pc);
	entry->ent.type			= TRACE_SPECIAL;
	entry->arg1			= arg1;
	entry->arg2			= arg2;
	entry->arg3			= arg3;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, irq_flags, 4, pc);

	trace_wake_up();
}

void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
}

void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					   &irq_flags);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type			= TRACE_CTX;
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= prev->state;
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= next->state;
	entry->next_cpu	= task_cpu(next);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, flags, 5, pc);
}

void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					   &irq_flags);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type			= TRACE_WAKE;
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= curr->state;
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= wakee->state;
	entry->next_cpu			= task_cpu(wakee);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, flags, 6, pc);

	trace_wake_up();
}

void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	int cpu;
	int pc;

	if (tracing_disabled || !tr->ctrl)
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);

	preempt_enable_notrace();
}
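
/*
 * Example (sketch): ftrace_special() is meant for ad-hoc debugging.
 * A developer can drop a call such as
 *
 *	ftrace_special(__LINE__, jiffies, 0);
 *
 * into a code path under investigation; each hit is logged as a
 * TRACE_SPECIAL entry (rendered as "# arg1 arg2 arg3") plus a stack
 * trace when the stacktrace option is set, with no new entry type
 * needed.
 */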

#ifdef CONFIG_FUNCTION_TRACER
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = need_resched();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;
	register_ftrace_function(&trace_ops);
	if (tracer_enabled)
		ftrace_function_enabled = 1;
}

void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}
#endif

enum trace_file_type {
	TRACE_FILE_LAT_FMT	= 1,
};

static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	iter->idx++;
	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

	ftrace_enable_cpu();
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);

	ftrace_enable_cpu();

	return event ? ring_buffer_event_data(event) : NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->tr->buffer;
	struct trace_entry *ent, *next = NULL;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int cpu;

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	return next;
}
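
/*
 * For example, if CPU0's next pending entry is stamped 105us and
 * CPU1's is stamped 103us, __find_next_entry() returns CPU1's entry
 * first: the per-CPU buffers are only ordered internally, and this
 * merge is what presents them as a single time-ordered stream.
 */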

/* Find the next real entry, without updating the iterator itself */
static struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
static void *find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter, iter->cpu);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();
	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
	ftrace_enable_cpu();
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace) {
		mutex_unlock(&trace_types_lock);
		return NULL;
	}

	atomic_inc(&trace_record_cmdline_disabled);

	/* let the tracer grab locks here if needed */
	if (current_trace->start)
		current_trace->start(iter);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		ftrace_disable_cpu();

		for_each_tracing_cpu(cpu) {
			ring_buffer_iter_reset(iter->buffer_iter[cpu]);
		}

		ftrace_enable_cpu();

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);

	/* let the tracer release locks here if needed */
	if (current_trace && current_trace == iter->trace && iter->trace->stop)
		iter->trace->stop(iter);

	mutex_unlock(&trace_types_lock);
}

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /                      \n");
	seq_puts(m, "#                |||||     delay             \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||   \\   |   /           \n");
}

static void print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}


static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total;
	unsigned long entries;
	const char *name = "preemption";

	if (type)
		name = type->name;

	entries = ring_buffer_entries(iter->tr->buffer);
	total = entries +
		ring_buffer_overruns(iter->tr->buffer);

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

static void
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
	trace_seq_printf(s, "%3d", cpu);
	trace_seq_printf(s, "%c%c",
			(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
			 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
			((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq)
		trace_seq_putc(s, 'H');
	else if (hardirq)
		trace_seq_putc(s, 'h');
	else if (softirq)
		trace_seq_putc(s, 's');
	else
		trace_seq_putc(s, '.');

	if (entry->preempt_count)
		trace_seq_printf(s, "%x", entry->preempt_count);
	else
		trace_seq_puts(s, ".");
}

unsigned long preempt_mark_thresh = 100;

static void
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	trace_seq_printf(s, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		trace_seq_puts(s, "!: ");
	else if (rel_usecs > 1)
		trace_seq_puts(s, "+: ");
	else
		trace_seq_puts(s, " : ");
}
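
/*
 * For example, an event 1234us into the trace whose successor is more
 * than preempt_mark_thresh (100us) away prints as " 1234us!: "; one
 * followed after more than a microsecond but under the threshold gets
 * " 1234us+: ", and one followed almost immediately gets " 1234us : ".
 */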

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

/*
 * The message is supposed to contain an ending newline.
 * If the printing stops prematurely, try to add a newline of our own.
 */
void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
{
	struct trace_entry *ent;
	struct trace_field_cont *cont;
	bool ok = true;

	ent = peek_next_entry(iter, iter->cpu, NULL);
	if (!ent || ent->type != TRACE_CONT) {
		trace_seq_putc(s, '\n');
		return;
	}

	do {
		cont = (struct trace_field_cont *)ent;
		if (ok)
			ok = (trace_seq_printf(s, "%s", cont->buf) > 0);

		ftrace_disable_cpu();

		if (iter->buffer_iter[iter->cpu])
			ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
		else
			ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);

		ftrace_enable_cpu();

		ent = peek_next_entry(iter, iter->cpu, NULL);
	} while (ent && ent->type == TRACE_CONT);

	if (!ok)
		trace_seq_putc(s, '\n');
}

static enum print_line_t
print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry;
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	u64 next_ts;
	char *comm;
	int S, T;
	int i;
	unsigned state;

	if (entry->type == TRACE_CONT)
		return TRACE_TYPE_HANDLED;

	next_entry = find_next_entry(iter, NULL, &next_ts);
	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);
	abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
				 " %ld.%03ldms (+%ld.%03ldms): ",
				 comm,
				 entry->pid, cpu, entry->flags,
				 entry->preempt_count, trace_idx,
				 ns2usecs(iter->ts),
				 abs_usecs/1000,
				 abs_usecs % 1000, rel_usecs/1000,
				 rel_usecs % 1000);
	} else {
		lat_print_generic(s, entry, cpu);
		lat_print_timestamp(s, abs_usecs, rel_usecs);
	}
	switch (entry->type) {
	case TRACE_FN: {
		struct ftrace_entry *field;

		trace_assign_type(field, entry);

		seq_print_ip_sym(s, field->ip, sym_flags);
		trace_seq_puts(s, " (");
		seq_print_ip_sym(s, field->parent_ip, sym_flags);
		trace_seq_puts(s, ")\n");
		break;
	}
	case TRACE_CTX:
	case TRACE_WAKE: {
		struct ctx_switch_entry *field;

		trace_assign_type(field, entry);

		T = field->next_state < sizeof(state_to_char) ?
			state_to_char[field->next_state] : 'X';

		state = field->prev_state ?
			__ffs(field->prev_state) + 1 : 0;
		S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
		comm = trace_find_cmdline(field->next_pid);
		trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
				 field->prev_pid,
				 field->prev_prio,
				 S, entry->type == TRACE_CTX ? "==>" : "  +",
				 field->next_cpu,
				 field->next_pid,
				 field->next_prio,
				 T, comm);
		break;
	}
	case TRACE_SPECIAL: {
		struct special_entry *field;

		trace_assign_type(field, entry);

		trace_seq_printf(s, "# %ld %ld %ld\n",
				 field->arg1,
				 field->arg2,
				 field->arg3);
		break;
	}
	case TRACE_STACK: {
		struct stack_entry *field;

		trace_assign_type(field, entry);

		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
			if (i)
				trace_seq_puts(s, " <= ");
			seq_print_ip_sym(s, field->caller[i], sym_flags);
		}
		trace_seq_puts(s, "\n");
		break;
	}
	case TRACE_PRINT: {
		struct print_entry *field;

		trace_assign_type(field, entry);

		seq_print_ip_sym(s, field->ip, sym_flags);
		trace_seq_printf(s, ": %s", field->buf);
		if (entry->flags & TRACE_FLAG_CONT)
			trace_seq_print_cont(s, iter);
		break;
	}
	default:
		trace_seq_printf(s, "Unknown type %d\n", entry->type);
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	unsigned long usec_rem;
	unsigned long long t;
	unsigned long secs;
	char *comm;
	int ret;
	int S, T;
	int i;

	entry = iter->ent;

	if (entry->type == TRACE_CONT)
		return TRACE_TYPE_HANDLED;

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(iter->ts);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (entry->type) {
	case TRACE_FN: {
		struct ftrace_entry *field;

		trace_assign_type(field, entry);

		ret = seq_print_ip_sym(s, field->ip, sym_flags);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
						field->parent_ip) {
			ret = trace_seq_printf(s, " <-");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = seq_print_ip_sym(s,
					       field->parent_ip,
					       sym_flags);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		ret = trace_seq_printf(s, "\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_CTX:
	case TRACE_WAKE: {
		struct ctx_switch_entry *field;

		trace_assign_type(field, entry);

		S = field->prev_state < sizeof(state_to_char) ?
			state_to_char[field->prev_state] : 'X';
		T = field->next_state < sizeof(state_to_char) ?
			state_to_char[field->next_state] : 'X';
		ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
				       field->prev_pid,
				       field->prev_prio,
				       S,
				       entry->type == TRACE_CTX ? "==>" : "  +",
				       field->next_cpu,
				       field->next_pid,
				       field->next_prio,
				       T);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_SPECIAL: {
		struct special_entry *field;

		trace_assign_type(field, entry);

		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
				 field->arg1,
				 field->arg2,
				 field->arg3);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_STACK: {
		struct stack_entry *field;

		trace_assign_type(field, entry);

		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
			if (i) {
				ret = trace_seq_puts(s, " <= ");
				if (!ret)
					return TRACE_TYPE_PARTIAL_LINE;
			}
			ret = seq_print_ip_sym(s, field->caller[i],
					       sym_flags);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		ret = trace_seq_puts(s, "\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_PRINT: {
		struct print_entry *field;

		trace_assign_type(field, entry);

		seq_print_ip_sym(s, field->ip, sym_flags);
		trace_seq_printf(s, ": %s", field->buf);
		if (entry->flags & TRACE_FLAG_CONT)
			trace_seq_print_cont(s, iter);
		break;
	}
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	int ret;
	int S, T;

	entry = iter->ent;

	if (entry->type == TRACE_CONT)
		return TRACE_TYPE_HANDLED;

	ret = trace_seq_printf(s, "%d %d %llu ",
		entry->pid, iter->cpu, iter->ts);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (entry->type) {
	case TRACE_FN: {
		struct ftrace_entry *field;

		trace_assign_type(field, entry);

		/* ip and parent_ip are unsigned long, so print with %lx */
		ret = trace_seq_printf(s, "%lx %lx\n",
					field->ip,
					field->parent_ip);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_CTX:
	case TRACE_WAKE: {
		struct ctx_switch_entry *field;

		trace_assign_type(field, entry);

		S = field->prev_state < sizeof(state_to_char) ?
			state_to_char[field->prev_state] : 'X';
		T = field->next_state < sizeof(state_to_char) ?
			state_to_char[field->next_state] : 'X';
		if (entry->type == TRACE_WAKE)
			S = '+';
		ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
				       field->prev_pid,
				       field->prev_prio,
				       S,
				       field->next_cpu,
				       field->next_pid,
				       field->next_prio,
				       T);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_SPECIAL:
	case TRACE_STACK: {
		struct special_entry *field;

		trace_assign_type(field, entry);

		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
				 field->arg1,
				 field->arg2,
				 field->arg3);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_PRINT: {
		struct print_entry *field;

		trace_assign_type(field, entry);

		trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
		if (entry->flags & TRACE_FLAG_CONT)
			trace_seq_print_cont(s, iter);
		break;
	}
	}
	return TRACE_TYPE_HANDLED;
}

#define SEQ_PUT_FIELD_RET(s, x)				\
do {							\
	if (!trace_seq_putmem(s, &(x), sizeof(x)))	\
		return 0;				\
} while (0)

#define SEQ_PUT_HEX_FIELD_RET(s, x)			\
do {							\
	BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES);	\
	if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))	\
		return 0;				\
} while (0)

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	int S, T;

	entry = iter->ent;

	if (entry->type == TRACE_CONT)
		return TRACE_TYPE_HANDLED;

	SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
	SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
	SEQ_PUT_HEX_FIELD_RET(s, iter->ts);

	switch (entry->type) {
	case TRACE_FN: {
		struct ftrace_entry *field;

		trace_assign_type(field, entry);

		SEQ_PUT_HEX_FIELD_RET(s, field->ip);
		SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
		break;
	}
	case TRACE_CTX:
	case TRACE_WAKE: {
		struct ctx_switch_entry *field;

		trace_assign_type(field, entry);

		S = field->prev_state < sizeof(state_to_char) ?
			state_to_char[field->prev_state] : 'X';
		T = field->next_state < sizeof(state_to_char) ?
			state_to_char[field->next_state] : 'X';
		if (entry->type == TRACE_WAKE)
			S = '+';
		SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
		SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
		SEQ_PUT_HEX_FIELD_RET(s, S);
		SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
		SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
		SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
		SEQ_PUT_HEX_FIELD_RET(s, T);
		break;
	}
	case TRACE_SPECIAL:
	case TRACE_STACK: {
		struct special_entry *field;

		trace_assign_type(field, entry);

		SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
		SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
		SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
		break;
	}
	}
	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;

	entry = iter->ent;

	if (entry->type == TRACE_CONT)
		return TRACE_TYPE_HANDLED;

	SEQ_PUT_FIELD_RET(s, entry->pid);
	SEQ_PUT_FIELD_RET(s, iter->cpu);
	SEQ_PUT_FIELD_RET(s, iter->ts);

	switch (entry->type) {
	case TRACE_FN: {
		struct ftrace_entry *field;

		trace_assign_type(field, entry);

		SEQ_PUT_FIELD_RET(s, field->ip);
		SEQ_PUT_FIELD_RET(s, field->parent_ip);
		break;
	}
	case TRACE_CTX: {
		struct ctx_switch_entry *field;

		trace_assign_type(field, entry);

		SEQ_PUT_FIELD_RET(s, field->prev_pid);
		SEQ_PUT_FIELD_RET(s, field->prev_prio);
		SEQ_PUT_FIELD_RET(s, field->prev_state);
		SEQ_PUT_FIELD_RET(s, field->next_pid);
		SEQ_PUT_FIELD_RET(s, field->next_prio);
		SEQ_PUT_FIELD_RET(s, field->next_state);
		break;
	}
	case TRACE_SPECIAL:
	case TRACE_STACK: {
		struct special_entry *field;

		trace_assign_type(field, entry);

		SEQ_PUT_FIELD_RET(s, field->arg1);
		SEQ_PUT_FIELD_RET(s, field->arg2);
		SEQ_PUT_FIELD_RET(s, field->arg3);
		break;
	}
	}
	return TRACE_TYPE_HANDLED;
}

static int trace_empty(struct trace_iterator *iter)
{
	int cpu;

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

static enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		return print_lat_fmt(iter, iter->idx, iter->cpu);

	return print_trace_fmt(iter);
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		print_trace_line(iter);
		trace_print_seq(m, &iter->seq);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
	struct trace_iterator *iter;
	struct seq_file *m;
	int cpu;

	if (tracing_disabled) {
		*ret = -ENODEV;
		return NULL;
	}

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		*ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&trace_types_lock);
	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = inode->i_private;
	iter->trace = current_trace;
	iter->pos = -1;

	for_each_tracing_cpu(cpu) {

		iter->buffer_iter[cpu] =
			ring_buffer_read_start(iter->tr->buffer, cpu);

		if (!iter->buffer_iter[cpu]) {
			/* make sure callers see the failure */
			*ret = -ENOMEM;
			goto fail_buffer;
		}
	}

	/* TODO stop tracer */
	*ret = seq_open(file, &tracer_seq_ops);
	if (*ret)
		goto fail_buffer;

	m = file->private_data;
	m->private = iter;

	/* stop the trace while dumping */
	if (iter->tr->ctrl) {
		tracer_enabled = 0;
		ftrace_function_enabled = 0;
	}

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	mutex_unlock(&trace_types_lock);

 out:
	return iter;

 fail_buffer:
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}
	mutex_unlock(&trace_types_lock);
	kfree(iter);

	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;
	int cpu;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	if (iter->tr->ctrl) {
		tracer_enabled = 1;
		/*
		 * It is safe to enable function tracing even if it
		 * isn't used
		 */
		ftrace_function_enabled = 1;
	}
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	int ret;

	__tracing_open(inode, file, &ret);

	return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret;

	iter = __tracing_open(inode, file, &ret);

	if (!ret)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	return ret;
}


static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = m->private;

	(*pos)++;

	if (t)
		t = t->next;

	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t = m->private;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = trace_types;
	}

	return ret;
}

static struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static struct file_operations tracing_lt_fops = {
	.open		= tracing_lt_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
};
2096 
2097 /*
2098  * Only trace on a CPU if the bitmask is set:
2099  */
2100 static cpumask_t tracing_cpumask = CPU_MASK_ALL;
2101 
2102 /*
2103  * When tracing/tracing_cpumask is modified, this holds
2104  * the new bitmask we are about to install:
2105  */
2106 static cpumask_t tracing_cpumask_new;
2107 
2108 /*
2109  * The tracer itself will not take this lock, but still we want
2110  * to provide a consistent cpumask to user-space:
2111  */
2112 static DEFINE_MUTEX(tracing_cpumask_update_lock);
2113 
2114 /*
2115  * Temporary storage for the character representation of the
2116  * CPU bitmask (and one more byte for the newline):
2117  */
2118 static char mask_str[NR_CPUS + 1];
2119 
2120 static ssize_t
2121 tracing_cpumask_read(struct file *filp, char __user *ubuf,
2122 		     size_t count, loff_t *ppos)
2123 {
2124 	int len;
2125 
2126 	mutex_lock(&tracing_cpumask_update_lock);
2127 
2128 	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2129 	if (count - len < 2) {
2130 		count = -EINVAL;
2131 		goto out_err;
2132 	}
2133 	len += sprintf(mask_str + len, "\n");
2134 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2135 
2136 out_err:
2137 	mutex_unlock(&tracing_cpumask_update_lock);
2138 
2139 	return count;
2140 }
2141 
2142 static ssize_t
2143 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2144 		      size_t count, loff_t *ppos)
2145 {
2146 	int err, cpu;
2147 
2148 	mutex_lock(&tracing_cpumask_update_lock);
2149 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2150 	if (err)
2151 		goto err_unlock;
2152 
2153 	raw_local_irq_disable();
2154 	__raw_spin_lock(&ftrace_max_lock);
2155 	for_each_tracing_cpu(cpu) {
2156 		/*
2157 		 * Increase/decrease the disabled counter if we are
2158 		 * about to flip a bit in the cpumask:
2159 		 */
2160 		if (cpu_isset(cpu, tracing_cpumask) &&
2161 				!cpu_isset(cpu, tracing_cpumask_new)) {
2162 			atomic_inc(&global_trace.data[cpu]->disabled);
2163 		}
2164 		if (!cpu_isset(cpu, tracing_cpumask) &&
2165 				cpu_isset(cpu, tracing_cpumask_new)) {
2166 			atomic_dec(&global_trace.data[cpu]->disabled);
2167 		}
2168 	}
2169 	__raw_spin_unlock(&ftrace_max_lock);
2170 	raw_local_irq_enable();
2171 
2172 	tracing_cpumask = tracing_cpumask_new;
2173 
2174 	mutex_unlock(&tracing_cpumask_update_lock);
2175 
2176 	return count;
2177 
2178 err_unlock:
2179 	mutex_unlock(&tracing_cpumask_update_lock);
2180 
2181 	return err;
2182 }
2183 
2184 static struct file_operations tracing_cpumask_fops = {
2185 	.open		= tracing_open_generic,
2186 	.read		= tracing_cpumask_read,
2187 	.write		= tracing_cpumask_write,
2188 };
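
/*
 * Illustrative usage (a sketch; as in the mini-HOWTO below, debugfs
 * is assumed to be mounted on /debug). The mask is printed and
 * parsed as hex, so on a 4-CPU box:
 *
 *	# cat /debug/tracing/tracing_cpumask
 *	f
 *	# echo 5 > /debug/tracing/tracing_cpumask	(trace CPUs 0 and 2)
 */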
2189 
2190 static ssize_t
2191 tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2192 		       size_t cnt, loff_t *ppos)
2193 {
2194 	char *buf;
2195 	int r = 0;
2196 	int len = 0;
2197 	int i;
2198 
2199 	/* calculate max size */
2200 	for (i = 0; trace_options[i]; i++) {
2201 		len += strlen(trace_options[i]);
2202 		len += 3; /* "no" and space */
2203 	}
2204 
2205 	/* +2 for \n and \0 */
2206 	buf = kmalloc(len + 2, GFP_KERNEL);
2207 	if (!buf)
2208 		return -ENOMEM;
2209 
2210 	for (i = 0; trace_options[i]; i++) {
2211 		if (trace_flags & (1 << i))
2212 			r += sprintf(buf + r, "%s ", trace_options[i]);
2213 		else
2214 			r += sprintf(buf + r, "no%s ", trace_options[i]);
2215 	}
2216 
2217 	r += sprintf(buf + r, "\n");
2218 	WARN_ON(r >= len + 2);
2219 
2220 	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2221 
2222 	kfree(buf);
2223 
2224 	return r;
2225 }
2226 
2227 static ssize_t
2228 tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
2229 			size_t cnt, loff_t *ppos)
2230 {
2231 	char buf[64];
2232 	char *cmp = buf;
2233 	int neg = 0;
2234 	int i;
2235 
2236 	if (cnt >= sizeof(buf))
2237 		return -EINVAL;
2238 
2239 	if (copy_from_user(&buf, ubuf, cnt))
2240 		return -EFAULT;
2241 
2242 	buf[cnt] = 0;
2243 
2244 	if (strncmp(buf, "no", 2) == 0) {
2245 		neg = 1;
2246 		cmp += 2;
2247 	}
2248 
2249 	for (i = 0; trace_options[i]; i++) {
2250 		int len = strlen(trace_options[i]);
2251 
2252 		if (strncmp(cmp, trace_options[i], len) == 0) {
2253 			if (neg)
2254 				trace_flags &= ~(1 << i);
2255 			else
2256 				trace_flags |= (1 << i);
2257 			break;
2258 		}
2259 	}
2260 	/*
2261 	 * If no option could be set, return an error:
2262 	 */
2263 	if (!trace_options[i])
2264 		return -EINVAL;
2265 
2266 	filp->f_pos += cnt;
2267 
2268 	return cnt;
2269 }
2270 
2271 static struct file_operations tracing_iter_fops = {
2272 	.open		= tracing_open_generic,
2273 	.read		= tracing_iter_ctrl_read,
2274 	.write		= tracing_iter_ctrl_write,
2275 };
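
/*
 * Illustrative: options are toggled by name, and a "no" prefix
 * clears them, e.g.
 *
 *	# echo verbose > /debug/tracing/iter_ctrl
 *	# echo noverbose > /debug/tracing/iter_ctrl
 *
 * A write that matches no known option fails with -EINVAL.
 */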
2276 
2277 static const char readme_msg[] =
2278 	"tracing mini-HOWTO:\n\n"
2279 	"# mkdir /debug\n"
2280 	"# mount -t debugfs nodev /debug\n\n"
2281 	"# cat /debug/tracing/available_tracers\n"
2282 	"wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
2283 	"# cat /debug/tracing/current_tracer\n"
2284 	"none\n"
2285 	"# echo sched_switch > /debug/tracing/current_tracer\n"
2286 	"# cat /debug/tracing/current_tracer\n"
2287 	"sched_switch\n"
2288 	"# cat /debug/tracing/iter_ctrl\n"
2289 	"noprint-parent nosym-offset nosym-addr noverbose\n"
2290 	"# echo print-parent > /debug/tracing/iter_ctrl\n"
2291 	"# echo 1 > /debug/tracing/tracing_enabled\n"
2292 	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
2293 	"# echo 0 > /debug/tracing/tracing_enabled\n"
2294 ;
2295 
2296 static ssize_t
2297 tracing_readme_read(struct file *filp, char __user *ubuf,
2298 		       size_t cnt, loff_t *ppos)
2299 {
2300 	return simple_read_from_buffer(ubuf, cnt, ppos,
2301 					readme_msg, strlen(readme_msg));
2302 }
2303 
2304 static struct file_operations tracing_readme_fops = {
2305 	.open		= tracing_open_generic,
2306 	.read		= tracing_readme_read,
2307 };
2308 
2309 static ssize_t
2310 tracing_ctrl_read(struct file *filp, char __user *ubuf,
2311 		  size_t cnt, loff_t *ppos)
2312 {
2313 	struct trace_array *tr = filp->private_data;
2314 	char buf[64];
2315 	int r;
2316 
2317 	r = sprintf(buf, "%ld\n", tr->ctrl);
2318 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2319 }
2320 
2321 static ssize_t
2322 tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2323 		   size_t cnt, loff_t *ppos)
2324 {
2325 	struct trace_array *tr = filp->private_data;
2326 	char buf[64];
2327 	long val;
2328 	int ret;
2329 
2330 	if (cnt >= sizeof(buf))
2331 		return -EINVAL;
2332 
2333 	if (copy_from_user(&buf, ubuf, cnt))
2334 		return -EFAULT;
2335 
2336 	buf[cnt] = 0;
2337 
2338 	ret = strict_strtoul(buf, 10, &val);
2339 	if (ret < 0)
2340 		return ret;
2341 
2342 	val = !!val;
2343 
2344 	mutex_lock(&trace_types_lock);
2345 	if (tr->ctrl ^ val) {
2346 		if (val)
2347 			tracer_enabled = 1;
2348 		else
2349 			tracer_enabled = 0;
2350 
2351 		tr->ctrl = val;
2352 
2353 		if (current_trace && current_trace->ctrl_update)
2354 			current_trace->ctrl_update(tr);
2355 	}
2356 	mutex_unlock(&trace_types_lock);
2357 
2358 	filp->f_pos += cnt;
2359 
2360 	return cnt;
2361 }
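
/*
 * Illustrative: any non-zero write is normalized to 1 above, so
 *
 *	# echo 1 > /debug/tracing/tracing_enabled
 *	# echo 0 > /debug/tracing/tracing_enabled
 *
 * start and stop the current tracer through its ctrl_update() hook.
 */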
2362 
2363 static ssize_t
2364 tracing_set_trace_read(struct file *filp, char __user *ubuf,
2365 		       size_t cnt, loff_t *ppos)
2366 {
2367 	char buf[max_tracer_type_len+2];
2368 	int r;
2369 
2370 	mutex_lock(&trace_types_lock);
2371 	if (current_trace)
2372 		r = sprintf(buf, "%s\n", current_trace->name);
2373 	else
2374 		r = sprintf(buf, "\n");
2375 	mutex_unlock(&trace_types_lock);
2376 
2377 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2378 }
2379 
2380 static ssize_t
2381 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2382 			size_t cnt, loff_t *ppos)
2383 {
2384 	struct trace_array *tr = &global_trace;
2385 	struct tracer *t;
2386 	char buf[max_tracer_type_len+1];
2387 	int i;
2388 	ssize_t ret;
2389 
2390 	ret = cnt;
2391 
2392 	if (cnt > max_tracer_type_len)
2393 		cnt = max_tracer_type_len;
2394 
2395 	if (copy_from_user(&buf, ubuf, cnt))
2396 		return -EFAULT;
2397 
2398 	buf[cnt] = 0;
2399 
2400 	/* strip ending whitespace. */
2401 	/* strip trailing whitespace. */
2402 		buf[i] = 0;
2403 
2404 	mutex_lock(&trace_types_lock);
2405 	for (t = trace_types; t; t = t->next) {
2406 		if (strcmp(t->name, buf) == 0)
2407 			break;
2408 	}
2409 	if (!t) {
2410 		ret = -EINVAL;
2411 		goto out;
2412 	}
2413 	if (t == current_trace)
2414 		goto out;
2415 
2416 	if (current_trace && current_trace->reset)
2417 		current_trace->reset(tr);
2418 
2419 	current_trace = t;
2420 	if (t->init)
2421 		t->init(tr);
2422 
2423  out:
2424 	mutex_unlock(&trace_types_lock);
2425 
2426 	if (ret > 0)
2427 		filp->f_pos += ret;
2428 
2429 	return ret;
2430 }
2431 
2432 static ssize_t
2433 tracing_max_lat_read(struct file *filp, char __user *ubuf,
2434 		     size_t cnt, loff_t *ppos)
2435 {
2436 	unsigned long *ptr = filp->private_data;
2437 	char buf[64];
2438 	int r;
2439 
2440 	r = snprintf(buf, sizeof(buf), "%ld\n",
2441 		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
2442 	if (r > sizeof(buf))
2443 		r = sizeof(buf);
2444 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2445 }
2446 
2447 static ssize_t
2448 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
2449 		      size_t cnt, loff_t *ppos)
2450 {
2451 	long *ptr = filp->private_data;
2452 	char buf[64];
2453 	long val;
2454 	int ret;
2455 
2456 	if (cnt >= sizeof(buf))
2457 		return -EINVAL;
2458 
2459 	if (copy_from_user(&buf, ubuf, cnt))
2460 		return -EFAULT;
2461 
2462 	buf[cnt] = 0;
2463 
2464 	ret = strict_strtoul(buf, 10, &val);
2465 	if (ret < 0)
2466 		return ret;
2467 
2468 	*ptr = val * 1000;
2469 
2470 	return cnt;
2471 }
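
/*
 * Illustrative: this file is read and written in microseconds while
 * the kernel stores nanoseconds, so
 *
 *	# echo 100 > /debug/tracing/tracing_max_latency
 *
 * stores 100000, and a subsequent read prints "100".
 */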
2472 
2473 static atomic_t tracing_reader;
2474 
2475 static int tracing_open_pipe(struct inode *inode, struct file *filp)
2476 {
2477 	struct trace_iterator *iter;
2478 
2479 	if (tracing_disabled)
2480 		return -ENODEV;
2481 
2482 	/* We allow only one reader of the pipe */
2483 	if (atomic_inc_return(&tracing_reader) != 1) {
2484 		atomic_dec(&tracing_reader);
2485 		return -EBUSY;
2486 	}
2487 
2488 	/* create a buffer to store the information to pass to userspace */
2489 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2490 	if (!iter)
2491 		return -ENOMEM;
2492 
2493 	mutex_lock(&trace_types_lock);
2494 	iter->tr = &global_trace;
2495 	iter->trace = current_trace;
2496 	filp->private_data = iter;
2497 
2498 	if (iter->trace->pipe_open)
2499 		iter->trace->pipe_open(iter);
2500 	mutex_unlock(&trace_types_lock);
2501 
2502 	return 0;
2503 }
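
/*
 * Illustrative: trace_pipe is a consuming reader, so
 *
 *	# cat /debug/tracing/trace_pipe
 *
 * blocks until entries arrive and removes each entry as it is
 * printed; only one process may hold the pipe open at a time
 * (a second open fails with -EBUSY).
 */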
2504 
2505 static int tracing_release_pipe(struct inode *inode, struct file *file)
2506 {
2507 	struct trace_iterator *iter = file->private_data;
2508 
2509 	kfree(iter);
2510 	atomic_dec(&tracing_reader);
2511 
2512 	return 0;
2513 }
2514 
2515 static unsigned int
2516 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
2517 {
2518 	struct trace_iterator *iter = filp->private_data;
2519 
2520 	if (trace_flags & TRACE_ITER_BLOCK) {
2521 		/*
2522 		 * Always select as readable when in blocking mode
2523 		 */
2524 		return POLLIN | POLLRDNORM;
2525 	} else {
2526 		if (!trace_empty(iter))
2527 			return POLLIN | POLLRDNORM;
2528 		poll_wait(filp, &trace_wait, poll_table);
2529 		if (!trace_empty(iter))
2530 			return POLLIN | POLLRDNORM;
2531 
2532 		return 0;
2533 	}
2534 }
2535 
2536 /*
2537  * Consumer reader.
2538  */
2539 static ssize_t
2540 tracing_read_pipe(struct file *filp, char __user *ubuf,
2541 		  size_t cnt, loff_t *ppos)
2542 {
2543 	struct trace_iterator *iter = filp->private_data;
2544 	ssize_t sret;
2545 
2546 	/* return any leftover data */
2547 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2548 	if (sret != -EBUSY)
2549 		return sret;
2550 
2551 	trace_seq_reset(&iter->seq);
2552 
2553 	mutex_lock(&trace_types_lock);
2554 	if (iter->trace->read) {
2555 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
2556 		if (sret)
2557 			goto out;
2558 	}
2559 
2560 waitagain:
2561 	sret = 0;
2562 	while (trace_empty(iter)) {
2563 
2564 		if ((filp->f_flags & O_NONBLOCK)) {
2565 			sret = -EAGAIN;
2566 			goto out;
2567 		}
2568 
2569 		/*
2570 		 * This is a makeshift waitqueue. We don't use an actual
2571 		 * wait queue because:
2572 		 *  1) we only ever have one waiter
2573 		 *  2) the tracer traces all functions, so we don't want
2574 		 *     the overhead of calling wake_up and friends
2575 		 *     (and of tracing them too)
2576 		 * Either way, this is a very primitive wakeup.
2577 		 */
2578 		set_current_state(TASK_INTERRUPTIBLE);
2579 		iter->tr->waiter = current;
2580 
2581 		mutex_unlock(&trace_types_lock);
2582 
2583 		/* sleep for 100 msecs, and try again. */
2584 		schedule_timeout(HZ/10);
2585 
2586 		mutex_lock(&trace_types_lock);
2587 
2588 		iter->tr->waiter = NULL;
2589 
2590 		if (signal_pending(current)) {
2591 			sret = -EINTR;
2592 			goto out;
2593 		}
2594 
2595 		if (iter->trace != current_trace)
2596 			goto out;
2597 
2598 		/*
2599 		 * We block until there is something to read, or until
2600 		 * tracing gets disabled after we have read something.
2601 		 * This lets a user cat this file, then enable tracing.
2602 		 * But once we have read something, we give an EOF when
2603 		 * tracing is disabled again.
2604 		 *
2605 		 * iter->pos will be 0 if we haven't read anything.
2606 		 */
2607 		if (!tracer_enabled && iter->pos)
2608 			break;
2609 
2610 		continue;
2611 	}
2612 
2613 	/* stop when tracing is finished */
2614 	if (trace_empty(iter))
2615 		goto out;
2616 
2617 	if (cnt >= PAGE_SIZE)
2618 		cnt = PAGE_SIZE - 1;
2619 
2620 	/* reset all but tr, trace, and overruns */
2621 	memset(&iter->seq, 0,
2622 	       sizeof(struct trace_iterator) -
2623 	       offsetof(struct trace_iterator, seq));
2624 	iter->pos = -1;
2625 
2626 	while (find_next_entry_inc(iter) != NULL) {
2627 		enum print_line_t ret;
2628 		int len = iter->seq.len;
2629 
2630 		ret = print_trace_line(iter);
2631 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
2632 			/* don't print partial lines */
2633 			iter->seq.len = len;
2634 			break;
2635 		}
2636 
2637 		trace_consume(iter);
2638 
2639 		if (iter->seq.len >= cnt)
2640 			break;
2641 	}
2642 
2643 	/* Now copy what we have to the user */
2644 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2645 	if (iter->seq.readpos >= iter->seq.len)
2646 		trace_seq_reset(&iter->seq);
2647 
2648 	/*
2649 	 * If there was nothing to send to the user, in spite of
2650 	 * consuming trace entries, go back to wait for more entries.
2651 	 */
2652 	if (sret == -EBUSY)
2653 		goto waitagain;
2654 
2655 out:
2656 	mutex_unlock(&trace_types_lock);
2657 
2658 	return sret;
2659 }
2660 
2661 static ssize_t
2662 tracing_entries_read(struct file *filp, char __user *ubuf,
2663 		     size_t cnt, loff_t *ppos)
2664 {
2665 	struct trace_array *tr = filp->private_data;
2666 	char buf[64];
2667 	int r;
2668 
2669 	r = sprintf(buf, "%lu\n", tr->entries);
2670 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2671 }
2672 
2673 static ssize_t
2674 tracing_entries_write(struct file *filp, const char __user *ubuf,
2675 		      size_t cnt, loff_t *ppos)
2676 {
2677 	unsigned long val;
2678 	char buf[64];
2679 	int ret;
2680 	struct trace_array *tr = filp->private_data;
2681 
2682 	if (cnt >= sizeof(buf))
2683 		return -EINVAL;
2684 
2685 	if (copy_from_user(&buf, ubuf, cnt))
2686 		return -EFAULT;
2687 
2688 	buf[cnt] = 0;
2689 
2690 	ret = strict_strtoul(buf, 10, &val);
2691 	if (ret < 0)
2692 		return ret;
2693 
2694 	/* must have at least 1 entry */
2695 	if (!val)
2696 		return -EINVAL;
2697 
2698 	mutex_lock(&trace_types_lock);
2699 
2700 	if (tr->ctrl) {
2701 		cnt = -EBUSY;
2702 		pr_info("ftrace: please disable tracing"
2703 			" before modifying buffer size\n");
2704 		goto out;
2705 	}
2706 
2707 	if (val != global_trace.entries) {
2708 		ret = ring_buffer_resize(global_trace.buffer, val);
2709 		if (ret < 0) {
2710 			cnt = ret;
2711 			goto out;
2712 		}
2713 
2714 		ret = ring_buffer_resize(max_tr.buffer, val);
2715 		if (ret < 0) {
2716 			int r;
2717 			cnt = ret;
2718 			r = ring_buffer_resize(global_trace.buffer,
2719 					       global_trace.entries);
2720 			if (r < 0) {
2721 			/* AARGH! We are left with a different
2722 			 * sized max buffer!!!! */
2723 				WARN_ON(1);
2724 				tracing_disabled = 1;
2725 			}
2726 			goto out;
2727 		}
2728 
2729 		global_trace.entries = val;
2730 	}
2731 
2732 	filp->f_pos += cnt;
2733 
2734 	/* If a failed resize disabled tracing, return ENOMEM */
2735 	if (tracing_disabled)
2736 		cnt = -ENOMEM;
2737  out:
2738 	max_tr.entries = global_trace.entries;
2739 	mutex_unlock(&trace_types_lock);
2740 
2741 	return cnt;
2742 }
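
/*
 * Illustrative: the ring buffers can only be resized while tracing
 * is disabled (tr->ctrl == 0); the entry count here is an arbitrary
 * example value:
 *
 *	# echo 0 > /debug/tracing/tracing_enabled
 *	# echo 65536 > /debug/tracing/trace_entries
 *
 * This resizes both the live buffer and the max snapshot buffer.
 */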
2743 
2744 static int mark_printk(const char *fmt, ...)
2745 {
2746 	int ret;
2747 	va_list args;
2748 	va_start(args, fmt);
2749 	ret = trace_vprintk(0, fmt, args);
2750 	va_end(args);
2751 	return ret;
2752 }
2753 
2754 static ssize_t
2755 tracing_mark_write(struct file *filp, const char __user *ubuf,
2756 					size_t cnt, loff_t *fpos)
2757 {
2758 	char *buf;
2759 	char *end;
2760 	struct trace_array *tr = &global_trace;
2761 
2762 	if (!tr->ctrl || tracing_disabled)
2763 		return -EINVAL;
2764 
2765 	if (cnt > TRACE_BUF_SIZE)
2766 		cnt = TRACE_BUF_SIZE;
2767 
2768 	buf = kmalloc(cnt + 1, GFP_KERNEL);
2769 	if (buf == NULL)
2770 		return -ENOMEM;
2771 
2772 	if (copy_from_user(buf, ubuf, cnt)) {
2773 		kfree(buf);
2774 		return -EFAULT;
2775 	}
2776 
2777 	/* NUL-terminate, and cut at the first newline if any. */
2778 	buf[cnt] = '\0';
2779 	end = strchr(buf, '\n');
2780 	if (end)
2781 		*end = '\0';
2782 
2783 	cnt = mark_printk("%s\n", buf);
2784 	kfree(buf);
2785 	*fpos += cnt;
2786 
2787 	return cnt;
2788 }
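
/*
 * Illustrative: userspace can annotate the trace from a shell:
 *
 *	# echo "hit the race here" > /debug/tracing/trace_marker
 *
 * The string is cut at the first newline and recorded through
 * trace_vprintk() as a TRACE_PRINT entry.
 */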
2789 
2790 static struct file_operations tracing_max_lat_fops = {
2791 	.open		= tracing_open_generic,
2792 	.read		= tracing_max_lat_read,
2793 	.write		= tracing_max_lat_write,
2794 };
2795 
2796 static struct file_operations tracing_ctrl_fops = {
2797 	.open		= tracing_open_generic,
2798 	.read		= tracing_ctrl_read,
2799 	.write		= tracing_ctrl_write,
2800 };
2801 
2802 static struct file_operations set_tracer_fops = {
2803 	.open		= tracing_open_generic,
2804 	.read		= tracing_set_trace_read,
2805 	.write		= tracing_set_trace_write,
2806 };
2807 
2808 static struct file_operations tracing_pipe_fops = {
2809 	.open		= tracing_open_pipe,
2810 	.poll		= tracing_poll_pipe,
2811 	.read		= tracing_read_pipe,
2812 	.release	= tracing_release_pipe,
2813 };
2814 
2815 static struct file_operations tracing_entries_fops = {
2816 	.open		= tracing_open_generic,
2817 	.read		= tracing_entries_read,
2818 	.write		= tracing_entries_write,
2819 };
2820 
2821 static struct file_operations tracing_mark_fops = {
2822 	.open		= tracing_open_generic,
2823 	.write		= tracing_mark_write,
2824 };
2825 
2826 #ifdef CONFIG_DYNAMIC_FTRACE
2827 
2828 static ssize_t
2829 tracing_read_long(struct file *filp, char __user *ubuf,
2830 		  size_t cnt, loff_t *ppos)
2831 {
2832 	unsigned long *p = filp->private_data;
2833 	char buf[64];
2834 	int r;
2835 
2836 	r = sprintf(buf, "%ld\n", *p);
2837 
2838 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2839 }
2840 
2841 static struct file_operations tracing_read_long_fops = {
2842 	.open		= tracing_open_generic,
2843 	.read		= tracing_read_long,
2844 };
2845 #endif
2846 
2847 static struct dentry *d_tracer;
2848 
2849 struct dentry *tracing_init_dentry(void)
2850 {
2851 	static int once;
2852 
2853 	if (d_tracer)
2854 		return d_tracer;
2855 
2856 	d_tracer = debugfs_create_dir("tracing", NULL);
2857 
2858 	if (!d_tracer && !once) {
2859 		once = 1;
2860 		pr_warning("Could not create debugfs directory 'tracing'\n");
2861 		return NULL;
2862 	}
2863 
2864 	return d_tracer;
2865 }
2866 
2867 #ifdef CONFIG_FTRACE_SELFTEST
2868 /* Let selftest have access to static functions in this file */
2869 #include "trace_selftest.c"
2870 #endif
2871 
2872 static __init int tracer_init_debugfs(void)
2873 {
2874 	struct dentry *d_tracer;
2875 	struct dentry *entry;
2876 
2877 	d_tracer = tracing_init_dentry();
2878 
2879 	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
2880 				    &global_trace, &tracing_ctrl_fops);
2881 	if (!entry)
2882 		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
2883 
2884 	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
2885 				    NULL, &tracing_iter_fops);
2886 	if (!entry)
2887 		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
2888 
2889 	entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
2890 				    NULL, &tracing_cpumask_fops);
2891 	if (!entry)
2892 		pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
2893 
2894 	entry = debugfs_create_file("latency_trace", 0444, d_tracer,
2895 				    &global_trace, &tracing_lt_fops);
2896 	if (!entry)
2897 		pr_warning("Could not create debugfs 'latency_trace' entry\n");
2898 
2899 	entry = debugfs_create_file("trace", 0444, d_tracer,
2900 				    &global_trace, &tracing_fops);
2901 	if (!entry)
2902 		pr_warning("Could not create debugfs 'trace' entry\n");
2903 
2904 	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
2905 				    &global_trace, &show_traces_fops);
2906 	if (!entry)
2907 		pr_warning("Could not create debugfs 'available_tracers' entry\n");
2908 
2909 	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
2910 				    &global_trace, &set_tracer_fops);
2911 	if (!entry)
2912 		pr_warning("Could not create debugfs 'current_tracer' entry\n");
2913 
2914 	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
2915 				    &tracing_max_latency,
2916 				    &tracing_max_lat_fops);
2917 	if (!entry)
2918 		pr_warning("Could not create debugfs "
2919 			   "'tracing_max_latency' entry\n");
2920 
2921 	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
2922 				    &tracing_thresh, &tracing_max_lat_fops);
2923 	if (!entry)
2924 		pr_warning("Could not create debugfs "
2925 			   "'tracing_thresh' entry\n");
2926 	entry = debugfs_create_file("README", 0644, d_tracer,
2927 				    NULL, &tracing_readme_fops);
2928 	if (!entry)
2929 		pr_warning("Could not create debugfs 'README' entry\n");
2930 
2931 	entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
2932 				    NULL, &tracing_pipe_fops);
2933 	if (!entry)
2934 		pr_warning("Could not create debugfs "
2935 			   "'trace_pipe' entry\n");
2936 
2937 	entry = debugfs_create_file("trace_entries", 0644, d_tracer,
2938 				    &global_trace, &tracing_entries_fops);
2939 	if (!entry)
2940 		pr_warning("Could not create debugfs "
2941 			   "'trace_entries' entry\n");
2942 
2943 	entry = debugfs_create_file("trace_marker", 0220, d_tracer,
2944 				    NULL, &tracing_mark_fops);
2945 	if (!entry)
2946 		pr_warning("Could not create debugfs "
2947 			   "'trace_marker' entry\n");
2948 
2949 #ifdef CONFIG_DYNAMIC_FTRACE
2950 	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
2951 				    &ftrace_update_tot_cnt,
2952 				    &tracing_read_long_fops);
2953 	if (!entry)
2954 		pr_warning("Could not create debugfs "
2955 			   "'dyn_ftrace_total_info' entry\n");
2956 #endif
2957 #ifdef CONFIG_SYSPROF_TRACER
2958 	init_tracer_sysprof_debugfs(d_tracer);
2959 #endif
2960 	return 0;
2961 }
2962 
2963 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2964 {
2965 	static DEFINE_SPINLOCK(trace_buf_lock);
2966 	static char trace_buf[TRACE_BUF_SIZE];
2967 
2968 	struct ring_buffer_event *event;
2969 	struct trace_array *tr = &global_trace;
2970 	struct trace_array_cpu *data;
2971 	struct print_entry *entry;
2972 	unsigned long flags, irq_flags;
2973 	int cpu, len = 0, size, pc;
2974 
2975 	if (!tr->ctrl || tracing_disabled)
2976 		return 0;
2977 
2978 	pc = preempt_count();
2979 	preempt_disable_notrace();
2980 	cpu = raw_smp_processor_id();
2981 	data = tr->data[cpu];
2982 
2983 	if (unlikely(atomic_read(&data->disabled)))
2984 		goto out;
2985 
2986 	spin_lock_irqsave(&trace_buf_lock, flags);
2987 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
2988 
2989 	len = min(len, TRACE_BUF_SIZE-1);
2990 	trace_buf[len] = 0;
2991 
2992 	size = sizeof(*entry) + len + 1;
2993 	event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
2994 	if (!event)
2995 		goto out_unlock;
2996 	entry = ring_buffer_event_data(event);
2997 	tracing_generic_entry_update(&entry->ent, flags, pc);
2998 	entry->ent.type			= TRACE_PRINT;
2999 	entry->ip			= ip;
3000 
3001 	memcpy(&entry->buf, trace_buf, len);
3002 	entry->buf[len] = 0;
3003 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
3004 
3005  out_unlock:
3006 	spin_unlock_irqrestore(&trace_buf_lock, flags);
3007 
3008  out:
3009 	preempt_enable_notrace();
3010 
3011 	return len;
3012 }
3013 EXPORT_SYMBOL_GPL(trace_vprintk);
3014 
3015 int __ftrace_printk(unsigned long ip, const char *fmt, ...)
3016 {
3017 	int ret;
3018 	va_list ap;
3019 
3020 	if (!(trace_flags & TRACE_ITER_PRINTK))
3021 		return 0;
3022 
3023 	va_start(ap, fmt);
3024 	ret = trace_vprintk(ip, fmt, ap);
3025 	va_end(ap);
3026 	return ret;
3027 }
3028 EXPORT_SYMBOL_GPL(__ftrace_printk);
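
/*
 * Illustrative call site (a sketch: it assumes the ftrace_printk()
 * wrapper from <linux/ftrace.h>, which supplies the caller's ip):
 *
 *	ftrace_printk("processed %d packets on cpu %d\n", nr, cpu);
 *
 * The text lands in the ring buffer as a TRACE_PRINT entry and is
 * only emitted while the TRACE_ITER_PRINTK flag is set.
 */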
3029 
3030 static int trace_panic_handler(struct notifier_block *this,
3031 			       unsigned long event, void *unused)
3032 {
3033 	ftrace_dump();
3034 	return NOTIFY_OK;
3035 }
3036 
3037 static struct notifier_block trace_panic_notifier = {
3038 	.notifier_call  = trace_panic_handler,
3039 	.next           = NULL,
3040 	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
3041 };
3042 
3043 static int trace_die_handler(struct notifier_block *self,
3044 			     unsigned long val,
3045 			     void *data)
3046 {
3047 	switch (val) {
3048 	case DIE_OOPS:
3049 		ftrace_dump();
3050 		break;
3051 	default:
3052 		break;
3053 	}
3054 	return NOTIFY_OK;
3055 }
3056 
3057 static struct notifier_block trace_die_notifier = {
3058 	.notifier_call = trace_die_handler,
3059 	.priority = 200
3060 };
3061 
3062 /*
3063  * printk's maximum is 1024 characters; we really don't need it
3064  * that big. Nothing should be printing 1000 characters anyway.
3065  */
3066 #define TRACE_MAX_PRINT		1000
3067 
3068 /*
3069  * Define here KERN_TRACE so that we have one place to modify
3070  * it if we decide to change what log level the ftrace dump
3071  * should be at.
3072  */
3073 #define KERN_TRACE		KERN_INFO
3074 
3075 static void
3076 trace_printk_seq(struct trace_seq *s)
3077 {
3078 	/* Probably should print a warning here. */
3079 	if (s->len >= TRACE_MAX_PRINT)
3080 		s->len = TRACE_MAX_PRINT;
3081 
3082 	/* should be zero ended, but we are paranoid. */
3083 	/* should already be NUL-terminated, but we are paranoid. */
3084 
3085 	printk(KERN_TRACE "%s", s->buffer);
3086 
3087 	trace_seq_reset(s);
3088 }
3089 
3090 
3091 void ftrace_dump(void)
3092 {
3093 	static DEFINE_SPINLOCK(ftrace_dump_lock);
3094 	/* use static because iter can be a bit big for the stack */
3095 	static struct trace_iterator iter;
3096 	static cpumask_t mask;
3097 	static int dump_ran;
3098 	unsigned long flags;
3099 	int cnt = 0, cpu;
3100 
3101 	/* only one dump */
3102 	spin_lock_irqsave(&ftrace_dump_lock, flags);
3103 	if (dump_ran)
3104 		goto out;
3105 
3106 	dump_ran = 1;
3107 
3108 	/* No turning back! */
3109 	ftrace_kill();
3110 
3111 	for_each_tracing_cpu(cpu) {
3112 		atomic_inc(&global_trace.data[cpu]->disabled);
3113 	}
3114 
3115 	printk(KERN_TRACE "Dumping ftrace buffer:\n");
3116 
3117 	iter.tr = &global_trace;
3118 	iter.trace = current_trace;
3119 
3120 	/*
3121 	 * We need to stop all tracing on all CPUs to read
3122 	 * the next buffer. This is a bit expensive, but it is
3123 	 * not done often. We read everything we can,
3124 	 * and then release the locks again.
3125 	 */
3126 
3127 	cpus_clear(mask);
3128 
3129 	while (!trace_empty(&iter)) {
3130 
3131 		if (!cnt)
3132 			printk(KERN_TRACE "---------------------------------\n");
3133 
3134 		cnt++;
3135 
3136 		/* reset all but tr, trace, and overruns */
3137 		memset(&iter.seq, 0,
3138 		       sizeof(struct trace_iterator) -
3139 		       offsetof(struct trace_iterator, seq));
3140 		iter.iter_flags |= TRACE_FILE_LAT_FMT;
3141 		iter.pos = -1;
3142 
3143 		if (find_next_entry_inc(&iter) != NULL) {
3144 			print_trace_line(&iter);
3145 			trace_consume(&iter);
3146 		}
3147 
3148 		trace_printk_seq(&iter.seq);
3149 	}
3150 
3151 	if (!cnt)
3152 		printk(KERN_TRACE "   (ftrace buffer empty)\n");
3153 	else
3154 		printk(KERN_TRACE "---------------------------------\n");
3155 
3156  out:
3157 	spin_unlock_irqrestore(&ftrace_dump_lock, flags);
3158 }
3159 
3160 __init static int tracer_alloc_buffers(void)
3161 {
3162 	struct trace_array_cpu *data;
3163 	int i;
3164 
3165 	/* TODO: make the number of buffers hot pluggable with CPUs */
3166 	tracing_buffer_mask = cpu_possible_map;
3167 
3168 	global_trace.buffer = ring_buffer_alloc(trace_buf_size,
3169 						   TRACE_BUFFER_FLAGS);
3170 	if (!global_trace.buffer) {
3171 		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
3172 		WARN_ON(1);
3173 		return 0;
3174 	}
3175 	global_trace.entries = ring_buffer_size(global_trace.buffer);
3176 
3177 #ifdef CONFIG_TRACER_MAX_TRACE
3178 	max_tr.buffer = ring_buffer_alloc(trace_buf_size,
3179 					     TRACE_BUFFER_FLAGS);
3180 	if (!max_tr.buffer) {
3181 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
3182 		WARN_ON(1);
3183 		ring_buffer_free(global_trace.buffer);
3184 		return 0;
3185 	}
3186 	max_tr.entries = ring_buffer_size(max_tr.buffer);
3187 	WARN_ON(max_tr.entries != global_trace.entries);
3188 #endif
3189 
3190 	/* Allocate the first page for all buffers */
3191 	for_each_tracing_cpu(i) {
3192 		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
3193 		max_tr.data[i] = &per_cpu(max_data, i);
3194 	}
3195 
3196 	trace_init_cmdlines();
3197 
3198 	register_tracer(&nop_trace);
3199 #ifdef CONFIG_BOOT_TRACER
3200 	register_tracer(&boot_tracer);
3201 	current_trace = &boot_tracer;
3202 	current_trace->init(&global_trace);
3203 #else
3204 	current_trace = &nop_trace;
3205 #endif
3206 
3207 	/* All seems OK, enable tracing */
3208 	global_trace.ctrl = tracer_enabled;
3209 	tracing_disabled = 0;
3210 
3211 	atomic_notifier_chain_register(&panic_notifier_list,
3212 				       &trace_panic_notifier);
3213 
3214 	register_die_notifier(&trace_die_notifier);
3215 
3216 	return 0;
3217 }
3218 early_initcall(tracer_alloc_buffers);
3219 fs_initcall(tracer_init_debugfs);
3220