xref: /openbmc/linux/kernel/trace/trace.c (revision 545e4006)
1 /*
2  * ring buffer based function tracer
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally taken from the RT patch by:
8  *    Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code from the latency_tracer, that is:
11  *  Copyright (C) 2004-2006 Ingo Molnar
12  *  Copyright (C) 2004 William Lee Irwin III
13  */
14 #include <linux/utsrelease.h>
15 #include <linux/kallsyms.h>
16 #include <linux/seq_file.h>
17 #include <linux/debugfs.h>
18 #include <linux/pagemap.h>
19 #include <linux/hardirq.h>
20 #include <linux/linkage.h>
21 #include <linux/uaccess.h>
22 #include <linux/ftrace.h>
23 #include <linux/module.h>
24 #include <linux/percpu.h>
25 #include <linux/ctype.h>
26 #include <linux/init.h>
27 #include <linux/poll.h>
28 #include <linux/gfp.h>
29 #include <linux/fs.h>
30 #include <linux/kprobes.h>
31 #include <linux/writeback.h>
32 
33 #include <linux/stacktrace.h>
34 
35 #include "trace.h"
36 
37 unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
38 unsigned long __read_mostly	tracing_thresh;
39 
40 static unsigned long __read_mostly	tracing_nr_buffers;
41 static cpumask_t __read_mostly		tracing_buffer_mask;
42 
43 #define for_each_tracing_cpu(cpu)	\
44 	for_each_cpu_mask(cpu, tracing_buffer_mask)
45 
46 static int trace_alloc_page(void);
47 static int trace_free_page(void);
48 
49 static int tracing_disabled = 1;
50 
51 static unsigned long tracing_pages_allocated;
52 
53 long
54 ns2usecs(cycle_t nsec)
55 {
56 	nsec += 500;
57 	do_div(nsec, 1000);
58 	return nsec;
59 }
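
/*
 * Worked example (editor's illustration, not part of the original
 * file): the "+ 500" biases the divide so that ns2usecs() rounds to
 * the nearest microsecond instead of truncating:
 *
 *	ns2usecs(1499)    == 1	(1999 / 1000)
 *	ns2usecs(1500)    == 2	(2000 / 1000)
 *	ns2usecs(1234567) == 1235
 */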
60 
61 cycle_t ftrace_now(int cpu)
62 {
63 	return cpu_clock(cpu);
64 }
65 
66 /*
67  * The global_trace is the descriptor that holds the tracing
68  * buffers for the live tracing. For each CPU, it contains
69  * a linked list of pages that will store trace entries. The
70  * page descriptors of the pages in memory are used to hold
71  * the linked list, by linking the lru item of each page
72  * descriptor to the other pages in that CPU's buffer.
73  *
74  * For each active CPU there is a data field that holds the
75  * pages for the buffer for that CPU. Each CPU has the same number
76  * of pages allocated for its buffer.
77  */
78 static struct trace_array	global_trace;
79 
80 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
81 
82 /*
83  * The max_tr is used to snapshot the global_trace when a maximum
84  * latency is reached. Some tracers will use this to store a maximum
85  * trace while the live trace continues to run.
86  *
87  * The buffers for the max_tr are set up the same as the global_trace.
88  * When a snapshot is taken, the linked list of the max_tr is swapped
89  * with the linked list of the global_trace and the buffers are reset for
90  * the global_trace so the tracing can continue.
91  */
92 static struct trace_array	max_tr;
93 
94 static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
95 
96 /* tracer_enabled is used to toggle activation of a tracer */
97 static int			tracer_enabled = 1;
98 
99 /* function tracing enabled */
100 int				ftrace_function_enabled;
101 
102 /*
103  * trace_nr_entries is the number of entries that are allocated
104  * for a buffer. Note, the number of entries is always rounded
105  * up to a multiple of ENTRIES_PER_PAGE.
106  */
107 static unsigned long		trace_nr_entries = 65536UL;
108 
109 /* trace_types holds a linked list of available tracers. */
110 static struct tracer		*trace_types __read_mostly;
111 
112 /* current_trace points to the tracer that is currently active */
113 static struct tracer		*current_trace __read_mostly;
114 
115 /*
116  * max_tracer_type_len is used to simplify the allocation of
117  * buffers to read userspace tracer names. We keep track of
118  * the longest tracer name registered.
119  */
120 static int			max_tracer_type_len;
121 
122 /*
123  * trace_types_lock is used to protect the trace_types list.
124  * This lock is also used to keep user access serialized.
125  * Accesses from userspace will grab this lock while userspace
126  * activities happen inside the kernel.
127  */
128 static DEFINE_MUTEX(trace_types_lock);
129 
130 /* trace_wait is a waitqueue for tasks blocked on trace_poll */
131 static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
132 
133 /* trace_flags holds iter_ctrl options */
134 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
135 
136 static notrace void no_trace_init(struct trace_array *tr)
137 {
138 	int cpu;
139 
140 	ftrace_function_enabled = 0;
141 	if (tr->ctrl)
142 		for_each_online_cpu(cpu)
143 			tracing_reset(tr->data[cpu]);
144 	tracer_enabled = 0;
145 }
146 
147 /* dummy trace to disable tracing */
148 static struct tracer no_tracer __read_mostly = {
149 	.name		= "none",
150 	.init		= no_trace_init
151 };
152 
153 
154 /**
155  * trace_wake_up - wake up tasks waiting for trace input
156  *
157  * Simply wakes up any task that is blocked on the trace_wait
158  * queue. This is used with trace_poll for tasks polling the trace.
159  */
160 void trace_wake_up(void)
161 {
162 	/*
163 	 * The runqueue_is_locked() check can race, but this is the best we
164 	 * have for now:
165 	 */
166 	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
167 		wake_up(&trace_wait);
168 }
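
/*
 * Sketch (editor's illustration; the real poll handler lives further
 * down in this file): a reader's poll() callback registers itself on
 * trace_wait with the usual wait-queue idiom, so that the wake_up()
 * above can make it runnable:
 *
 *	poll_wait(filp, &trace_wait, poll_table);
 *	return buffer_has_entries ? (POLLIN | POLLRDNORM) : 0;
 *
 * where buffer_has_entries stands in for a real emptiness check such
 * as !trace_empty(iter).
 */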
169 
170 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
171 
172 static int __init set_nr_entries(char *str)
173 {
174 	unsigned long nr_entries;
175 	int ret;
176 
177 	if (!str)
178 		return 0;
179 	ret = strict_strtoul(str, 0, &nr_entries);
180 	/* nr_entries cannot be zero */
181 	if (ret < 0 || nr_entries == 0)
182 		return 0;
183 	trace_nr_entries = nr_entries;
184 	return 1;
185 }
186 __setup("trace_entries=", set_nr_entries);
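
/*
 * Usage example (illustration only): booting with "trace_entries=100000"
 * requests 100000 entries per CPU buffer. Since the allocator below
 * works in whole pages, the effective size is rounded up to a multiple
 * of ENTRIES_PER_PAGE, i.e. roughly:
 *
 *	pages_per_cpu = DIV_ROUND_UP(trace_nr_entries, ENTRIES_PER_PAGE);
 */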
187 
188 unsigned long nsecs_to_usecs(unsigned long nsecs)
189 {
190 	return nsecs / 1000;
191 }
192 
193 /*
194  * trace_flag_type is an enumeration that holds different
195  * states when a trace occurs. These are:
196  *  IRQS_OFF	- interrupts were disabled
197  *  NEED_RESCHED - reschedule is requested
198  *  HARDIRQ	- inside an interrupt handler
199  *  SOFTIRQ	- inside a softirq handler
200  */
201 enum trace_flag_type {
202 	TRACE_FLAG_IRQS_OFF		= 0x01,
203 	TRACE_FLAG_NEED_RESCHED		= 0x02,
204 	TRACE_FLAG_HARDIRQ		= 0x04,
205 	TRACE_FLAG_SOFTIRQ		= 0x08,
206 };
207 
208 /*
209  * TRACE_ITER_SYM_MASK masks the options in trace_flags that
210  * control the output of kernel symbols.
211  */
212 #define TRACE_ITER_SYM_MASK \
213 	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
214 
215 /* These must match the bit positions in trace_iterator_flags */
216 static const char *trace_options[] = {
217 	"print-parent",
218 	"sym-offset",
219 	"sym-addr",
220 	"verbose",
221 	"raw",
222 	"hex",
223 	"bin",
224 	"block",
225 	"stacktrace",
226 	"sched-tree",
227 	NULL
228 };
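
/*
 * Example (editor's sketch, not the file's actual option parser; the
 * real one lives in the iter_ctrl file handlers below): index i in
 * trace_options[] corresponds to bit (1 << i) in trace_flags, so an
 * option can be enabled by name like this:
 */
static int __maybe_unused example_enable_option(const char *name)
{
	int i;

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(name, trace_options[i]) == 0) {
			trace_flags |= 1 << i;
			return 0;
		}
	}
	return -EINVAL;	/* no such option */
}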
229 
230 /*
231  * ftrace_max_lock is used to protect the swapping of buffers
232  * when taking a max snapshot. The buffers themselves are
233  * protected by per_cpu spinlocks. But the action of the swap
234  * needs its own lock.
235  *
236  * This is defined as a raw_spinlock_t in order to help
237  * with performance when lockdep debugging is enabled.
238  */
239 static raw_spinlock_t ftrace_max_lock =
240 	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
241 
242 /*
243  * Copy the new maximum trace into the separate maximum-trace
244  * structure. (this way the maximum trace is permanently saved,
245  * for later retrieval via /debugfs/tracing/latency_trace)
246  */
247 static void
248 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
249 {
250 	struct trace_array_cpu *data = tr->data[cpu];
251 
252 	max_tr.cpu = cpu;
253 	max_tr.time_start = data->preempt_timestamp;
254 
255 	data = max_tr.data[cpu];
256 	data->saved_latency = tracing_max_latency;
257 
258 	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
259 	data->pid = tsk->pid;
260 	data->uid = tsk->uid;
261 	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
262 	data->policy = tsk->policy;
263 	data->rt_priority = tsk->rt_priority;
264 
265 	/* record this task's comm */
266 	tracing_record_cmdline(current);
267 }
268 
269 #define CHECK_COND(cond)			\
270 	if (unlikely(cond)) {			\
271 		tracing_disabled = 1;		\
272 		WARN_ON(1);			\
273 		return -1;			\
274 	}
275 
276 /**
277  * check_pages - integrity check of trace buffers
278  *
279  * As a safety measure we check to make sure the data pages have not
280  * been corrupted.
281  */
282 int check_pages(struct trace_array_cpu *data)
283 {
284 	struct page *page, *tmp;
285 
286 	CHECK_COND(data->trace_pages.next->prev != &data->trace_pages);
287 	CHECK_COND(data->trace_pages.prev->next != &data->trace_pages);
288 
289 	list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
290 		CHECK_COND(page->lru.next->prev != &page->lru);
291 		CHECK_COND(page->lru.prev->next != &page->lru);
292 	}
293 
294 	return 0;
295 }
296 
297 /**
298  * head_page - page address of the first page in per_cpu buffer.
299  *
300  * head_page returns the page address of the first page in
301  * a per_cpu buffer. This also performs various consistency
302  * checks to make sure the buffer has not been corrupted.
303  */
304 void *head_page(struct trace_array_cpu *data)
305 {
306 	struct page *page;
307 
308 	if (list_empty(&data->trace_pages))
309 		return NULL;
310 
311 	page = list_entry(data->trace_pages.next, struct page, lru);
312 	BUG_ON(&page->lru == &data->trace_pages);
313 
314 	return page_address(page);
315 }
316 
317 /**
318  * trace_seq_printf - sequence printing of trace information
319  * @s: trace sequence descriptor
320  * @fmt: printf format string
321  *
322  * The tracer may use either sequence operations or its own
323  * copy to user routines. To simplify formatting of a trace,
324  * trace_seq_printf is used to store strings into a special
325  * buffer (@s). Then the output may be either used by
326  * the sequencer or pulled into another buffer.
327  */
328 int
329 trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
330 {
331 	int len = (PAGE_SIZE - 1) - s->len;
332 	va_list ap;
333 	int ret;
334 
335 	if (!len)
336 		return 0;
337 
338 	va_start(ap, fmt);
339 	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
340 	va_end(ap);
341 
342 	/* If we can't write it all, don't bother writing anything */
343 	if (ret >= len)
344 		return 0;
345 
346 	s->len += ret;
347 
348 	return len;
349 }
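
/*
 * Usage sketch (editor's illustration): a trace_seq is a page-sized
 * staging buffer. A tracer formats text into it, and the output layer
 * either hands the result to seq_file or copies it to user space:
 */
static void __maybe_unused example_seq_fill(struct trace_seq *s)
{
	/* the addresses are dummy values for illustration */
	trace_seq_printf(s, "ip=%08lx parent=%08lx\n",
			 0xc0100000UL, 0xc0100040UL);
	/* s->buffer now holds the text, s->len its length */
}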
350 
351 /**
352  * trace_seq_puts - trace sequence printing of simple string
353  * @s: trace sequence descriptor
354  * @str: simple string to record
355  *
356  * The tracer may use either the sequence operations or its own
357  * copy to user routines. This function records a simple string
358  * into a special buffer (@s) for later retrieval by a sequencer
359  * or other mechanism.
360  */
361 static int
362 trace_seq_puts(struct trace_seq *s, const char *str)
363 {
364 	int len = strlen(str);
365 
366 	if (len > ((PAGE_SIZE - 1) - s->len))
367 		return 0;
368 
369 	memcpy(s->buffer + s->len, str, len);
370 	s->len += len;
371 
372 	return len;
373 }
374 
375 static int
376 trace_seq_putc(struct trace_seq *s, unsigned char c)
377 {
378 	if (s->len >= (PAGE_SIZE - 1))
379 		return 0;
380 
381 	s->buffer[s->len++] = c;
382 
383 	return 1;
384 }
385 
386 static int
387 trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
388 {
389 	if (len > ((PAGE_SIZE - 1) - s->len))
390 		return 0;
391 
392 	memcpy(s->buffer + s->len, mem, len);
393 	s->len += len;
394 
395 	return len;
396 }
397 
398 #define HEX_CHARS 17
399 static const char hex2asc[] = "0123456789abcdef";
400 
401 static int
402 trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
403 {
404 	unsigned char hex[HEX_CHARS];
405 	unsigned char *data = mem;
406 	unsigned char byte;
407 	int i, j;
408 
409 	BUG_ON(2 * len + 1 > HEX_CHARS);	/* two chars per byte plus a space */
410 
411 #ifdef __BIG_ENDIAN
412 	for (i = 0, j = 0; i < len; i++) {
413 #else
414 	for (i = len-1, j = 0; i >= 0; i--) {
415 #endif
416 		byte = data[i];
417 
418 		hex[j++] = hex2asc[byte >> 4];	/* high nibble first */
419 		hex[j++] = hex2asc[byte & 0x0f];
420 	}
421 	hex[j++] = ' ';
422 
423 	return trace_seq_putmem(s, hex, j);
424 }
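
/*
 * Worked example (illustration only): on a little-endian machine,
 * dumping a u16 that holds 0x1234 (stored as the bytes 34 12) walks
 * the bytes from most significant to least, emits the high nibble of
 * each byte first, and appends a trailing space: the output is "1234 ".
 */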
425 
426 static void
427 trace_seq_reset(struct trace_seq *s)
428 {
429 	s->len = 0;
430 	s->readpos = 0;
431 }
432 
433 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
434 {
435 	int len;
436 	int ret;
437 
438 	if (s->len <= s->readpos)
439 		return -EBUSY;
440 
441 	len = s->len - s->readpos;
442 	if (cnt > len)
443 		cnt = len;
444 	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
445 	if (ret)
446 		return -EFAULT;
447 	s->readpos += cnt;	/* advance only by what was actually copied */
448 	s->readpos += len;
449 	return cnt;
450 }
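
/*
 * Sketch (editor's illustration): read() handlers drain a trace_seq
 * incrementally. Because readpos advances by the number of bytes
 * actually copied, a short read picks up exactly where the previous
 * one left off:
 *
 *	ret = trace_seq_to_user(s, ubuf, cnt);		first chunk
 *	ret = trace_seq_to_user(s, ubuf + ret, cnt);	next chunk
 *
 * until the call returns -EBUSY, meaning nothing is left to read.
 */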
451 
452 static void
453 trace_print_seq(struct seq_file *m, struct trace_seq *s)
454 {
455 	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
456 
457 	s->buffer[len] = 0;
458 	seq_puts(m, s->buffer);
459 
460 	trace_seq_reset(s);
461 }
462 
463 /*
464  * flip the trace buffers between two trace descriptors.
465  * This usually is the buffers between the global_trace and
466  * the max_tr to record a snapshot of a current trace.
467  *
468  * The ftrace_max_lock must be held.
469  */
470 static void
471 flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
472 {
473 	struct list_head flip_pages;
474 
475 	INIT_LIST_HEAD(&flip_pages);
476 
477 	memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
478 		sizeof(struct trace_array_cpu) -
479 		offsetof(struct trace_array_cpu, trace_head_idx));
480 
481 	check_pages(tr1);
482 	check_pages(tr2);
483 	list_splice_init(&tr1->trace_pages, &flip_pages);
484 	list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
485 	list_splice_init(&flip_pages, &tr2->trace_pages);
486 	BUG_ON(!list_empty(&flip_pages));
487 	check_pages(tr1);
488 	check_pages(tr2);
489 }
490 
491 /**
492  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
493  * @tr: tracer
494  * @tsk: the task with the latency
495  * @cpu: The cpu that initiated the trace.
496  *
497  * Flip the buffers between the @tr and the max_tr and record information
498  * about which task was the cause of this latency.
499  */
500 void
501 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
502 {
503 	struct trace_array_cpu *data;
504 	int i;
505 
506 	WARN_ON_ONCE(!irqs_disabled());
507 	__raw_spin_lock(&ftrace_max_lock);
508 	/* clear out all the previous traces */
509 	for_each_tracing_cpu(i) {
510 		data = tr->data[i];
511 		flip_trace(max_tr.data[i], data);
512 		tracing_reset(data);
513 	}
514 
515 	__update_max_tr(tr, tsk, cpu);
516 	__raw_spin_unlock(&ftrace_max_lock);
517 }
518 
519 /**
520  * update_max_tr_single - only copy one trace over, and reset the rest
521  * @tr: tracer
522  * @tsk: task with the latency
523  * @cpu: the cpu of the buffer to copy.
524  *
525  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
526  */
527 void
528 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
529 {
530 	struct trace_array_cpu *data = tr->data[cpu];
531 	int i;
532 
533 	WARN_ON_ONCE(!irqs_disabled());
534 	__raw_spin_lock(&ftrace_max_lock);
535 	for_each_tracing_cpu(i)
536 		tracing_reset(max_tr.data[i]);
537 
538 	flip_trace(max_tr.data[cpu], data);
539 	tracing_reset(data);
540 
541 	__update_max_tr(tr, tsk, cpu);
542 	__raw_spin_unlock(&ftrace_max_lock);
543 }
544 
545 /**
546  * register_tracer - register a tracer with the ftrace system.
547  * @type: the plugin for the tracer
548  *
549  * Register a new plugin tracer.
550  */
551 int register_tracer(struct tracer *type)
552 {
553 	struct tracer *t;
554 	int len;
555 	int ret = 0;
556 
557 	if (!type->name) {
558 		pr_info("Tracer must have a name\n");
559 		return -1;
560 	}
561 
562 	mutex_lock(&trace_types_lock);
563 	for (t = trace_types; t; t = t->next) {
564 		if (strcmp(type->name, t->name) == 0) {
565 			/* already found */
566 			pr_info("Tracer %s already registered\n",
567 				type->name);
568 			ret = -1;
569 			goto out;
570 		}
571 	}
572 
573 #ifdef CONFIG_FTRACE_STARTUP_TEST
574 	if (type->selftest) {
575 		struct tracer *saved_tracer = current_trace;
576 		struct trace_array_cpu *data;
577 		struct trace_array *tr = &global_trace;
578 		int saved_ctrl = tr->ctrl;
579 		int i;
580 		/*
581 		 * Run a selftest on this tracer.
582 		 * Here we reset the trace buffer, and set the current
583 		 * tracer to be this tracer. The tracer can then run some
584 		 * internal tracing to verify that everything is in order.
585 		 * If we fail, we do not register this tracer.
586 		 */
587 		for_each_tracing_cpu(i) {
588 			data = tr->data[i];
589 			if (!head_page(data))
590 				continue;
591 			tracing_reset(data);
592 		}
593 		current_trace = type;
594 		tr->ctrl = 0;
595 		/* the test is responsible for initializing and enabling */
596 		pr_info("Testing tracer %s: ", type->name);
597 		ret = type->selftest(type, tr);
598 		/* the test is responsible for resetting too */
599 		current_trace = saved_tracer;
600 		tr->ctrl = saved_ctrl;
601 		if (ret) {
602 			printk(KERN_CONT "FAILED!\n");
603 			goto out;
604 		}
605 		/* Only reset on passing, to avoid touching corrupted buffers */
606 		for_each_tracing_cpu(i) {
607 			data = tr->data[i];
608 			if (!head_page(data))
609 				continue;
610 			tracing_reset(data);
611 		}
612 		printk(KERN_CONT "PASSED\n");
613 	}
614 #endif
615 
616 	type->next = trace_types;
617 	trace_types = type;
618 	len = strlen(type->name);
619 	if (len > max_tracer_type_len)
620 		max_tracer_type_len = len;
621 
622  out:
623 	mutex_unlock(&trace_types_lock);
624 
625 	return ret;
626 }
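
/*
 * Example (editor's sketch of a hypothetical plugin; the "none" tracer
 * above is the real minimal case): a tracer fills in a struct tracer
 * and registers it, after which it appears in the trace_types list
 * that show_traces_open() exposes:
 */
static void example_tracer_init(struct trace_array *tr)
{
	/* a real tracer would reset its buffers and hook callbacks here */
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
};

/* register_tracer(&example_tracer) would then be called from __init code */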
627 
628 void unregister_tracer(struct tracer *type)
629 {
630 	struct tracer **t;
631 	int len;
632 
633 	mutex_lock(&trace_types_lock);
634 	for (t = &trace_types; *t; t = &(*t)->next) {
635 		if (*t == type)
636 			goto found;
637 	}
638 	pr_info("Tracer %s not registered\n", type->name);
639 	goto out;
640 
641  found:
642 	*t = (*t)->next;
643 	if (strlen(type->name) != max_tracer_type_len)
644 		goto out;
645 
646 	max_tracer_type_len = 0;
647 	for (t = &trace_types; *t; t = &(*t)->next) {
648 		len = strlen((*t)->name);
649 		if (len > max_tracer_type_len)
650 			max_tracer_type_len = len;
651 	}
652  out:
653 	mutex_unlock(&trace_types_lock);
654 }
655 
656 void tracing_reset(struct trace_array_cpu *data)
657 {
658 	data->trace_idx = 0;
659 	data->overrun = 0;
660 	data->trace_head = data->trace_tail = head_page(data);
661 	data->trace_head_idx = 0;
662 	data->trace_tail_idx = 0;
663 }
664 
665 #define SAVED_CMDLINES 128
666 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
667 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
668 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
669 static int cmdline_idx;
670 static DEFINE_SPINLOCK(trace_cmdline_lock);
671 
672 /* temporarily disable recording */
673 atomic_t trace_record_cmdline_disabled __read_mostly;
674 
675 static void trace_init_cmdlines(void)
676 {
677 	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
678 	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
679 	cmdline_idx = 0;
680 }
681 
682 void trace_stop_cmdline_recording(void);
683 
684 static void trace_save_cmdline(struct task_struct *tsk)
685 {
686 	unsigned map;
687 	unsigned idx;
688 
689 	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
690 		return;
691 
692 	/*
693 	 * It's not the end of the world if we don't get
694 	 * the lock, but we also don't want to spin
695 	 * nor do we want to disable interrupts,
696 	 * so if we miss here, then better luck next time.
697 	 */
698 	if (!spin_trylock(&trace_cmdline_lock))
699 		return;
700 
701 	idx = map_pid_to_cmdline[tsk->pid];
702 	if (idx >= SAVED_CMDLINES) {
703 		idx = (cmdline_idx + 1) % SAVED_CMDLINES;
704 
705 		map = map_cmdline_to_pid[idx];
706 		if (map <= PID_MAX_DEFAULT)
707 			map_pid_to_cmdline[map] = (unsigned)-1;
708 
709 		map_pid_to_cmdline[tsk->pid] = idx;
710 
711 		cmdline_idx = idx;
712 	}
713 
714 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
715 
716 	spin_unlock(&trace_cmdline_lock);
717 }
718 
719 static char *trace_find_cmdline(int pid)
720 {
721 	char *cmdline = "<...>";
722 	unsigned map;
723 
724 	if (!pid)
725 		return "<idle>";
726 
727 	if (pid > PID_MAX_DEFAULT)
728 		goto out;
729 
730 	map = map_pid_to_cmdline[pid];
731 	if (map >= SAVED_CMDLINES)
732 		goto out;
733 
734 	cmdline = saved_cmdlines[map];
735 
736  out:
737 	return cmdline;
738 }
739 
740 void tracing_record_cmdline(struct task_struct *tsk)
741 {
742 	if (atomic_read(&trace_record_cmdline_disabled))
743 		return;
744 
745 	trace_save_cmdline(tsk);
746 }
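
/*
 * Example (illustration only): the mapping acts as a small cache of
 * recently seen pids, so a later lookup can resolve a pid recorded
 * earlier without touching the task struct:
 *
 *	tracing_record_cmdline(current);
 *	comm = trace_find_cmdline(current->pid);
 *
 * yielding e.g. "bash", or "<...>" if the entry was already recycled.
 */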
747 
748 static inline struct list_head *
749 trace_next_list(struct trace_array_cpu *data, struct list_head *next)
750 {
751 	/*
752 	 * Round-robin - but skip the head (which is not a real page):
753 	 */
754 	next = next->next;
755 	if (unlikely(next == &data->trace_pages))
756 		next = next->next;
757 	BUG_ON(next == &data->trace_pages);
758 
759 	return next;
760 }
761 
762 static inline void *
763 trace_next_page(struct trace_array_cpu *data, void *addr)
764 {
765 	struct list_head *next;
766 	struct page *page;
767 
768 	page = virt_to_page(addr);
769 
770 	next = trace_next_list(data, &page->lru);
771 	page = list_entry(next, struct page, lru);
772 
773 	return page_address(page);
774 }
775 
776 static inline struct trace_entry *
777 tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
778 {
779 	unsigned long idx, idx_next;
780 	struct trace_entry *entry;
781 
782 	data->trace_idx++;
783 	idx = data->trace_head_idx;
784 	idx_next = idx + 1;
785 
786 	BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);
787 
788 	entry = data->trace_head + idx * TRACE_ENTRY_SIZE;
789 
790 	if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
791 		data->trace_head = trace_next_page(data, data->trace_head);
792 		idx_next = 0;
793 	}
794 
795 	if (data->trace_head == data->trace_tail &&
796 	    idx_next == data->trace_tail_idx) {
797 		/* overrun */
798 		data->overrun++;
799 		data->trace_tail_idx++;
800 		if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
801 			data->trace_tail =
802 				trace_next_page(data, data->trace_tail);
803 			data->trace_tail_idx = 0;
804 		}
805 	}
806 
807 	data->trace_head_idx = idx_next;
808 
809 	return entry;
810 }
811 
812 static inline void
813 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
814 {
815 	struct task_struct *tsk = current;
816 	unsigned long pc;
817 
818 	pc = preempt_count();
819 
820 	entry->preempt_count	= pc & 0xff;
821 	entry->pid		= (tsk) ? tsk->pid : 0;
822 	entry->t		= ftrace_now(raw_smp_processor_id());
823 	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
824 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
825 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
826 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
827 }
828 
829 void
830 trace_function(struct trace_array *tr, struct trace_array_cpu *data,
831 	       unsigned long ip, unsigned long parent_ip, unsigned long flags)
832 {
833 	struct trace_entry *entry;
834 	unsigned long irq_flags;
835 
836 	raw_local_irq_save(irq_flags);
837 	__raw_spin_lock(&data->lock);
838 	entry			= tracing_get_trace_entry(tr, data);
839 	tracing_generic_entry_update(entry, flags);
840 	entry->type		= TRACE_FN;
841 	entry->fn.ip		= ip;
842 	entry->fn.parent_ip	= parent_ip;
843 	__raw_spin_unlock(&data->lock);
844 	raw_local_irq_restore(irq_flags);
845 }
846 
847 void
848 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
849        unsigned long ip, unsigned long parent_ip, unsigned long flags)
850 {
851 	if (likely(!atomic_read(&data->disabled)))
852 		trace_function(tr, data, ip, parent_ip, flags);
853 }
854 
855 #ifdef CONFIG_MMIOTRACE
856 void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data,
857 						struct mmiotrace_rw *rw)
858 {
859 	struct trace_entry *entry;
860 	unsigned long irq_flags;
861 
862 	raw_local_irq_save(irq_flags);
863 	__raw_spin_lock(&data->lock);
864 
865 	entry			= tracing_get_trace_entry(tr, data);
866 	tracing_generic_entry_update(entry, 0);
867 	entry->type		= TRACE_MMIO_RW;
868 	entry->mmiorw		= *rw;
869 
870 	__raw_spin_unlock(&data->lock);
871 	raw_local_irq_restore(irq_flags);
872 
873 	trace_wake_up();
874 }
875 
876 void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data,
877 						struct mmiotrace_map *map)
878 {
879 	struct trace_entry *entry;
880 	unsigned long irq_flags;
881 
882 	raw_local_irq_save(irq_flags);
883 	__raw_spin_lock(&data->lock);
884 
885 	entry			= tracing_get_trace_entry(tr, data);
886 	tracing_generic_entry_update(entry, 0);
887 	entry->type		= TRACE_MMIO_MAP;
888 	entry->mmiomap		= *map;
889 
890 	__raw_spin_unlock(&data->lock);
891 	raw_local_irq_restore(irq_flags);
892 
893 	trace_wake_up();
894 }
895 #endif
896 
897 void __trace_stack(struct trace_array *tr,
898 		   struct trace_array_cpu *data,
899 		   unsigned long flags,
900 		   int skip)
901 {
902 	struct trace_entry *entry;
903 	struct stack_trace trace;
904 
905 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
906 		return;
907 
908 	entry			= tracing_get_trace_entry(tr, data);
909 	tracing_generic_entry_update(entry, flags);
910 	entry->type		= TRACE_STACK;
911 
912 	memset(&entry->stack, 0, sizeof(entry->stack));
913 
914 	trace.nr_entries	= 0;
915 	trace.max_entries	= FTRACE_STACK_ENTRIES;
916 	trace.skip		= skip;
917 	trace.entries		= entry->stack.caller;
918 
919 	save_stack_trace(&trace);
920 }
921 
922 void
923 __trace_special(void *__tr, void *__data,
924 		unsigned long arg1, unsigned long arg2, unsigned long arg3)
925 {
926 	struct trace_array_cpu *data = __data;
927 	struct trace_array *tr = __tr;
928 	struct trace_entry *entry;
929 	unsigned long irq_flags;
930 
931 	raw_local_irq_save(irq_flags);
932 	__raw_spin_lock(&data->lock);
933 	entry			= tracing_get_trace_entry(tr, data);
934 	tracing_generic_entry_update(entry, 0);
935 	entry->type		= TRACE_SPECIAL;
936 	entry->special.arg1	= arg1;
937 	entry->special.arg2	= arg2;
938 	entry->special.arg3	= arg3;
939 	__trace_stack(tr, data, irq_flags, 4);
940 	__raw_spin_unlock(&data->lock);
941 	raw_local_irq_restore(irq_flags);
942 
943 	trace_wake_up();
944 }
945 
946 void
947 tracing_sched_switch_trace(struct trace_array *tr,
948 			   struct trace_array_cpu *data,
949 			   struct task_struct *prev,
950 			   struct task_struct *next,
951 			   unsigned long flags)
952 {
953 	struct trace_entry *entry;
954 	unsigned long irq_flags;
955 
956 	raw_local_irq_save(irq_flags);
957 	__raw_spin_lock(&data->lock);
958 	entry			= tracing_get_trace_entry(tr, data);
959 	tracing_generic_entry_update(entry, flags);
960 	entry->type		= TRACE_CTX;
961 	entry->ctx.prev_pid	= prev->pid;
962 	entry->ctx.prev_prio	= prev->prio;
963 	entry->ctx.prev_state	= prev->state;
964 	entry->ctx.next_pid	= next->pid;
965 	entry->ctx.next_prio	= next->prio;
966 	entry->ctx.next_state	= next->state;
967 	__trace_stack(tr, data, flags, 5);
968 	__raw_spin_unlock(&data->lock);
969 	raw_local_irq_restore(irq_flags);
970 }
971 
972 void
973 tracing_sched_wakeup_trace(struct trace_array *tr,
974 			   struct trace_array_cpu *data,
975 			   struct task_struct *wakee,
976 			   struct task_struct *curr,
977 			   unsigned long flags)
978 {
979 	struct trace_entry *entry;
980 	unsigned long irq_flags;
981 
982 	raw_local_irq_save(irq_flags);
983 	__raw_spin_lock(&data->lock);
984 	entry			= tracing_get_trace_entry(tr, data);
985 	tracing_generic_entry_update(entry, flags);
986 	entry->type		= TRACE_WAKE;
987 	entry->ctx.prev_pid	= curr->pid;
988 	entry->ctx.prev_prio	= curr->prio;
989 	entry->ctx.prev_state	= curr->state;
990 	entry->ctx.next_pid	= wakee->pid;
991 	entry->ctx.next_prio	= wakee->prio;
992 	entry->ctx.next_state	= wakee->state;
993 	__trace_stack(tr, data, flags, 6);
994 	__raw_spin_unlock(&data->lock);
995 	raw_local_irq_restore(irq_flags);
996 
997 	trace_wake_up();
998 }
999 
1000 void
1001 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
1002 {
1003 	struct trace_array *tr = &global_trace;
1004 	struct trace_array_cpu *data;
1005 	unsigned long flags;
1006 	long disabled;
1007 	int cpu;
1008 
1009 	if (tracing_disabled || current_trace == &no_tracer || !tr->ctrl)
1010 		return;
1011 
1012 	local_irq_save(flags);
1013 	cpu = raw_smp_processor_id();
1014 	data = tr->data[cpu];
1015 	disabled = atomic_inc_return(&data->disabled);
1016 
1017 	if (likely(disabled == 1))
1018 		__trace_special(tr, data, arg1, arg2, arg3);
1019 
1020 	atomic_dec(&data->disabled);
1021 	local_irq_restore(flags);
1022 }
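
/*
 * Usage example (editor's illustration): ftrace_special() is meant for
 * ad-hoc debugging. Sprinkle a call into code under test and the three
 * arguments show up as a TRACE_SPECIAL entry, rendered as
 * "# arg1 arg2 arg3" by the output routines below:
 *
 *	ftrace_special(__LINE__, (unsigned long)retval, jiffies);
 */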
1023 
1024 #ifdef CONFIG_FTRACE
1025 static void
1026 function_trace_call(unsigned long ip, unsigned long parent_ip)
1027 {
1028 	struct trace_array *tr = &global_trace;
1029 	struct trace_array_cpu *data;
1030 	unsigned long flags;
1031 	long disabled;
1032 	int cpu;
1033 
1034 	if (unlikely(!ftrace_function_enabled))
1035 		return;
1036 
1037 	if (skip_trace(ip))
1038 		return;
1039 
1040 	local_irq_save(flags);
1041 	cpu = raw_smp_processor_id();
1042 	data = tr->data[cpu];
1043 	disabled = atomic_inc_return(&data->disabled);
1044 
1045 	if (likely(disabled == 1))
1046 		trace_function(tr, data, ip, parent_ip, flags);
1047 
1048 	atomic_dec(&data->disabled);
1049 	local_irq_restore(flags);
1050 }
1051 
1052 static struct ftrace_ops trace_ops __read_mostly =
1053 {
1054 	.func = function_trace_call,
1055 };
1056 
1057 void tracing_start_function_trace(void)
1058 {
1059 	ftrace_function_enabled = 0;
1060 	register_ftrace_function(&trace_ops);
1061 	if (tracer_enabled)
1062 		ftrace_function_enabled = 1;
1063 }
1064 
1065 void tracing_stop_function_trace(void)
1066 {
1067 	ftrace_function_enabled = 0;
1068 	unregister_ftrace_function(&trace_ops);
1069 }
1070 #endif
1071 
1072 enum trace_file_type {
1073 	TRACE_FILE_LAT_FMT	= 1,
1074 };
1075 
1076 static struct trace_entry *
1077 trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
1078 		struct trace_iterator *iter, int cpu)
1079 {
1080 	struct page *page;
1081 	struct trace_entry *array;
1082 
1083 	if (iter->next_idx[cpu] >= tr->entries ||
1084 	    iter->next_idx[cpu] >= data->trace_idx ||
1085 	    (data->trace_head == data->trace_tail &&
1086 	     data->trace_head_idx == data->trace_tail_idx))
1087 		return NULL;
1088 
1089 	if (!iter->next_page[cpu]) {
1090 		/* Initialize the iterator for this cpu trace buffer */
1091 		WARN_ON(!data->trace_tail);
1092 		page = virt_to_page(data->trace_tail);
1093 		iter->next_page[cpu] = &page->lru;
1094 		iter->next_page_idx[cpu] = data->trace_tail_idx;
1095 	}
1096 
1097 	page = list_entry(iter->next_page[cpu], struct page, lru);
1098 	BUG_ON(&data->trace_pages == &page->lru);
1099 
1100 	array = page_address(page);
1101 
1102 	WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
1103 	return &array[iter->next_page_idx[cpu]];
1104 }
1105 
1106 static struct trace_entry *
1107 find_next_entry(struct trace_iterator *iter, int *ent_cpu)
1108 {
1109 	struct trace_array *tr = iter->tr;
1110 	struct trace_entry *ent, *next = NULL;
1111 	int next_cpu = -1;
1112 	int cpu;
1113 
1114 	for_each_tracing_cpu(cpu) {
1115 		if (!head_page(tr->data[cpu]))
1116 			continue;
1117 		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
1118 		/*
1119 		 * Pick the entry with the smallest timestamp:
1120 		 */
1121 		if (ent && (!next || ent->t < next->t)) {
1122 			next = ent;
1123 			next_cpu = cpu;
1124 		}
1125 	}
1126 
1127 	if (ent_cpu)
1128 		*ent_cpu = next_cpu;
1129 
1130 	return next;
1131 }
1132 
1133 static void trace_iterator_increment(struct trace_iterator *iter)
1134 {
1135 	iter->idx++;
1136 	iter->next_idx[iter->cpu]++;
1137 	iter->next_page_idx[iter->cpu]++;
1138 
1139 	if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
1140 		struct trace_array_cpu *data = iter->tr->data[iter->cpu];
1141 
1142 		iter->next_page_idx[iter->cpu] = 0;
1143 		iter->next_page[iter->cpu] =
1144 			trace_next_list(data, iter->next_page[iter->cpu]);
1145 	}
1146 }
1147 
1148 static void trace_consume(struct trace_iterator *iter)
1149 {
1150 	struct trace_array_cpu *data = iter->tr->data[iter->cpu];
1151 
1152 	data->trace_tail_idx++;
1153 	if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
1154 		data->trace_tail = trace_next_page(data, data->trace_tail);
1155 		data->trace_tail_idx = 0;
1156 	}
1157 
1158 	/* Check if we emptied it; if so, reset the index */
1159 	if (data->trace_head == data->trace_tail &&
1160 	    data->trace_head_idx == data->trace_tail_idx)
1161 		data->trace_idx = 0;
1162 }
1163 
1164 static void *find_next_entry_inc(struct trace_iterator *iter)
1165 {
1166 	struct trace_entry *next;
1167 	int next_cpu = -1;
1168 
1169 	next = find_next_entry(iter, &next_cpu);
1170 
1171 	iter->prev_ent = iter->ent;
1172 	iter->prev_cpu = iter->cpu;
1173 
1174 	iter->ent = next;
1175 	iter->cpu = next_cpu;
1176 
1177 	if (next)
1178 		trace_iterator_increment(iter);
1179 
1180 	return next ? iter : NULL;
1181 }
1182 
1183 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1184 {
1185 	struct trace_iterator *iter = m->private;
1186 	void *last_ent = iter->ent;
1187 	int i = (int)*pos;
1188 	void *ent;
1189 
1190 	(*pos)++;
1191 
1192 	/* can't go backwards */
1193 	if (iter->idx > i)
1194 		return NULL;
1195 
1196 	if (iter->idx < 0)
1197 		ent = find_next_entry_inc(iter);
1198 	else
1199 		ent = iter;
1200 
1201 	while (ent && iter->idx < i)
1202 		ent = find_next_entry_inc(iter);
1203 
1204 	iter->pos = *pos;
1205 
1206 	if (last_ent && !ent)
1207 		seq_puts(m, "\n\nvim:ft=help\n");
1208 
1209 	return ent;
1210 }
1211 
1212 static void *s_start(struct seq_file *m, loff_t *pos)
1213 {
1214 	struct trace_iterator *iter = m->private;
1215 	void *p = NULL;
1216 	loff_t l = 0;
1217 	int i;
1218 
1219 	mutex_lock(&trace_types_lock);
1220 
1221 	if (!current_trace || current_trace != iter->trace) {
1222 		mutex_unlock(&trace_types_lock);
1223 		return NULL;
1224 	}
1225 
1226 	atomic_inc(&trace_record_cmdline_disabled);
1227 
1228 	/* let the tracer grab locks here if needed */
1229 	if (current_trace->start)
1230 		current_trace->start(iter);
1231 
1232 	if (*pos != iter->pos) {
1233 		iter->ent = NULL;
1234 		iter->cpu = 0;
1235 		iter->idx = -1;
1236 		iter->prev_ent = NULL;
1237 		iter->prev_cpu = -1;
1238 
1239 		for_each_tracing_cpu(i) {
1240 			iter->next_idx[i] = 0;
1241 			iter->next_page[i] = NULL;
1242 		}
1243 
1244 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
1245 			;
1246 
1247 	} else {
1248 		l = *pos - 1;
1249 		p = s_next(m, p, &l);
1250 	}
1251 
1252 	return p;
1253 }
1254 
1255 static void s_stop(struct seq_file *m, void *p)
1256 {
1257 	struct trace_iterator *iter = m->private;
1258 
1259 	atomic_dec(&trace_record_cmdline_disabled);
1260 
1261 	/* let the tracer release locks here if needed */
1262 	if (current_trace && current_trace == iter->trace && iter->trace->stop)
1263 		iter->trace->stop(iter);
1264 
1265 	mutex_unlock(&trace_types_lock);
1266 }
1267 
1268 #define KRETPROBE_MSG "[unknown/kretprobe'd]"
1269 
1270 #ifdef CONFIG_KRETPROBES
1271 static inline int kretprobed(unsigned long addr)
1272 {
1273 	return addr == (unsigned long)kretprobe_trampoline;
1274 }
1275 #else
1276 static inline int kretprobed(unsigned long addr)
1277 {
1278 	return 0;
1279 }
1280 #endif /* CONFIG_KRETPROBES */
1281 
1282 static int
1283 seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
1284 {
1285 #ifdef CONFIG_KALLSYMS
1286 	char str[KSYM_SYMBOL_LEN];
1287 
1288 	kallsyms_lookup(address, NULL, NULL, NULL, str);
1289 
1290 	return trace_seq_printf(s, fmt, str);
1291 #endif
1292 	return 1;
1293 }
1294 
1295 static int
1296 seq_print_sym_offset(struct trace_seq *s, const char *fmt,
1297 		     unsigned long address)
1298 {
1299 #ifdef CONFIG_KALLSYMS
1300 	char str[KSYM_SYMBOL_LEN];
1301 
1302 	sprint_symbol(str, address);
1303 	return trace_seq_printf(s, fmt, str);
1304 #endif
1305 	return 1;
1306 }
1307 
1308 #ifndef CONFIG_64BIT
1309 # define IP_FMT "%08lx"
1310 #else
1311 # define IP_FMT "%016lx"
1312 #endif
1313 
1314 static int
1315 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
1316 {
1317 	int ret;
1318 
1319 	if (!ip)
1320 		return trace_seq_printf(s, "0");
1321 
1322 	if (sym_flags & TRACE_ITER_SYM_OFFSET)
1323 		ret = seq_print_sym_offset(s, "%s", ip);
1324 	else
1325 		ret = seq_print_sym_short(s, "%s", ip);
1326 
1327 	if (!ret)
1328 		return 0;
1329 
1330 	if (sym_flags & TRACE_ITER_SYM_ADDR)
1331 		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
1332 	return ret;
1333 }
1334 
1335 static void print_lat_help_header(struct seq_file *m)
1336 {
1337 	seq_puts(m, "#                _------=> CPU#            \n");
1338 	seq_puts(m, "#               / _-----=> irqs-off        \n");
1339 	seq_puts(m, "#              | / _----=> need-resched    \n");
1340 	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
1341 	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
1342 	seq_puts(m, "#              |||| /                      \n");
1343 	seq_puts(m, "#              |||||     delay             \n");
1344 	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
1345 	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
1346 }
1347 
1348 static void print_func_help_header(struct seq_file *m)
1349 {
1350 	seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
1351 	seq_puts(m, "#              | |      |          |         |\n");
1352 }
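
/*
 * Example (hypothetical values): a TRACE_FN entry rendered by
 * print_trace_fmt() below lines up under this header as
 *
 *	bash-4251  [01]  1034.748358: do_sys_open <-sys_open
 */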
1353 
1354 
1355 static void
1356 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1357 {
1358 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1359 	struct trace_array *tr = iter->tr;
1360 	struct trace_array_cpu *data = tr->data[tr->cpu];
1361 	struct tracer *type = current_trace;
1362 	unsigned long total   = 0;
1363 	unsigned long entries = 0;
1364 	int cpu;
1365 	const char *name = "preemption";
1366 
1367 	if (type)
1368 		name = type->name;
1369 
1370 	for_each_tracing_cpu(cpu) {
1371 		if (head_page(tr->data[cpu])) {
1372 			total += tr->data[cpu]->trace_idx;
1373 			if (tr->data[cpu]->trace_idx > tr->entries)
1374 				entries += tr->entries;
1375 			else
1376 				entries += tr->data[cpu]->trace_idx;
1377 		}
1378 	}
1379 
1380 	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
1381 		   name, UTS_RELEASE);
1382 	seq_puts(m, "-----------------------------------"
1383 		 "---------------------------------\n");
1384 	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
1385 		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
1386 		   nsecs_to_usecs(data->saved_latency),
1387 		   entries,
1388 		   total,
1389 		   tr->cpu,
1390 #if defined(CONFIG_PREEMPT_NONE)
1391 		   "server",
1392 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
1393 		   "desktop",
1394 #elif defined(CONFIG_PREEMPT)
1395 		   "preempt",
1396 #else
1397 		   "unknown",
1398 #endif
1399 		   /* These are reserved for later use */
1400 		   0, 0, 0, 0);
1401 #ifdef CONFIG_SMP
1402 	seq_printf(m, " #P:%d)\n", num_online_cpus());
1403 #else
1404 	seq_puts(m, ")\n");
1405 #endif
1406 	seq_puts(m, "    -----------------\n");
1407 	seq_printf(m, "    | task: %.16s-%d "
1408 		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
1409 		   data->comm, data->pid, data->uid, data->nice,
1410 		   data->policy, data->rt_priority);
1411 	seq_puts(m, "    -----------------\n");
1412 
1413 	if (data->critical_start) {
1414 		seq_puts(m, " => started at: ");
1415 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
1416 		trace_print_seq(m, &iter->seq);
1417 		seq_puts(m, "\n => ended at:   ");
1418 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
1419 		trace_print_seq(m, &iter->seq);
1420 		seq_puts(m, "\n");
1421 	}
1422 
1423 	seq_puts(m, "\n");
1424 }
1425 
1426 static void
1427 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
1428 {
1429 	int hardirq, softirq;
1430 	char *comm;
1431 
1432 	comm = trace_find_cmdline(entry->pid);
1433 
1434 	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
1435 	trace_seq_printf(s, "%d", cpu);
1436 	trace_seq_printf(s, "%c%c",
1437 			(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
1438 			((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
1439 
1440 	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
1441 	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
1442 	if (hardirq && softirq) {
1443 		trace_seq_putc(s, 'H');
1444 	} else {
1445 		if (hardirq) {
1446 			trace_seq_putc(s, 'h');
1447 		} else {
1448 			if (softirq)
1449 				trace_seq_putc(s, 's');
1450 			else
1451 				trace_seq_putc(s, '.');
1452 		}
1453 	}
1454 
1455 	if (entry->preempt_count)
1456 		trace_seq_printf(s, "%x", entry->preempt_count);
1457 	else
1458 		trace_seq_puts(s, ".");
1459 }
1460 
1461 unsigned long preempt_mark_thresh = 100;
1462 
1463 static void
1464 lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
1465 		    unsigned long rel_usecs)
1466 {
1467 	trace_seq_printf(s, " %4lldus", abs_usecs);
1468 	if (rel_usecs > preempt_mark_thresh)
1469 		trace_seq_puts(s, "!: ");
1470 	else if (rel_usecs > 1)
1471 		trace_seq_puts(s, "+: ");
1472 	else
1473 		trace_seq_puts(s, " : ");
1474 }
1475 
1476 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1477 
1478 static int
1479 print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1480 {
1481 	struct trace_seq *s = &iter->seq;
1482 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1483 	struct trace_entry *next_entry = find_next_entry(iter, NULL);
1484 	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
1485 	struct trace_entry *entry = iter->ent;
1486 	unsigned long abs_usecs;
1487 	unsigned long rel_usecs;
1488 	char *comm;
1489 	int S, T;
1490 	int i;
1491 	unsigned state;
1492 
1493 	if (!next_entry)
1494 		next_entry = entry;
1495 	rel_usecs = ns2usecs(next_entry->t - entry->t);
1496 	abs_usecs = ns2usecs(entry->t - iter->tr->time_start);
1497 
1498 	if (verbose) {
1499 		comm = trace_find_cmdline(entry->pid);
1500 		trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]"
1501 				 " %ld.%03ldms (+%ld.%03ldms): ",
1502 				 comm,
1503 				 entry->pid, cpu, entry->flags,
1504 				 entry->preempt_count, trace_idx,
1505 				 ns2usecs(entry->t),
1506 				 abs_usecs/1000,
1507 				 abs_usecs % 1000, rel_usecs/1000,
1508 				 rel_usecs % 1000);
1509 	} else {
1510 		lat_print_generic(s, entry, cpu);
1511 		lat_print_timestamp(s, abs_usecs, rel_usecs);
1512 	}
1513 	switch (entry->type) {
1514 	case TRACE_FN:
1515 		seq_print_ip_sym(s, entry->fn.ip, sym_flags);
1516 		trace_seq_puts(s, " (");
1517 		if (kretprobed(entry->fn.parent_ip))
1518 			trace_seq_puts(s, KRETPROBE_MSG);
1519 		else
1520 			seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
1521 		trace_seq_puts(s, ")\n");
1522 		break;
1523 	case TRACE_CTX:
1524 	case TRACE_WAKE:
1525 		T = entry->ctx.next_state < sizeof(state_to_char) ?
1526 			state_to_char[entry->ctx.next_state] : 'X';
1527 
1528 		state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0;
1529 		S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
1530 		comm = trace_find_cmdline(entry->ctx.next_pid);
1531 		trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n",
1532 				 entry->ctx.prev_pid,
1533 				 entry->ctx.prev_prio,
1534 				 S, entry->type == TRACE_CTX ? "==>" : "  +",
1535 				 entry->ctx.next_pid,
1536 				 entry->ctx.next_prio,
1537 				 T, comm);
1538 		break;
1539 	case TRACE_SPECIAL:
1540 		trace_seq_printf(s, "# %ld %ld %ld\n",
1541 				 entry->special.arg1,
1542 				 entry->special.arg2,
1543 				 entry->special.arg3);
1544 		break;
1545 	case TRACE_STACK:
1546 		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1547 			if (i)
1548 				trace_seq_puts(s, " <= ");
1549 			seq_print_ip_sym(s, entry->stack.caller[i], sym_flags);
1550 		}
1551 		trace_seq_puts(s, "\n");
1552 		break;
1553 	default:
1554 		trace_seq_printf(s, "Unknown type %d\n", entry->type);
1555 	}
1556 	return 1;
1557 }
1558 
1559 static int print_trace_fmt(struct trace_iterator *iter)
1560 {
1561 	struct trace_seq *s = &iter->seq;
1562 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1563 	struct trace_entry *entry;
1564 	unsigned long usec_rem;
1565 	unsigned long long t;
1566 	unsigned long secs;
1567 	char *comm;
1568 	int ret;
1569 	int S, T;
1570 	int i;
1571 
1572 	entry = iter->ent;
1573 
1574 	comm = trace_find_cmdline(iter->ent->pid);
1575 
1576 	t = ns2usecs(entry->t);
1577 	usec_rem = do_div(t, 1000000ULL);
1578 	secs = (unsigned long)t;
1579 
1580 	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
1581 	if (!ret)
1582 		return 0;
1583 	ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
1584 	if (!ret)
1585 		return 0;
1586 	ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
1587 	if (!ret)
1588 		return 0;
1589 
1590 	switch (entry->type) {
1591 	case TRACE_FN:
1592 		ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags);
1593 		if (!ret)
1594 			return 0;
1595 		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
1596 						entry->fn.parent_ip) {
1597 			ret = trace_seq_printf(s, " <-");
1598 			if (!ret)
1599 				return 0;
1600 			if (kretprobed(entry->fn.parent_ip))
1601 				ret = trace_seq_puts(s, KRETPROBE_MSG);
1602 			else
1603 				ret = seq_print_ip_sym(s, entry->fn.parent_ip,
1604 						       sym_flags);
1605 			if (!ret)
1606 				return 0;
1607 		}
1608 		ret = trace_seq_printf(s, "\n");
1609 		if (!ret)
1610 			return 0;
1611 		break;
1612 	case TRACE_CTX:
1613 	case TRACE_WAKE:
1614 		S = entry->ctx.prev_state < sizeof(state_to_char) ?
1615 			state_to_char[entry->ctx.prev_state] : 'X';
1616 		T = entry->ctx.next_state < sizeof(state_to_char) ?
1617 			state_to_char[entry->ctx.next_state] : 'X';
1618 		ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n",
1619 				       entry->ctx.prev_pid,
1620 				       entry->ctx.prev_prio,
1621 				       S,
1622 				       entry->type == TRACE_CTX ? "==>" : "  +",
1623 				       entry->ctx.next_pid,
1624 				       entry->ctx.next_prio,
1625 				       T);
1626 		if (!ret)
1627 			return 0;
1628 		break;
1629 	case TRACE_SPECIAL:
1630 		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
1631 				 entry->special.arg1,
1632 				 entry->special.arg2,
1633 				 entry->special.arg3);
1634 		if (!ret)
1635 			return 0;
1636 		break;
1637 	case TRACE_STACK:
1638 		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1639 			if (i) {
1640 				ret = trace_seq_puts(s, " <= ");
1641 				if (!ret)
1642 					return 0;
1643 			}
1644 			ret = seq_print_ip_sym(s, entry->stack.caller[i],
1645 					       sym_flags);
1646 			if (!ret)
1647 				return 0;
1648 		}
1649 		ret = trace_seq_puts(s, "\n");
1650 		if (!ret)
1651 			return 0;
1652 		break;
1653 	}
1654 	return 1;
1655 }
1656 
1657 static int print_raw_fmt(struct trace_iterator *iter)
1658 {
1659 	struct trace_seq *s = &iter->seq;
1660 	struct trace_entry *entry;
1661 	int ret;
1662 	int S, T;
1663 
1664 	entry = iter->ent;
1665 
1666 	ret = trace_seq_printf(s, "%d %d %llu ",
1667 		entry->pid, iter->cpu, entry->t);
1668 	if (!ret)
1669 		return 0;
1670 
1671 	switch (entry->type) {
1672 	case TRACE_FN:
1673 		ret = trace_seq_printf(s, "%lx %lx\n",
1674 					entry->fn.ip, entry->fn.parent_ip);
1675 		if (!ret)
1676 			return 0;
1677 		break;
1678 	case TRACE_CTX:
1679 	case TRACE_WAKE:
1680 		S = entry->ctx.prev_state < sizeof(state_to_char) ?
1681 			state_to_char[entry->ctx.prev_state] : 'X';
1682 		T = entry->ctx.next_state < sizeof(state_to_char) ?
1683 			state_to_char[entry->ctx.next_state] : 'X';
1684 		if (entry->type == TRACE_WAKE)
1685 			S = '+';
1686 		ret = trace_seq_printf(s, "%d %d %c %d %d %c\n",
1687 				       entry->ctx.prev_pid,
1688 				       entry->ctx.prev_prio,
1689 				       S,
1690 				       entry->ctx.next_pid,
1691 				       entry->ctx.next_prio,
1692 				       T);
1693 		if (!ret)
1694 			return 0;
1695 		break;
1696 	case TRACE_SPECIAL:
1697 	case TRACE_STACK:
1698 		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
1699 				 entry->special.arg1,
1700 				 entry->special.arg2,
1701 				 entry->special.arg3);
1702 		if (!ret)
1703 			return 0;
1704 		break;
1705 	}
1706 	return 1;
1707 }
1708 
1709 #define SEQ_PUT_FIELD_RET(s, x)				\
1710 do {							\
1711 	if (!trace_seq_putmem(s, &(x), sizeof(x)))	\
1712 		return 0;				\
1713 } while (0)
1714 
1715 #define SEQ_PUT_HEX_FIELD_RET(s, x)			\
1716 do {							\
1717 	if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))	\
1718 		return 0;				\
1719 } while (0)
1720 
1721 static int print_hex_fmt(struct trace_iterator *iter)
1722 {
1723 	struct trace_seq *s = &iter->seq;
1724 	unsigned char newline = '\n';
1725 	struct trace_entry *entry;
1726 	int S, T;
1727 
1728 	entry = iter->ent;
1729 
1730 	SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
1731 	SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
1732 	SEQ_PUT_HEX_FIELD_RET(s, entry->t);
1733 
1734 	switch (entry->type) {
1735 	case TRACE_FN:
1736 		SEQ_PUT_HEX_FIELD_RET(s, entry->fn.ip);
1737 		SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
1738 		break;
1739 	case TRACE_CTX:
1740 	case TRACE_WAKE:
1741 		S = entry->ctx.prev_state < sizeof(state_to_char) ?
1742 			state_to_char[entry->ctx.prev_state] : 'X';
1743 		T = entry->ctx.next_state < sizeof(state_to_char) ?
1744 			state_to_char[entry->ctx.next_state] : 'X';
1745 		if (entry->type == TRACE_WAKE)
1746 			S = '+';
1747 		SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid);
1748 		SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio);
1749 		SEQ_PUT_HEX_FIELD_RET(s, S);
1750 		SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid);
1751 		SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio);
1753 		SEQ_PUT_HEX_FIELD_RET(s, T);
1754 		break;
1755 	case TRACE_SPECIAL:
1756 	case TRACE_STACK:
1757 		SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1);
1758 		SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2);
1759 		SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3);
1760 		break;
1761 	}
1762 	SEQ_PUT_FIELD_RET(s, newline);
1763 
1764 	return 1;
1765 }
1766 
1767 static int print_bin_fmt(struct trace_iterator *iter)
1768 {
1769 	struct trace_seq *s = &iter->seq;
1770 	struct trace_entry *entry;
1771 
1772 	entry = iter->ent;
1773 
1774 	SEQ_PUT_FIELD_RET(s, entry->pid);
1775 	SEQ_PUT_FIELD_RET(s, entry->cpu);
1776 	SEQ_PUT_FIELD_RET(s, entry->t);
1777 
1778 	switch (entry->type) {
1779 	case TRACE_FN:
1780 		SEQ_PUT_FIELD_RET(s, entry->fn.ip);
1781 		SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip);
1782 		break;
1783 	case TRACE_CTX:
1784 		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid);
1785 		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio);
1786 		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state);
1787 		SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid);
1788 		SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
1789 		SEQ_PUT_FIELD_RET(s, entry->ctx.next_state);
1790 		break;
1791 	case TRACE_SPECIAL:
1792 	case TRACE_STACK:
1793 		SEQ_PUT_FIELD_RET(s, entry->special.arg1);
1794 		SEQ_PUT_FIELD_RET(s, entry->special.arg2);
1795 		SEQ_PUT_FIELD_RET(s, entry->special.arg3);
1796 		break;
1797 	}
1798 	return 1;
1799 }
1800 
1801 static int trace_empty(struct trace_iterator *iter)
1802 {
1803 	struct trace_array_cpu *data;
1804 	int cpu;
1805 
1806 	for_each_tracing_cpu(cpu) {
1807 		data = iter->tr->data[cpu];
1808 
1809 		if (head_page(data) && data->trace_idx &&
1810 		    (data->trace_tail != data->trace_head ||
1811 		     data->trace_tail_idx != data->trace_head_idx))
1812 			return 0;
1813 	}
1814 	return 1;
1815 }
1816 
1817 static int print_trace_line(struct trace_iterator *iter)
1818 {
1819 	if (iter->trace && iter->trace->print_line)
1820 		return iter->trace->print_line(iter);
1821 
1822 	if (trace_flags & TRACE_ITER_BIN)
1823 		return print_bin_fmt(iter);
1824 
1825 	if (trace_flags & TRACE_ITER_HEX)
1826 		return print_hex_fmt(iter);
1827 
1828 	if (trace_flags & TRACE_ITER_RAW)
1829 		return print_raw_fmt(iter);
1830 
1831 	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
1832 		return print_lat_fmt(iter, iter->idx, iter->cpu);
1833 
1834 	return print_trace_fmt(iter);
1835 }
1836 
1837 static int s_show(struct seq_file *m, void *v)
1838 {
1839 	struct trace_iterator *iter = v;
1840 
1841 	if (iter->ent == NULL) {
1842 		if (iter->tr) {
1843 			seq_printf(m, "# tracer: %s\n", iter->trace->name);
1844 			seq_puts(m, "#\n");
1845 		}
1846 		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
1847 			/* print nothing if the buffers are empty */
1848 			if (trace_empty(iter))
1849 				return 0;
1850 			print_trace_header(m, iter);
1851 			if (!(trace_flags & TRACE_ITER_VERBOSE))
1852 				print_lat_help_header(m);
1853 		} else {
1854 			if (!(trace_flags & TRACE_ITER_VERBOSE))
1855 				print_func_help_header(m);
1856 		}
1857 	} else {
1858 		print_trace_line(iter);
1859 		trace_print_seq(m, &iter->seq);
1860 	}
1861 
1862 	return 0;
1863 }
1864 
1865 static struct seq_operations tracer_seq_ops = {
1866 	.start		= s_start,
1867 	.next		= s_next,
1868 	.stop		= s_stop,
1869 	.show		= s_show,
1870 };
1871 
1872 static struct trace_iterator *
1873 __tracing_open(struct inode *inode, struct file *file, int *ret)
1874 {
1875 	struct trace_iterator *iter;
1876 
1877 	if (tracing_disabled) {
1878 		*ret = -ENODEV;
1879 		return NULL;
1880 	}
1881 
1882 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1883 	if (!iter) {
1884 		*ret = -ENOMEM;
1885 		goto out;
1886 	}
1887 
1888 	mutex_lock(&trace_types_lock);
1889 	if (current_trace && current_trace->print_max)
1890 		iter->tr = &max_tr;
1891 	else
1892 		iter->tr = inode->i_private;
1893 	iter->trace = current_trace;
1894 	iter->pos = -1;
1895 
1896 	/* TODO stop tracer */
1897 	*ret = seq_open(file, &tracer_seq_ops);
1898 	if (!*ret) {
1899 		struct seq_file *m = file->private_data;
1900 		m->private = iter;
1901 
1902 		/* stop the trace while dumping */
1903 		if (iter->tr->ctrl) {
1904 			tracer_enabled = 0;
1905 			ftrace_function_enabled = 0;
1906 		}
1907 
1908 		if (iter->trace && iter->trace->open)
1909 			iter->trace->open(iter);
1910 	} else {
1911 		kfree(iter);
1912 		iter = NULL;
1913 	}
1914 	mutex_unlock(&trace_types_lock);
1915 
1916  out:
1917 	return iter;
1918 }
1919 
1920 int tracing_open_generic(struct inode *inode, struct file *filp)
1921 {
1922 	if (tracing_disabled)
1923 		return -ENODEV;
1924 
1925 	filp->private_data = inode->i_private;
1926 	return 0;
1927 }
1928 
1929 int tracing_release(struct inode *inode, struct file *file)
1930 {
1931 	struct seq_file *m = (struct seq_file *)file->private_data;
1932 	struct trace_iterator *iter = m->private;
1933 
1934 	mutex_lock(&trace_types_lock);
1935 	if (iter->trace && iter->trace->close)
1936 		iter->trace->close(iter);
1937 
1938 	/* reenable tracing if it was previously enabled */
1939 	if (iter->tr->ctrl) {
1940 		tracer_enabled = 1;
1941 		/*
1942 		 * It is safe to enable function tracing even if it
1943 		 * isn't used
1944 		 */
1945 		ftrace_function_enabled = 1;
1946 	}
1947 	mutex_unlock(&trace_types_lock);
1948 
1949 	seq_release(inode, file);
1950 	kfree(iter);
1951 	return 0;
1952 }
1953 
1954 static int tracing_open(struct inode *inode, struct file *file)
1955 {
1956 	int ret;
1957 
1958 	__tracing_open(inode, file, &ret);
1959 
1960 	return ret;
1961 }
1962 
1963 static int tracing_lt_open(struct inode *inode, struct file *file)
1964 {
1965 	struct trace_iterator *iter;
1966 	int ret;
1967 
1968 	iter = __tracing_open(inode, file, &ret);
1969 
1970 	if (!ret)
1971 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
1972 
1973 	return ret;
1974 }
1975 
1976 
1977 static void *
1978 t_next(struct seq_file *m, void *v, loff_t *pos)
1979 {
1980 	struct tracer *t = m->private;
1981 
1982 	(*pos)++;
1983 
1984 	if (t)
1985 		t = t->next;
1986 
1987 	m->private = t;
1988 
1989 	return t;
1990 }
1991 
1992 static void *t_start(struct seq_file *m, loff_t *pos)
1993 {
1994 	struct tracer *t = m->private;
1995 	loff_t l = 0;
1996 
1997 	mutex_lock(&trace_types_lock);
1998 	for (; t && l < *pos; t = t_next(m, t, &l))
1999 		;
2000 
2001 	return t;
2002 }
2003 
2004 static void t_stop(struct seq_file *m, void *p)
2005 {
2006 	mutex_unlock(&trace_types_lock);
2007 }
2008 
2009 static int t_show(struct seq_file *m, void *v)
2010 {
2011 	struct tracer *t = v;
2012 
2013 	if (!t)
2014 		return 0;
2015 
2016 	seq_printf(m, "%s", t->name);
2017 	if (t->next)
2018 		seq_putc(m, ' ');
2019 	else
2020 		seq_putc(m, '\n');
2021 
2022 	return 0;
2023 }
2024 
2025 static struct seq_operations show_traces_seq_ops = {
2026 	.start		= t_start,
2027 	.next		= t_next,
2028 	.stop		= t_stop,
2029 	.show		= t_show,
2030 };
2031 
2032 static int show_traces_open(struct inode *inode, struct file *file)
2033 {
2034 	int ret;
2035 
2036 	if (tracing_disabled)
2037 		return -ENODEV;
2038 
2039 	ret = seq_open(file, &show_traces_seq_ops);
2040 	if (!ret) {
2041 		struct seq_file *m = file->private_data;
2042 		m->private = trace_types;
2043 	}
2044 
2045 	return ret;
2046 }
2047 
2048 static struct file_operations tracing_fops = {
2049 	.open		= tracing_open,
2050 	.read		= seq_read,
2051 	.llseek		= seq_lseek,
2052 	.release	= tracing_release,
2053 };
2054 
2055 static struct file_operations tracing_lt_fops = {
2056 	.open		= tracing_lt_open,
2057 	.read		= seq_read,
2058 	.llseek		= seq_lseek,
2059 	.release	= tracing_release,
2060 };
2061 
2062 static struct file_operations show_traces_fops = {
2063 	.open		= show_traces_open,
2064 	.read		= seq_read,
2065 	.release	= seq_release,
2066 };
2067 
2068 /*
2069  * Only trace on a CPU if the bitmask is set:
2070  */
2071 static cpumask_t tracing_cpumask = CPU_MASK_ALL;
2072 
2073 /*
2074  * When tracing/tracing_cpumask is modified, this holds
2075  * the new bitmask we are about to install:
2076  */
2077 static cpumask_t tracing_cpumask_new;
2078 
2079 /*
2080  * The tracer itself will not take this lock, but still we want
2081  * to provide a consistent cpumask to user-space:
2082  */
2083 static DEFINE_MUTEX(tracing_cpumask_update_lock);
2084 
2085 /*
2086  * Temporary storage for the character representation of the
2087  * CPU bitmask (and one more byte for the newline):
2088  */
2089 static char mask_str[NR_CPUS + 1];
2090 
2091 static ssize_t
2092 tracing_cpumask_read(struct file *filp, char __user *ubuf,
2093 		     size_t count, loff_t *ppos)
2094 {
2095 	int len;
2096 
2097 	mutex_lock(&tracing_cpumask_update_lock);
2098 
2099 	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2100 	if (count - len < 2) {
2101 		count = -EINVAL;
2102 		goto out_err;
2103 	}
2104 	len += sprintf(mask_str + len, "\n");
2105 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
2106 
2107 out_err:
2108 	mutex_unlock(&tracing_cpumask_update_lock);
2109 
2110 	return count;
2111 }
2112 
2113 static ssize_t
2114 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2115 		      size_t count, loff_t *ppos)
2116 {
2117 	int err, cpu;
2118 
2119 	mutex_lock(&tracing_cpumask_update_lock);
2120 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2121 	if (err)
2122 		goto err_unlock;
2123 
2124 	raw_local_irq_disable();
2125 	__raw_spin_lock(&ftrace_max_lock);
2126 	for_each_tracing_cpu(cpu) {
2127 		/*
2128 		 * Increase/decrease the disabled counter if we are
2129 		 * about to flip a bit in the cpumask:
2130 		 */
2131 		if (cpu_isset(cpu, tracing_cpumask) &&
2132 				!cpu_isset(cpu, tracing_cpumask_new)) {
2133 			atomic_inc(&global_trace.data[cpu]->disabled);
2134 		}
2135 		if (!cpu_isset(cpu, tracing_cpumask) &&
2136 				cpu_isset(cpu, tracing_cpumask_new)) {
2137 			atomic_dec(&global_trace.data[cpu]->disabled);
2138 		}
2139 	}
2140 	__raw_spin_unlock(&ftrace_max_lock);
2141 	raw_local_irq_enable();
2142 
2143 	tracing_cpumask = tracing_cpumask_new;
2144 
2145 	mutex_unlock(&tracing_cpumask_update_lock);
2146 
2147 	return count;
2148 
2149 err_unlock:
2150 	mutex_unlock(&tracing_cpumask_update_lock);
2151 
2152 	return err;
2153 }
2154 
2155 static struct file_operations tracing_cpumask_fops = {
2156 	.open		= tracing_open_generic,
2157 	.read		= tracing_cpumask_read,
2158 	.write		= tracing_cpumask_write,
2159 };
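
/*
 * Illustrative usage (assumes debugfs mounted at /debug as in the
 * README below): the mask is parsed and printed in hex, so writing 3
 * restricts tracing to CPUs 0 and 1. The disabled counters above keep
 * the per-CPU buffers quiescent while bits are being flipped:
 *
 *	# echo 3 > /debug/tracing/tracing_cpumask
 *	# cat /debug/tracing/tracing_cpumask
 */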
2160 
2161 static ssize_t
2162 tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2163 		       size_t cnt, loff_t *ppos)
2164 {
2165 	char *buf;
2166 	int r = 0;
2167 	int len = 0;
2168 	int i;
2169 
2170 	/* calculate max size */
2171 	for (i = 0; trace_options[i]; i++) {
2172 		len += strlen(trace_options[i]);
2173 		len += 3; /* "no" and space */
2174 	}
2175 
2176 	/* +2 for \n and \0 */
2177 	buf = kmalloc(len + 2, GFP_KERNEL);
2178 	if (!buf)
2179 		return -ENOMEM;
2180 
2181 	for (i = 0; trace_options[i]; i++) {
2182 		if (trace_flags & (1 << i))
2183 			r += sprintf(buf + r, "%s ", trace_options[i]);
2184 		else
2185 			r += sprintf(buf + r, "no%s ", trace_options[i]);
2186 	}
2187 
2188 	r += sprintf(buf + r, "\n");
2189 	WARN_ON(r >= len + 2);
2190 
2191 	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2192 
2193 	kfree(buf);
2194 
2195 	return r;
2196 }
2197 
2198 static ssize_t
2199 tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
2200 			size_t cnt, loff_t *ppos)
2201 {
2202 	char buf[64];
2203 	char *cmp = buf;
2204 	int neg = 0;
2205 	int i;
2206 
2207 	if (cnt >= sizeof(buf))
2208 		return -EINVAL;
2209 
2210 	if (copy_from_user(&buf, ubuf, cnt))
2211 		return -EFAULT;
2212 
2213 	buf[cnt] = 0;
2214 
2215 	if (strncmp(buf, "no", 2) == 0) {
2216 		neg = 1;
2217 		cmp += 2;
2218 	}
2219 
2220 	for (i = 0; trace_options[i]; i++) {
2221 		int len = strlen(trace_options[i]);
2222 
2223 		if (strncmp(cmp, trace_options[i], len) == 0) {
2224 			if (neg)
2225 				trace_flags &= ~(1 << i);
2226 			else
2227 				trace_flags |= (1 << i);
2228 			break;
2229 		}
2230 	}
2231 	/*
2232 	 * If no option could be set, return an error:
2233 	 */
2234 	if (!trace_options[i])
2235 		return -EINVAL;
2236 
2237 	filp->f_pos += cnt;
2238 
2239 	return cnt;
2240 }
2241 
2242 static struct file_operations tracing_iter_fops = {
2243 	.open		= tracing_open_generic,
2244 	.read		= tracing_iter_ctrl_read,
2245 	.write		= tracing_iter_ctrl_write,
2246 };
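
/*
 * Illustrative usage: an option is set by echoing its name and cleared
 * by echoing the name prefixed with "no"; names that match no option
 * return -EINVAL:
 *
 *	# echo print-parent > /debug/tracing/iter_ctrl
 *	# echo nosym-offset > /debug/tracing/iter_ctrl
 *	# cat /debug/tracing/iter_ctrl
 *	print-parent nosym-offset nosym-addr noverbose
 */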
2247 
2248 static const char readme_msg[] =
2249 	"tracing mini-HOWTO:\n\n"
2250 	"# mkdir /debug\n"
2251 	"# mount -t debugfs nodev /debug\n\n"
2252 	"# cat /debug/tracing/available_tracers\n"
2253 	"wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
2254 	"# cat /debug/tracing/current_tracer\n"
2255 	"none\n"
2256 	"# echo sched_switch > /debug/tracing/current_tracer\n"
2257 	"# cat /debug/tracing/current_tracer\n"
2258 	"sched_switch\n"
2259 	"# cat /debug/tracing/iter_ctrl\n"
2260 	"noprint-parent nosym-offset nosym-addr noverbose\n"
2261 	"# echo print-parent > /debug/tracing/iter_ctrl\n"
2262 	"# echo 1 > /debug/tracing/tracing_enabled\n"
2263 	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
2264 	"echo 0 > /debug/tracing/tracing_enabled\n"
2265 ;
2266 
2267 static ssize_t
2268 tracing_readme_read(struct file *filp, char __user *ubuf,
2269 		       size_t cnt, loff_t *ppos)
2270 {
2271 	return simple_read_from_buffer(ubuf, cnt, ppos,
2272 					readme_msg, strlen(readme_msg));
2273 }
2274 
2275 static struct file_operations tracing_readme_fops = {
2276 	.open		= tracing_open_generic,
2277 	.read		= tracing_readme_read,
2278 };
2279 
2280 static ssize_t
2281 tracing_ctrl_read(struct file *filp, char __user *ubuf,
2282 		  size_t cnt, loff_t *ppos)
2283 {
2284 	struct trace_array *tr = filp->private_data;
2285 	char buf[64];
2286 	int r;
2287 
2288 	r = sprintf(buf, "%ld\n", tr->ctrl);
2289 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2290 }
2291 
2292 static ssize_t
2293 tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2294 		   size_t cnt, loff_t *ppos)
2295 {
2296 	struct trace_array *tr = filp->private_data;
2297 	char buf[64];
2298 	long val;
2299 	int ret;
2300 
2301 	if (cnt >= sizeof(buf))
2302 		return -EINVAL;
2303 
2304 	if (copy_from_user(&buf, ubuf, cnt))
2305 		return -EFAULT;
2306 
2307 	buf[cnt] = 0;
2308 
2309 	ret = strict_strtoul(buf, 10, &val);
2310 	if (ret < 0)
2311 		return ret;
2312 
2313 	val = !!val;
2314 
2315 	mutex_lock(&trace_types_lock);
2316 	if (tr->ctrl ^ val) {
2317 		if (val)
2318 			tracer_enabled = 1;
2319 		else
2320 			tracer_enabled = 0;
2321 
2322 		tr->ctrl = val;
2323 
2324 		if (current_trace && current_trace->ctrl_update)
2325 			current_trace->ctrl_update(tr);
2326 	}
2327 	mutex_unlock(&trace_types_lock);
2328 
2329 	filp->f_pos += cnt;
2330 
2331 	return cnt;
2332 }
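
/*
 * Illustrative usage: any nonzero write is normalized to 1 by the
 * double negation above, and a change of state is forwarded to the
 * current tracer's ctrl_update() callback:
 *
 *	# echo 1 > /debug/tracing/tracing_enabled
 *	# echo 0 > /debug/tracing/tracing_enabled
 */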
2333 
2334 static ssize_t
2335 tracing_set_trace_read(struct file *filp, char __user *ubuf,
2336 		       size_t cnt, loff_t *ppos)
2337 {
2338 	char buf[max_tracer_type_len+2];
2339 	int r;
2340 
2341 	mutex_lock(&trace_types_lock);
2342 	if (current_trace)
2343 		r = sprintf(buf, "%s\n", current_trace->name);
2344 	else
2345 		r = sprintf(buf, "\n");
2346 	mutex_unlock(&trace_types_lock);
2347 
2348 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2349 }
2350 
2351 static ssize_t
2352 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2353 			size_t cnt, loff_t *ppos)
2354 {
2355 	struct trace_array *tr = &global_trace;
2356 	struct tracer *t;
2357 	char buf[max_tracer_type_len+1];
2358 	int i;
2359 
2360 	if (cnt > max_tracer_type_len)
2361 		cnt = max_tracer_type_len;
2362 
2363 	if (copy_from_user(&buf, ubuf, cnt))
2364 		return -EFAULT;
2365 
2366 	buf[cnt] = 0;
2367 
2368 	/* strip trailing whitespace. */
2369 	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2370 		buf[i] = 0;
2371 
2372 	mutex_lock(&trace_types_lock);
2373 	for (t = trace_types; t; t = t->next) {
2374 		if (strcmp(t->name, buf) == 0)
2375 			break;
2376 	}
2377 	if (!t || t == current_trace)
2378 		goto out;
2379 
2380 	if (current_trace && current_trace->reset)
2381 		current_trace->reset(tr);
2382 
2383 	current_trace = t;
2384 	if (t->init)
2385 		t->init(tr);
2386 
2387  out:
2388 	mutex_unlock(&trace_types_lock);
2389 
2390 	filp->f_pos += cnt;
2391 
2392 	return cnt;
2393 }
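
/*
 * Illustrative usage: the trailing newline from echo is stripped by the
 * whitespace loop above before the name is matched against trace_types;
 * switching tracers resets the old one and initializes the new one:
 *
 *	# echo sched_switch > /debug/tracing/current_tracer
 *	# cat /debug/tracing/current_tracer
 *	sched_switch
 */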
2394 
2395 static ssize_t
2396 tracing_max_lat_read(struct file *filp, char __user *ubuf,
2397 		     size_t cnt, loff_t *ppos)
2398 {
2399 	unsigned long *ptr = filp->private_data;
2400 	char buf[64];
2401 	int r;
2402 
2403 	r = snprintf(buf, sizeof(buf), "%ld\n",
2404 		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
2405 	if (r > sizeof(buf))
2406 		r = sizeof(buf);
2407 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2408 }
2409 
2410 static ssize_t
2411 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
2412 		      size_t cnt, loff_t *ppos)
2413 {
2414 	long *ptr = filp->private_data;
2415 	char buf[64];
2416 	long val;
2417 	int ret;
2418 
2419 	if (cnt >= sizeof(buf))
2420 		return -EINVAL;
2421 
2422 	if (copy_from_user(&buf, ubuf, cnt))
2423 		return -EFAULT;
2424 
2425 	buf[cnt] = 0;
2426 
2427 	ret = strict_strtoul(buf, 10, &val);
2428 	if (ret < 0)
2429 		return ret;
2430 
2431 	*ptr = val * 1000;
2432 
2433 	return cnt;
2434 }
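
/*
 * Illustrative usage: latencies are read and written in microseconds
 * but stored in nanoseconds, hence the val * 1000 above:
 *
 *	# echo 500 > /debug/tracing/tracing_max_latency
 *	# cat /debug/tracing/tracing_max_latency
 *	500
 */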
2435 
2436 static atomic_t tracing_reader;
2437 
2438 static int tracing_open_pipe(struct inode *inode, struct file *filp)
2439 {
2440 	struct trace_iterator *iter;
2441 
2442 	if (tracing_disabled)
2443 		return -ENODEV;
2444 
2445 	/* We allow only one reader of the pipe */
2446 	if (atomic_inc_return(&tracing_reader) != 1) {
2447 		atomic_dec(&tracing_reader);
2448 		return -EBUSY;
2449 	}
2450 
2451 	/* create a buffer to store the information to pass to userspace */
2452 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2453 	if (!iter)
2454 		return -ENOMEM;
2455 
2456 	mutex_lock(&trace_types_lock);
2457 	iter->tr = &global_trace;
2458 	iter->trace = current_trace;
2459 	filp->private_data = iter;
2460 
2461 	if (iter->trace->pipe_open)
2462 		iter->trace->pipe_open(iter);
2463 	mutex_unlock(&trace_types_lock);
2464 
2465 	return 0;
2466 }
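
/*
 * Illustrative consequence of the atomic reader count above: trace_pipe
 * is exclusive, so a second concurrent open is refused with -EBUSY:
 *
 *	# cat /debug/tracing/trace_pipe &
 *	# cat /debug/tracing/trace_pipe
 *	cat: /debug/tracing/trace_pipe: Device or resource busy
 */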
2467 
2468 static int tracing_release_pipe(struct inode *inode, struct file *file)
2469 {
2470 	struct trace_iterator *iter = file->private_data;
2471 
2472 	kfree(iter);
2473 	atomic_dec(&tracing_reader);
2474 
2475 	return 0;
2476 }
2477 
2478 static unsigned int
2479 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
2480 {
2481 	struct trace_iterator *iter = filp->private_data;
2482 
2483 	if (trace_flags & TRACE_ITER_BLOCK) {
2484 		/*
2485 		 * Always select as readable when in blocking mode
2486 		 */
2487 		return POLLIN | POLLRDNORM;
2488 	} else {
2489 		if (!trace_empty(iter))
2490 			return POLLIN | POLLRDNORM;
2491 		poll_wait(filp, &trace_wait, poll_table);
2492 		if (!trace_empty(iter))
2493 			return POLLIN | POLLRDNORM;
2494 
2495 		return 0;
2496 	}
2497 }
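
/*
 * Illustrative user-space sketch (not part of this file): a reader can
 * use poll(2) to wait for data instead of blocking in read(2). Note
 * that with TRACE_ITER_BLOCK set, poll always reports readable:
 *
 *	char buf[4096];
 *	struct pollfd pfd = {
 *		.fd	= open("/debug/tracing/trace_pipe", O_RDONLY),
 *		.events	= POLLIN,
 *	};
 *
 *	if (poll(&pfd, 1, -1) > 0)
 *		read(pfd.fd, buf, sizeof(buf));
 */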
2498 
2499 /*
2500  * Consumer reader.
2501  */
2502 static ssize_t
2503 tracing_read_pipe(struct file *filp, char __user *ubuf,
2504 		  size_t cnt, loff_t *ppos)
2505 {
2506 	struct trace_iterator *iter = filp->private_data;
2507 	struct trace_array_cpu *data;
2508 	static cpumask_t mask;
2509 	unsigned long flags;
2510 #ifdef CONFIG_FTRACE
2511 	int ftrace_save;
2512 #endif
2513 	int cpu;
2514 	ssize_t sret;
2515 
2516 	/* return any leftover data */
2517 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2518 	if (sret != -EBUSY)
2519 		return sret;
2520 	sret = 0;
2521 
2522 	trace_seq_reset(&iter->seq);
2523 
2524 	mutex_lock(&trace_types_lock);
2525 	if (iter->trace->read) {
2526 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
2527 		if (sret)
2528 			goto out;
2529 	}
2530 
2531 	while (trace_empty(iter)) {
2532 
2533 		if ((filp->f_flags & O_NONBLOCK)) {
2534 			sret = -EAGAIN;
2535 			goto out;
2536 		}
2537 
2538 		/*
2539 		 * This is a makeshift waitqueue; we don't use an actual
2540 		 * wait queue because:
2541 		 *  1) we only ever have one waiter
2542 		 *  2) the tracer traces all functions, and we don't want
2543 		 *     the overhead of calling wake_up and friends
2544 		 *     (and of tracing them too).
2545 		 *     This really is a very primitive wakeup.
2546 		 */
2547 		set_current_state(TASK_INTERRUPTIBLE);
2548 		iter->tr->waiter = current;
2549 
2550 		mutex_unlock(&trace_types_lock);
2551 
2552 		/* sleep for 100 msecs, and try again. */
2553 		schedule_timeout(HZ/10);
2554 
2555 		mutex_lock(&trace_types_lock);
2556 
2557 		iter->tr->waiter = NULL;
2558 
2559 		if (signal_pending(current)) {
2560 			sret = -EINTR;
2561 			goto out;
2562 		}
2563 
2564 		if (iter->trace != current_trace)
2565 			goto out;
2566 
2567 		/*
2568 		 * We block until we have read something and tracing has
2569 		 * been disabled. If tracing is disabled but nothing has been
2570 		 * read yet, we keep blocking; this lets a user cat this file,
2571 		 * then enable tracing. Once something has been read, we
2572 		 * return EOF when tracing is disabled again.
2573 		 *
2574 		 * iter->pos will be 0 if we haven't read anything.
2575 		 */
2576 		if (!tracer_enabled && iter->pos)
2577 			break;
2578 
2579 		continue;
2580 	}
2581 
2582 	/* stop when tracing is finished */
2583 	if (trace_empty(iter))
2584 		goto out;
2585 
2586 	if (cnt >= PAGE_SIZE)
2587 		cnt = PAGE_SIZE - 1;
2588 
2589 	/* reset all but tr, trace, and overruns */
2590 	memset(&iter->seq, 0,
2591 	       sizeof(struct trace_iterator) -
2592 	       offsetof(struct trace_iterator, seq));
2593 	iter->pos = -1;
2594 
2595 	/*
2596 	 * We need to stop tracing on all CPUs to read the
2597 	 * next buffer. This is a bit expensive, but is
2598 	 * not done often. We read as much as we can,
2599 	 * and then release the locks again.
2600 	 */
2601 
2602 	cpus_clear(mask);
2603 	local_irq_save(flags);
2604 #ifdef CONFIG_FTRACE
2605 	ftrace_save = ftrace_enabled;
2606 	ftrace_enabled = 0;
2607 #endif
2608 	smp_wmb();
2609 	for_each_tracing_cpu(cpu) {
2610 		data = iter->tr->data[cpu];
2611 
2612 		if (!head_page(data) || !data->trace_idx)
2613 			continue;
2614 
2615 		atomic_inc(&data->disabled);
2616 		cpu_set(cpu, mask);
2617 	}
2618 
2619 	for_each_cpu_mask(cpu, mask) {
2620 		data = iter->tr->data[cpu];
2621 		__raw_spin_lock(&data->lock);
2622 
2623 		if (data->overrun > iter->last_overrun[cpu])
2624 			iter->overrun[cpu] +=
2625 				data->overrun - iter->last_overrun[cpu];
2626 		iter->last_overrun[cpu] = data->overrun;
2627 	}
2628 
2629 	while (find_next_entry_inc(iter) != NULL) {
2630 		int ret;
2631 		int len = iter->seq.len;
2632 
2633 		ret = print_trace_line(iter);
2634 		if (!ret) {
2635 			/* don't print partial lines */
2636 			iter->seq.len = len;
2637 			break;
2638 		}
2639 
2640 		trace_consume(iter);
2641 
2642 		if (iter->seq.len >= cnt)
2643 			break;
2644 	}
2645 
2646 	for_each_cpu_mask(cpu, mask) {
2647 		data = iter->tr->data[cpu];
2648 		__raw_spin_unlock(&data->lock);
2649 	}
2650 
2651 	for_each_cpu_mask(cpu, mask) {
2652 		data = iter->tr->data[cpu];
2653 		atomic_dec(&data->disabled);
2654 	}
2655 #ifdef CONFIG_FTRACE
2656 	ftrace_enabled = ftrace_save;
2657 #endif
2658 	local_irq_restore(flags);
2659 
2660 	/* Now copy what we have to the user */
2661 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2662 	if (iter->seq.readpos >= iter->seq.len)
2663 		trace_seq_reset(&iter->seq);
2664 	if (sret == -EBUSY)
2665 		sret = 0;
2666 
2667 out:
2668 	mutex_unlock(&trace_types_lock);
2669 
2670 	return sret;
2671 }
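
/*
 * Illustrative usage: unlike the trace file, trace_pipe is a consuming
 * reader (note the trace_consume() above), so entries are removed as
 * they are printed and a live stream can be captured:
 *
 *	# cat /debug/tracing/trace_pipe > /tmp/live.txt &
 *	# echo 1 > /debug/tracing/tracing_enabled
 */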
2672 
2673 static ssize_t
2674 tracing_entries_read(struct file *filp, char __user *ubuf,
2675 		     size_t cnt, loff_t *ppos)
2676 {
2677 	struct trace_array *tr = filp->private_data;
2678 	char buf[64];
2679 	int r;
2680 
2681 	r = sprintf(buf, "%lu\n", tr->entries);
2682 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2683 }
2684 
2685 static ssize_t
2686 tracing_entries_write(struct file *filp, const char __user *ubuf,
2687 		      size_t cnt, loff_t *ppos)
2688 {
2689 	unsigned long val;
2690 	char buf[64];
2691 	int i, ret;
2692 
2693 	if (cnt >= sizeof(buf))
2694 		return -EINVAL;
2695 
2696 	if (copy_from_user(&buf, ubuf, cnt))
2697 		return -EFAULT;
2698 
2699 	buf[cnt] = 0;
2700 
2701 	ret = strict_strtoul(buf, 10, &val);
2702 	if (ret < 0)
2703 		return ret;
2704 
2705 	/* must have at least 1 entry */
2706 	if (!val)
2707 		return -EINVAL;
2708 
2709 	mutex_lock(&trace_types_lock);
2710 
2711 	if (current_trace != &no_tracer) {
2712 		cnt = -EBUSY;
2713 		pr_info("ftrace: set current_tracer to none"
2714 			" before modifying buffer size\n");
2715 		goto out;
2716 	}
2717 
2718 	if (val > global_trace.entries) {
2719 		long pages_requested;
2720 		unsigned long freeable_pages;
2721 
2722 		/* make sure we have enough memory before mapping */
2723 		pages_requested =
2724 			(val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE;
2725 
2726 		/* account for each buffer (and max_tr) */
2727 		pages_requested *= tracing_nr_buffers * 2;
2728 
2729 		/* Check for overflow */
2730 		if (pages_requested < 0) {
2731 			cnt = -ENOMEM;
2732 			goto out;
2733 		}
2734 
2735 		freeable_pages = determine_dirtyable_memory();
2736 
2737 		/* we only allow requests for up to 1/4 of usable memory */
2738 		if (pages_requested >
2739 		    ((freeable_pages + tracing_pages_allocated) / 4)) {
2740 			cnt = -ENOMEM;
2741 			goto out;
2742 		}
2743 
2744 		while (global_trace.entries < val) {
2745 			if (trace_alloc_page()) {
2746 				cnt = -ENOMEM;
2747 				goto out;
2748 			}
2749 			/* double check that we don't go over the known pages */
2750 			if (tracing_pages_allocated > pages_requested)
2751 				break;
2752 		}
2753 
2754 	} else {
2755 		/* shrink, but keep at least val entries (rounded up to a full page) */
2756 		while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
2757 			trace_free_page();
2758 	}
2759 
2760 	/* check integrity */
2761 	for_each_tracing_cpu(i)
2762 		check_pages(global_trace.data[i]);
2763 
2764 	filp->f_pos += cnt;
2765 
2766 	/* If check pages failed, return ENOMEM */
2767 	if (tracing_disabled)
2768 		cnt = -ENOMEM;
2769  out:
2770 	max_tr.entries = global_trace.entries;
2771 	mutex_unlock(&trace_types_lock);
2772 
2773 	return cnt;
2774 }
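
/*
 * Illustrative usage: the buffer may only be resized while no tracer is
 * active, and the size is rounded to whole pages of entries, so the
 * value read back can be slightly larger than the one written:
 *
 *	# echo none > /debug/tracing/current_tracer
 *	# echo 100000 > /debug/tracing/trace_entries
 *	# cat /debug/tracing/trace_entries
 */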
2775 
2776 static struct file_operations tracing_max_lat_fops = {
2777 	.open		= tracing_open_generic,
2778 	.read		= tracing_max_lat_read,
2779 	.write		= tracing_max_lat_write,
2780 };
2781 
2782 static struct file_operations tracing_ctrl_fops = {
2783 	.open		= tracing_open_generic,
2784 	.read		= tracing_ctrl_read,
2785 	.write		= tracing_ctrl_write,
2786 };
2787 
2788 static struct file_operations set_tracer_fops = {
2789 	.open		= tracing_open_generic,
2790 	.read		= tracing_set_trace_read,
2791 	.write		= tracing_set_trace_write,
2792 };
2793 
2794 static struct file_operations tracing_pipe_fops = {
2795 	.open		= tracing_open_pipe,
2796 	.poll		= tracing_poll_pipe,
2797 	.read		= tracing_read_pipe,
2798 	.release	= tracing_release_pipe,
2799 };
2800 
2801 static struct file_operations tracing_entries_fops = {
2802 	.open		= tracing_open_generic,
2803 	.read		= tracing_entries_read,
2804 	.write		= tracing_entries_write,
2805 };
2806 
2807 #ifdef CONFIG_DYNAMIC_FTRACE
2808 
2809 static ssize_t
2810 tracing_read_long(struct file *filp, char __user *ubuf,
2811 		  size_t cnt, loff_t *ppos)
2812 {
2813 	unsigned long *p = filp->private_data;
2814 	char buf[64];
2815 	int r;
2816 
2817 	r = sprintf(buf, "%ld\n", *p);
2818 
2819 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2820 }
2821 
2822 static struct file_operations tracing_read_long_fops = {
2823 	.open		= tracing_open_generic,
2824 	.read		= tracing_read_long,
2825 };
2826 #endif
2827 
2828 static struct dentry *d_tracer;
2829 
2830 struct dentry *tracing_init_dentry(void)
2831 {
2832 	static int once;
2833 
2834 	if (d_tracer)
2835 		return d_tracer;
2836 
2837 	d_tracer = debugfs_create_dir("tracing", NULL);
2838 
2839 	if (!d_tracer && !once) {
2840 		once = 1;
2841 		pr_warning("Could not create debugfs directory 'tracing'\n");
2842 		return NULL;
2843 	}
2844 
2845 	return d_tracer;
2846 }
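
/*
 * Illustrative sketch (hypothetical names): a tracer can hang its own
 * control files off the shared "tracing" directory in the same way
 * tracer_init_debugfs() does below:
 *
 *	struct dentry *d_tracer = tracing_init_dentry();
 *	struct dentry *entry;
 *
 *	entry = debugfs_create_file("my_stat", 0444, d_tracer,
 *				    NULL, &my_stat_fops);
 *	if (!entry)
 *		pr_warning("Could not create debugfs 'my_stat' entry\n");
 */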
2847 
2848 #ifdef CONFIG_FTRACE_SELFTEST
2849 /* Let selftest have access to static functions in this file */
2850 #include "trace_selftest.c"
2851 #endif
2852 
2853 static __init void tracer_init_debugfs(void)
2854 {
2855 	struct dentry *d_tracer;
2856 	struct dentry *entry;
2857 
2858 	d_tracer = tracing_init_dentry();
2859 
2860 	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
2861 				    &global_trace, &tracing_ctrl_fops);
2862 	if (!entry)
2863 		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
2864 
2865 	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
2866 				    NULL, &tracing_iter_fops);
2867 	if (!entry)
2868 		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
2869 
2870 	entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
2871 				    NULL, &tracing_cpumask_fops);
2872 	if (!entry)
2873 		pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
2874 
2875 	entry = debugfs_create_file("latency_trace", 0444, d_tracer,
2876 				    &global_trace, &tracing_lt_fops);
2877 	if (!entry)
2878 		pr_warning("Could not create debugfs 'latency_trace' entry\n");
2879 
2880 	entry = debugfs_create_file("trace", 0444, d_tracer,
2881 				    &global_trace, &tracing_fops);
2882 	if (!entry)
2883 		pr_warning("Could not create debugfs 'trace' entry\n");
2884 
2885 	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
2886 				    &global_trace, &show_traces_fops);
2887 	if (!entry)
2888 		pr_warning("Could not create debugfs 'available_tracers' entry\n");
2889 
2890 	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
2891 				    &global_trace, &set_tracer_fops);
2892 	if (!entry)
2893 		pr_warning("Could not create debugfs 'current_tracer' entry\n");
2894 
2895 	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
2896 				    &tracing_max_latency,
2897 				    &tracing_max_lat_fops);
2898 	if (!entry)
2899 		pr_warning("Could not create debugfs "
2900 			   "'tracing_max_latency' entry\n");
2901 
2902 	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
2903 				    &tracing_thresh, &tracing_max_lat_fops);
2904 	if (!entry)
2905 		pr_warning("Could not create debugfs "
2906 			   "'tracing_threash' entry\n");
2907 	entry = debugfs_create_file("README", 0644, d_tracer,
2908 				    NULL, &tracing_readme_fops);
2909 	if (!entry)
2910 		pr_warning("Could not create debugfs 'README' entry\n");
2911 
2912 	entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
2913 				    NULL, &tracing_pipe_fops);
2914 	if (!entry)
2915 		pr_warning("Could not create debugfs "
2916 			   "'tracing_threash' entry\n");
2917 
2918 	entry = debugfs_create_file("trace_entries", 0644, d_tracer,
2919 				    &global_trace, &tracing_entries_fops);
2920 	if (!entry)
2921 		pr_warning("Could not create debugfs "
2922 			   "'tracing_threash' entry\n");
2923 
2924 #ifdef CONFIG_DYNAMIC_FTRACE
2925 	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
2926 				    &ftrace_update_tot_cnt,
2927 				    &tracing_read_long_fops);
2928 	if (!entry)
2929 		pr_warning("Could not create debugfs "
2930 			   "'dyn_ftrace_total_info' entry\n");
2931 #endif
2932 #ifdef CONFIG_SYSPROF_TRACER
2933 	init_tracer_sysprof_debugfs(d_tracer);
2934 #endif
2935 }
2936 
2937 static int trace_alloc_page(void)
2938 {
2939 	struct trace_array_cpu *data;
2940 	struct page *page, *tmp;
2941 	LIST_HEAD(pages);
2942 	void *array;
2943 	unsigned pages_allocated = 0;
2944 	int i;
2945 
2946 	/* first allocate a page for each CPU */
2947 	for_each_tracing_cpu(i) {
2948 		array = (void *)__get_free_page(GFP_KERNEL);
2949 		if (array == NULL) {
2950 			printk(KERN_ERR "tracer: failed to allocate page "
2951 			       "for trace buffer!\n");
2952 			goto free_pages;
2953 		}
2954 
2955 		pages_allocated++;
2956 		page = virt_to_page(array);
2957 		list_add(&page->lru, &pages);
2958 
2959 /* Only allocate if we are actually using the max trace */
2960 #ifdef CONFIG_TRACER_MAX_TRACE
2961 		array = (void *)__get_free_page(GFP_KERNEL);
2962 		if (array == NULL) {
2963 			printk(KERN_ERR "tracer: failed to allocate page "
2964 			       "for trace buffer!\n");
2965 			goto free_pages;
2966 		}
2967 		pages_allocated++;
2968 		page = virt_to_page(array);
2969 		list_add(&page->lru, &pages);
2970 #endif
2971 	}
2972 
2973 	/* Now that we have successfully allocated a page per CPU, add them */
2974 	for_each_tracing_cpu(i) {
2975 		data = global_trace.data[i];
2976 		page = list_entry(pages.next, struct page, lru);
2977 		list_del_init(&page->lru);
2978 		list_add_tail(&page->lru, &data->trace_pages);
2979 		ClearPageLRU(page);
2980 
2981 #ifdef CONFIG_TRACER_MAX_TRACE
2982 		data = max_tr.data[i];
2983 		page = list_entry(pages.next, struct page, lru);
2984 		list_del_init(&page->lru);
2985 		list_add_tail(&page->lru, &data->trace_pages);
2986 		SetPageLRU(page);
2987 #endif
2988 	}
2989 	tracing_pages_allocated += pages_allocated;
2990 	global_trace.entries += ENTRIES_PER_PAGE;
2991 
2992 	return 0;
2993 
2994  free_pages:
2995 	list_for_each_entry_safe(page, tmp, &pages, lru) {
2996 		list_del_init(&page->lru);
2997 		__free_page(page);
2998 	}
2999 	return -ENOMEM;
3000 }
3001 
3002 static int trace_free_page(void)
3003 {
3004 	struct trace_array_cpu *data;
3005 	struct page *page;
3006 	struct list_head *p;
3007 	int i;
3008 	int ret = 0;
3009 
3010 	/* free one page from each buffer */
3011 	for_each_tracing_cpu(i) {
3012 		data = global_trace.data[i];
3013 		p = data->trace_pages.next;
3014 		if (p == &data->trace_pages) {
3015 			/* should never happen */
3016 			WARN_ON(1);
3017 			tracing_disabled = 1;
3018 			ret = -1;
3019 			break;
3020 		}
3021 		page = list_entry(p, struct page, lru);
3022 		ClearPageLRU(page);
3023 		list_del(&page->lru);
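		/*
		 * Two decrements: one for this page and one for the
		 * max_tr twin freed below, which does not touch the
		 * counter itself (see the matching increments in
		 * trace_alloc_page()).
		 */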
3024 		tracing_pages_allocated--;
3025 		tracing_pages_allocated--;
3026 		__free_page(page);
3027 
3028 		tracing_reset(data);
3029 
3030 #ifdef CONFIG_TRACER_MAX_TRACE
3031 		data = max_tr.data[i];
3032 		p = data->trace_pages.next;
3033 		if (p == &data->trace_pages) {
3034 			/* should never happen */
3035 			WARN_ON(1);
3036 			tracing_disabled = 1;
3037 			ret = -1;
3038 			break;
3039 		}
3040 		page = list_entry(p, struct page, lru);
3041 		ClearPageLRU(page);
3042 		list_del(&page->lru);
3043 		__free_page(page);
3044 
3045 		tracing_reset(data);
3046 #endif
3047 	}
3048 	global_trace.entries -= ENTRIES_PER_PAGE;
3049 
3050 	return ret;
3051 }
3052 
3053 __init static int tracer_alloc_buffers(void)
3054 {
3055 	struct trace_array_cpu *data;
3056 	void *array;
3057 	struct page *page;
3058 	int pages = 0;
3059 	int ret = -ENOMEM;
3060 	int i;
3061 
3062 	/* TODO: make the number of buffers hot-pluggable with CPUs */
3063 	tracing_nr_buffers = num_possible_cpus();
3064 	tracing_buffer_mask = cpu_possible_map;
3065 
3066 	/* Allocate the first page for all buffers */
3067 	for_each_tracing_cpu(i) {
3068 		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
3069 		max_tr.data[i] = &per_cpu(max_data, i);
3070 
3071 		array = (void *)__get_free_page(GFP_KERNEL);
3072 		if (array == NULL) {
3073 			printk(KERN_ERR "tracer: failed to allocate page "
3074 			       "for trace buffer!\n");
3075 			goto free_buffers;
3076 		}
3077 
3078 		/* link the page backing this array into the CPU's page list */
3079 		INIT_LIST_HEAD(&data->trace_pages);
3080 		page = virt_to_page(array);
3081 		list_add(&page->lru, &data->trace_pages);
3082 		/* use the LRU flag to differentiate the two buffers */
3083 		ClearPageLRU(page);
3084 
3085 		data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
3086 		max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
3087 
3088 /* Only allocate if we are actually using the max trace */
3089 #ifdef CONFIG_TRACER_MAX_TRACE
3090 		array = (void *)__get_free_page(GFP_KERNEL);
3091 		if (array == NULL) {
3092 			printk(KERN_ERR "tracer: failed to allocate page "
3093 			       "for trace buffer!\n");
3094 			goto free_buffers;
3095 		}
3096 
3097 		INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
3098 		page = virt_to_page(array);
3099 		list_add(&page->lru, &max_tr.data[i]->trace_pages);
3100 		SetPageLRU(page);
3101 #endif
3102 	}
3103 
3104 	/*
3105 	 * Since we allocate by orders of pages, we may be able to
3106 	 * round up a bit.
3107 	 */
3108 	global_trace.entries = ENTRIES_PER_PAGE;
3109 	pages++;
3110 
3111 	while (global_trace.entries < trace_nr_entries) {
3112 		if (trace_alloc_page())
3113 			break;
3114 		pages++;
3115 	}
3116 	max_tr.entries = global_trace.entries;
3117 
3118 	pr_info("tracer: %d pages allocated for %ld entries of %ld bytes\n",
3119 		pages, trace_nr_entries, (long)TRACE_ENTRY_SIZE);
3120 	pr_info("   actual entries %ld\n", global_trace.entries);
3121 
3122 	tracer_init_debugfs();
3123 
3124 	trace_init_cmdlines();
3125 
3126 	register_tracer(&no_tracer);
3127 	current_trace = &no_tracer;
3128 
3129 	/* All seems OK, enable tracing */
3130 	global_trace.ctrl = tracer_enabled;
3131 	tracing_disabled = 0;
3132 
3133 	return 0;
3134 
3135  free_buffers:
3136 	for (i-- ; i >= 0; i--) {
3137 		struct page *page, *tmp;
3138 		struct trace_array_cpu *data = global_trace.data[i];
3139 
3140 		if (data) {
3141 			list_for_each_entry_safe(page, tmp,
3142 						 &data->trace_pages, lru) {
3143 				list_del_init(&page->lru);
3144 				__free_page(page);
3145 			}
3146 		}
3147 
3148 #ifdef CONFIG_TRACER_MAX_TRACE
3149 		data = max_tr.data[i];
3150 		if (data) {
3151 			list_for_each_entry_safe(page, tmp,
3152 						 &data->trace_pages, lru) {
3153 				list_del_init(&page->lru);
3154 				__free_page(page);
3155 			}
3156 		}
3157 #endif
3158 	}
3159 	return ret;
3160 }
3161 fs_initcall(tracer_alloc_buffers);
3162