xref: /openbmc/linux/kernel/trace/ring_buffer.c (revision 78c99ba1)
1 /*
2  * Generic ring buffer
3  *
4  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5  */
6 #include <linux/ring_buffer.h>
7 #include <linux/trace_clock.h>
8 #include <linux/ftrace_irq.h>
9 #include <linux/spinlock.h>
10 #include <linux/debugfs.h>
11 #include <linux/uaccess.h>
12 #include <linux/hardirq.h>
13 #include <linux/module.h>
14 #include <linux/percpu.h>
15 #include <linux/mutex.h>
16 #include <linux/init.h>
17 #include <linux/hash.h>
18 #include <linux/list.h>
19 #include <linux/cpu.h>
20 #include <linux/fs.h>
21 
22 #include "trace.h"
23 
24 /*
25  * The ring buffer header is special. We must manually keep it up to date.
26  */
27 int ring_buffer_print_entry_header(struct trace_seq *s)
28 {
29 	int ret;
30 
31 	ret = trace_seq_printf(s, "# compressed entry header\n");
32 	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
33 	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
34 	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
35 	ret = trace_seq_printf(s, "\n");
36 	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
37 			       RINGBUF_TYPE_PADDING);
38 	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
39 			       RINGBUF_TYPE_TIME_EXTEND);
40 	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
41 			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
42 
43 	return ret;
44 }
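
/*
 * Example (editorial sketch, not part of the original source): the header
 * described above corresponds to struct ring_buffer_event as declared in
 * <linux/ring_buffer.h> (layout paraphrased here; the header file is
 * authoritative):
 *
 *	struct ring_buffer_event {
 *		u32	type_len:5, time_delta:27;
 *		u32	array[];
 *	};
 *
 * A data event with a 12 byte payload stores type_len = 3 (3 * RB_ALIGNMENT
 * bytes of data), a delta of up to 2^27 - 1 in time_delta, and the payload
 * in array[].  A payload larger than RB_MAX_SMALL_DATA leaves type_len = 0
 * and puts the true length in array[0] instead.
 */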
45 
46 /*
47  * The ring buffer is made up of a list of pages. A separate list of pages is
48  * allocated for each CPU. A writer may only write to a buffer that is
49  * associated with the CPU it is currently executing on.  A reader may read
50  * from any per cpu buffer.
51  *
52  * The reader is special. For each per cpu buffer, the reader has its own
53  * reader page. When a reader has read the entire reader page, this reader
54  * page is swapped with another page in the ring buffer.
55  *
56  * Now, as long as the writer is off the reader page, the reader can do
57  * whatever it wants with that page. The writer will never write to that page
58  * again (as long as it is out of the ring buffer).
59  *
60  * Here's some silly ASCII art.
61  *
62  *   +------+
63  *   |reader|          RING BUFFER
64  *   |page  |
65  *   +------+        +---+   +---+   +---+
66  *                   |   |-->|   |-->|   |
67  *                   +---+   +---+   +---+
68  *                     ^               |
69  *                     |               |
70  *                     +---------------+
71  *
72  *
73  *   +------+
74  *   |reader|          RING BUFFER
75  *   |page  |------------------v
76  *   +------+        +---+   +---+   +---+
77  *                   |   |-->|   |-->|   |
78  *                   +---+   +---+   +---+
79  *                     ^               |
80  *                     |               |
81  *                     +---------------+
82  *
83  *
84  *   +------+
85  *   |reader|          RING BUFFER
86  *   |page  |------------------v
87  *   +------+        +---+   +---+   +---+
88  *      ^            |   |-->|   |-->|   |
89  *      |            +---+   +---+   +---+
90  *      |                              |
91  *      |                              |
92  *      +------------------------------+
93  *
94  *
95  *   +------+
96  *   |buffer|          RING BUFFER
97  *   |page  |------------------v
98  *   +------+        +---+   +---+   +---+
99  *      ^            |   |   |   |-->|   |
100  *      |   New      +---+   +---+   +---+
101  *      |  Reader------^               |
102  *      |   page                       |
103  *      +------------------------------+
104  *
105  *
106  * After we make this swap, the reader can hand this page off to the splice
107  * code and be done with it. It can even allocate a new page if it needs to
108  * and swap that into the ring buffer.
109  *
110  * We will be using cmpxchg soon to make all this lockless.
111  *
112  */
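
/*
 * Example (editorial sketch, not part of the original file): from the
 * reader's point of view the page swap above is hidden behind the
 * consuming-read interface implemented further down in this file.  A
 * typical per-CPU drain loop looks roughly like this (signature as of
 * this revision; see <linux/ring_buffer.h>, and process() is a
 * hypothetical callback):
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *		process(ring_buffer_event_data(event),
 *			ring_buffer_event_length(event));
 */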
113 
114 /*
115  * A fast way to enable or disable all ring buffers is to
116  * call tracing_on or tracing_off. Turning off the ring buffers
117  * prevents all ring buffers from being recorded to.
118  * Turning this switch on makes it OK to write to the
119  * ring buffer, if the ring buffer itself is enabled.
120  *
121  * There are three layers that must be on in order to write
122  * to the ring buffer.
123  *
124  * 1) This global flag must be set.
125  * 2) The ring buffer must be enabled for recording.
126  * 3) The per cpu buffer must be enabled for recording.
127  *
128  * In case of an anomaly, this global flag has a bit set that
129  * will permanently disable all ring buffers.
130  */
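
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * all three layers switched on before a write can succeed.  Assumes a
 * buffer already allocated with ring_buffer_alloc() and a valid cpu:
 *
 *	tracing_on();                                 (layer 1: global flag)
 *	ring_buffer_record_enable(buffer);            (layer 2: whole buffer)
 *	ring_buffer_record_enable_cpu(buffer, cpu);   (layer 3: per cpu buffer)
 */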
131 
132 /*
133  * Global flag to disable all recording to ring buffers
134  *  This has two bits: ON, DISABLED
135  *
136  *  ON   DISABLED
137  * ---- ----------
138  *   0      0        : ring buffers are off
139  *   1      0        : ring buffers are on
140  *   X      1        : ring buffers are permanently disabled
141  */
142 
143 enum {
144 	RB_BUFFERS_ON_BIT	= 0,
145 	RB_BUFFERS_DISABLED_BIT	= 1,
146 };
147 
148 enum {
149 	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
150 	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
151 };
152 
153 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
154 
155 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
156 
157 /**
158  * tracing_on - enable all tracing buffers
159  *
160  * This function enables all tracing buffers that may have been
161  * disabled with tracing_off.
162  */
163 void tracing_on(void)
164 {
165 	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
166 }
167 EXPORT_SYMBOL_GPL(tracing_on);
168 
169 /**
170  * tracing_off - turn off all tracing buffers
171  *
172  * This function stops all tracing buffers from recording data.
173  * It does not disable any overhead the tracers themselves may
174  * be causing. This function simply causes all recording to
175  * the ring buffers to fail.
176  */
177 void tracing_off(void)
178 {
179 	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
180 }
181 EXPORT_SYMBOL_GPL(tracing_off);
182 
183 /**
184  * tracing_off_permanent - permanently disable ring buffers
185  *
186  * This function, once called, will disable all ring buffers
187  * permanently.
188  */
189 void tracing_off_permanent(void)
190 {
191 	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
192 }
193 
194 /**
195  * tracing_is_on - return the enabled state of the ring buffers
196  */
197 int tracing_is_on(void)
198 {
199 	return ring_buffer_flags == RB_BUFFERS_ON;
200 }
201 EXPORT_SYMBOL_GPL(tracing_is_on);
202 
205 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
206 #define RB_ALIGNMENT		4U
207 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
208 
209 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
210 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
211 
212 enum {
213 	RB_LEN_TIME_EXTEND = 8,
214 	RB_LEN_TIME_STAMP = 16,
215 };
216 
217 static inline int rb_null_event(struct ring_buffer_event *event)
218 {
219 	return event->type_len == RINGBUF_TYPE_PADDING
220 			&& event->time_delta == 0;
221 }
222 
223 static inline int rb_discarded_event(struct ring_buffer_event *event)
224 {
225 	return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
226 }
227 
228 static void rb_event_set_padding(struct ring_buffer_event *event)
229 {
230 	event->type_len = RINGBUF_TYPE_PADDING;
231 	event->time_delta = 0;
232 }
233 
234 static unsigned
235 rb_event_data_length(struct ring_buffer_event *event)
236 {
237 	unsigned length;
238 
239 	if (event->type_len)
240 		length = event->type_len * RB_ALIGNMENT;
241 	else
242 		length = event->array[0];
243 	return length + RB_EVNT_HDR_SIZE;
244 }
245 
246 /* inline for ring buffer fast paths */
247 static unsigned
248 rb_event_length(struct ring_buffer_event *event)
249 {
250 	switch (event->type_len) {
251 	case RINGBUF_TYPE_PADDING:
252 		if (rb_null_event(event))
253 			/* undefined */
254 			return -1;
255 		return  event->array[0] + RB_EVNT_HDR_SIZE;
256 
257 	case RINGBUF_TYPE_TIME_EXTEND:
258 		return RB_LEN_TIME_EXTEND;
259 
260 	case RINGBUF_TYPE_TIME_STAMP:
261 		return RB_LEN_TIME_STAMP;
262 
263 	case RINGBUF_TYPE_DATA:
264 		return rb_event_data_length(event);
265 	default:
266 		BUG();
267 	}
268 	/* not hit */
269 	return 0;
270 }
271 
272 /**
273  * ring_buffer_event_length - return the length of the event
274  * @event: the event to get the length of
275  */
276 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
277 {
278 	unsigned length = rb_event_length(event);
279 	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
280 		return length;
281 	length -= RB_EVNT_HDR_SIZE;
282 	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
283 		length -= sizeof(event->array[0]);
284 	return length;
285 }
286 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
287 
288 /* inline for ring buffer fast paths */
289 static void *
290 rb_event_data(struct ring_buffer_event *event)
291 {
292 	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
293 	/* If length is in len field, then array[0] has the data */
294 	if (event->type_len)
295 		return (void *)&event->array[0];
296 	/* Otherwise length is in array[0] and array[1] has the data */
297 	return (void *)&event->array[1];
298 }
299 
300 /**
301  * ring_buffer_event_data - return the data of the event
302  * @event: the event to get the data from
303  */
304 void *ring_buffer_event_data(struct ring_buffer_event *event)
305 {
306 	return rb_event_data(event);
307 }
308 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
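
/*
 * Example (editorial sketch, not part of the original source): given an
 * event handed back by one of the read interfaces, the payload and its
 * length are obtained with the two accessors above:
 *
 *	void *payload = ring_buffer_event_data(event);
 *	unsigned len  = ring_buffer_event_length(event);
 *
 * Note that len may be slightly larger than the length passed in on the
 * write side, since small payloads are rounded up to RB_ALIGNMENT.
 */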
309 
310 #define for_each_buffer_cpu(buffer, cpu)		\
311 	for_each_cpu(cpu, buffer->cpumask)
312 
313 #define TS_SHIFT	27
314 #define TS_MASK		((1ULL << TS_SHIFT) - 1)
315 #define TS_DELTA_TEST	(~TS_MASK)
316 
317 struct buffer_data_page {
318 	u64		 time_stamp;	/* page time stamp */
319 	local_t		 commit;	/* write committed index */
320 	unsigned char	 data[];	/* data of buffer page */
321 };
322 
323 struct buffer_page {
324 	struct list_head list;		/* list of buffer pages */
325 	local_t		 write;		/* index for next write */
326 	unsigned	 read;		/* index for next read */
327 	local_t		 entries;	/* entries on this page */
328 	struct buffer_data_page *page;	/* Actual data page */
329 };
330 
331 static void rb_init_page(struct buffer_data_page *bpage)
332 {
333 	local_set(&bpage->commit, 0);
334 }
335 
336 /**
337  * ring_buffer_page_len - the size of data on the page.
338  * @page: The page to read
339  *
340  * Returns the amount of data on the page, including buffer page header.
341  */
342 size_t ring_buffer_page_len(void *page)
343 {
344 	return local_read(&((struct buffer_data_page *)page)->commit)
345 		+ BUF_PAGE_HDR_SIZE;
346 }
347 
348 /*
349  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
350  * this issue out.
351  */
352 static void free_buffer_page(struct buffer_page *bpage)
353 {
354 	free_page((unsigned long)bpage->page);
355 	kfree(bpage);
356 }
357 
358 /*
359  * We need to fit the time_stamp delta into 27 bits.
360  */
361 static inline int test_time_stamp(u64 delta)
362 {
363 	if (delta & TS_DELTA_TEST)
364 		return 1;
365 	return 0;
366 }
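
/*
 * Worked example (editorial addition): with TS_SHIFT = 27, the largest
 * delta that fits in an event header is 2^27 - 1 = 134217727 time units,
 * roughly 134 ms with a nanosecond clock.  Two events on the same CPU
 * spaced further apart than that force a RINGBUF_TYPE_TIME_EXTEND event
 * to be inserted (see rb_add_time_stamp() below).
 */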
367 
368 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
369 
370 /* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
371 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
372 
373 /* Max number of timestamps that can fit on a page */
374 #define RB_TIMESTAMPS_PER_PAGE	(BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
375 
376 int ring_buffer_print_page_header(struct trace_seq *s)
377 {
378 	struct buffer_data_page field;
379 	int ret;
380 
381 	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
382 			       "offset:0;\tsize:%u;\n",
383 			       (unsigned int)sizeof(field.time_stamp));
384 
385 	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
386 			       "offset:%u;\tsize:%u;\n",
387 			       (unsigned int)offsetof(typeof(field), commit),
388 			       (unsigned int)sizeof(field.commit));
389 
390 	ret = trace_seq_printf(s, "\tfield: char data;\t"
391 			       "offset:%u;\tsize:%u;\n",
392 			       (unsigned int)offsetof(typeof(field), data),
393 			       (unsigned int)BUF_PAGE_SIZE);
394 
395 	return ret;
396 }
397 
398 /*
399  * If head_page == tail_page && head == tail, then the buffer is empty.
400  */
401 struct ring_buffer_per_cpu {
402 	int				cpu;
403 	struct ring_buffer		*buffer;
404 	spinlock_t			reader_lock; /* serialize readers */
405 	raw_spinlock_t			lock;
406 	struct lock_class_key		lock_key;
407 	struct list_head		pages;
408 	struct buffer_page		*head_page;	/* read from head */
409 	struct buffer_page		*tail_page;	/* write to tail */
410 	struct buffer_page		*commit_page;	/* committed pages */
411 	struct buffer_page		*reader_page;
412 	unsigned long			nmi_dropped;
413 	unsigned long			commit_overrun;
414 	unsigned long			overrun;
415 	unsigned long			read;
416 	local_t				entries;
417 	u64				write_stamp;
418 	u64				read_stamp;
419 	atomic_t			record_disabled;
420 };
421 
422 struct ring_buffer {
423 	unsigned			pages;
424 	unsigned			flags;
425 	int				cpus;
426 	atomic_t			record_disabled;
427 	cpumask_var_t			cpumask;
428 
429 	struct lock_class_key		*reader_lock_key;
430 
431 	struct mutex			mutex;
432 
433 	struct ring_buffer_per_cpu	**buffers;
434 
435 #ifdef CONFIG_HOTPLUG_CPU
436 	struct notifier_block		cpu_notify;
437 #endif
438 	u64				(*clock)(void);
439 };
440 
441 struct ring_buffer_iter {
442 	struct ring_buffer_per_cpu	*cpu_buffer;
443 	unsigned long			head;
444 	struct buffer_page		*head_page;
445 	u64				read_stamp;
446 };
447 
448 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
449 #define RB_WARN_ON(buffer, cond)				\
450 	({							\
451 		int _____ret = unlikely(cond);			\
452 		if (_____ret) {					\
453 			atomic_inc(&buffer->record_disabled);	\
454 			WARN_ON(1);				\
455 		}						\
456 		_____ret;					\
457 	})
458 
459 /* Up this if you want to test the TIME_EXTENTS and normalization */
460 #define DEBUG_SHIFT 0
461 
462 static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
463 {
464 	/* shift to debug/test normalization and TIME_EXTENTS */
465 	return buffer->clock() << DEBUG_SHIFT;
466 }
467 
468 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
469 {
470 	u64 time;
471 
472 	preempt_disable_notrace();
473 	time = rb_time_stamp(buffer, cpu);
474 	preempt_enable_no_resched_notrace();
475 
476 	return time;
477 }
478 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
479 
480 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
481 				      int cpu, u64 *ts)
482 {
483 	/* Just for testing the normalize function and deltas */
484 	*ts >>= DEBUG_SHIFT;
485 }
486 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
487 
488 /**
489  * rb_check_pages - integrity check of buffer pages
490  * @cpu_buffer: CPU buffer with pages to test
491  *
492  * As a safety measure we check to make sure the data pages have not
493  * been corrupted.
494  */
495 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
496 {
497 	struct list_head *head = &cpu_buffer->pages;
498 	struct buffer_page *bpage, *tmp;
499 
500 	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
501 		return -1;
502 	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
503 		return -1;
504 
505 	list_for_each_entry_safe(bpage, tmp, head, list) {
506 		if (RB_WARN_ON(cpu_buffer,
507 			       bpage->list.next->prev != &bpage->list))
508 			return -1;
509 		if (RB_WARN_ON(cpu_buffer,
510 			       bpage->list.prev->next != &bpage->list))
511 			return -1;
512 	}
513 
514 	return 0;
515 }
516 
517 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
518 			     unsigned nr_pages)
519 {
520 	struct list_head *head = &cpu_buffer->pages;
521 	struct buffer_page *bpage, *tmp;
522 	unsigned long addr;
523 	LIST_HEAD(pages);
524 	unsigned i;
525 
526 	for (i = 0; i < nr_pages; i++) {
527 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
528 				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
529 		if (!bpage)
530 			goto free_pages;
531 		list_add(&bpage->list, &pages);
532 
533 		addr = __get_free_page(GFP_KERNEL);
534 		if (!addr)
535 			goto free_pages;
536 		bpage->page = (void *)addr;
537 		rb_init_page(bpage->page);
538 	}
539 
540 	list_splice(&pages, head);
541 
542 	rb_check_pages(cpu_buffer);
543 
544 	return 0;
545 
546  free_pages:
547 	list_for_each_entry_safe(bpage, tmp, &pages, list) {
548 		list_del_init(&bpage->list);
549 		free_buffer_page(bpage);
550 	}
551 	return -ENOMEM;
552 }
553 
554 static struct ring_buffer_per_cpu *
555 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
556 {
557 	struct ring_buffer_per_cpu *cpu_buffer;
558 	struct buffer_page *bpage;
559 	unsigned long addr;
560 	int ret;
561 
562 	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
563 				  GFP_KERNEL, cpu_to_node(cpu));
564 	if (!cpu_buffer)
565 		return NULL;
566 
567 	cpu_buffer->cpu = cpu;
568 	cpu_buffer->buffer = buffer;
569 	spin_lock_init(&cpu_buffer->reader_lock);
570 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
571 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
572 	INIT_LIST_HEAD(&cpu_buffer->pages);
573 
574 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
575 			    GFP_KERNEL, cpu_to_node(cpu));
576 	if (!bpage)
577 		goto fail_free_buffer;
578 
579 	cpu_buffer->reader_page = bpage;
580 	addr = __get_free_page(GFP_KERNEL);
581 	if (!addr)
582 		goto fail_free_reader;
583 	bpage->page = (void *)addr;
584 	rb_init_page(bpage->page);
585 
586 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
587 
588 	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
589 	if (ret < 0)
590 		goto fail_free_reader;
591 
592 	cpu_buffer->head_page
593 		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
594 	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
595 
596 	return cpu_buffer;
597 
598  fail_free_reader:
599 	free_buffer_page(cpu_buffer->reader_page);
600 
601  fail_free_buffer:
602 	kfree(cpu_buffer);
603 	return NULL;
604 }
605 
606 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
607 {
608 	struct list_head *head = &cpu_buffer->pages;
609 	struct buffer_page *bpage, *tmp;
610 
611 	free_buffer_page(cpu_buffer->reader_page);
612 
613 	list_for_each_entry_safe(bpage, tmp, head, list) {
614 		list_del_init(&bpage->list);
615 		free_buffer_page(bpage);
616 	}
617 	kfree(cpu_buffer);
618 }
619 
620 /*
621  * Causes compile errors if the struct buffer_page gets bigger
622  * than the struct page.
623  */
624 extern int ring_buffer_page_too_big(void);
625 
626 #ifdef CONFIG_HOTPLUG_CPU
627 static int rb_cpu_notify(struct notifier_block *self,
628 			 unsigned long action, void *hcpu);
629 #endif
630 
631 /**
632  * ring_buffer_alloc - allocate a new ring_buffer
633  * @size: the size in bytes per cpu that is needed.
634  * @flags: attributes to set for the ring buffer.
635  *
636  * Currently the only flag that is available is the RB_FL_OVERWRITE
637  * flag. This flag means that the buffer will overwrite old data
638  * when the buffer wraps. If this flag is not set, the buffer will
639  * drop data when the tail hits the head.
640  */
641 struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
642 					struct lock_class_key *key)
643 {
644 	struct ring_buffer *buffer;
645 	int bsize;
646 	int cpu;
647 
648 	/* Paranoid! Optimizes out when all is well */
649 	if (sizeof(struct buffer_page) > sizeof(struct page))
650 		ring_buffer_page_too_big();
651 
652 
653 	/* keep it in its own cache line */
654 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
655 			 GFP_KERNEL);
656 	if (!buffer)
657 		return NULL;
658 
659 	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
660 		goto fail_free_buffer;
661 
662 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
663 	buffer->flags = flags;
664 	buffer->clock = trace_clock_local;
665 	buffer->reader_lock_key = key;
666 
667 	/* need at least two pages */
668 	if (buffer->pages == 1)
669 		buffer->pages++;
670 
671 	/*
672 	 * Without CPU hotplug support, if the ring buffer is allocated
673 	 * in an early initcall, it will not be notified of secondary CPUs.
674 	 * In that case, we need to allocate for all possible CPUs.
675 	 */
676 #ifdef CONFIG_HOTPLUG_CPU
677 	get_online_cpus();
678 	cpumask_copy(buffer->cpumask, cpu_online_mask);
679 #else
680 	cpumask_copy(buffer->cpumask, cpu_possible_mask);
681 #endif
682 	buffer->cpus = nr_cpu_ids;
683 
684 	bsize = sizeof(void *) * nr_cpu_ids;
685 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
686 				  GFP_KERNEL);
687 	if (!buffer->buffers)
688 		goto fail_free_cpumask;
689 
690 	for_each_buffer_cpu(buffer, cpu) {
691 		buffer->buffers[cpu] =
692 			rb_allocate_cpu_buffer(buffer, cpu);
693 		if (!buffer->buffers[cpu])
694 			goto fail_free_buffers;
695 	}
696 
697 #ifdef CONFIG_HOTPLUG_CPU
698 	buffer->cpu_notify.notifier_call = rb_cpu_notify;
699 	buffer->cpu_notify.priority = 0;
700 	register_cpu_notifier(&buffer->cpu_notify);
701 #endif
702 
703 	put_online_cpus();
704 	mutex_init(&buffer->mutex);
705 
706 	return buffer;
707 
708  fail_free_buffers:
709 	for_each_buffer_cpu(buffer, cpu) {
710 		if (buffer->buffers[cpu])
711 			rb_free_cpu_buffer(buffer->buffers[cpu]);
712 	}
713 	kfree(buffer->buffers);
714 
715  fail_free_cpumask:
716 	free_cpumask_var(buffer->cpumask);
717 	put_online_cpus();
718 
719  fail_free_buffer:
720 	kfree(buffer);
721 	return NULL;
722 }
723 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
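
/*
 * Example (editorial sketch, not part of the original file): allocating
 * and freeing a buffer.  ring_buffer_alloc() is the wrapper macro from
 * <linux/ring_buffer.h> that supplies the lock_class_key for this
 * function:
 *
 *	struct ring_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buffer);
 *
 * The size is per CPU, rounded up to whole pages, and at least two pages
 * are always allocated.
 */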
724 
725 /**
726  * ring_buffer_free - free a ring buffer.
727  * @buffer: the buffer to free.
728  */
729 void
730 ring_buffer_free(struct ring_buffer *buffer)
731 {
732 	int cpu;
733 
734 	get_online_cpus();
735 
736 #ifdef CONFIG_HOTPLUG_CPU
737 	unregister_cpu_notifier(&buffer->cpu_notify);
738 #endif
739 
740 	for_each_buffer_cpu(buffer, cpu)
741 		rb_free_cpu_buffer(buffer->buffers[cpu]);
742 
743 	put_online_cpus();
744 
745 	free_cpumask_var(buffer->cpumask);
746 
747 	kfree(buffer);
748 }
749 EXPORT_SYMBOL_GPL(ring_buffer_free);
750 
751 void ring_buffer_set_clock(struct ring_buffer *buffer,
752 			   u64 (*clock)(void))
753 {
754 	buffer->clock = clock;
755 }
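
/*
 * Example (editorial sketch): the buffer defaults to trace_clock_local.
 * A caller that needs cross-CPU ordering could install a global clock
 * from <linux/trace_clock.h> instead:
 *
 *	ring_buffer_set_clock(buffer, trace_clock_global);
 */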
756 
757 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
758 
759 static void
760 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
761 {
762 	struct buffer_page *bpage;
763 	struct list_head *p;
764 	unsigned i;
765 
766 	atomic_inc(&cpu_buffer->record_disabled);
767 	synchronize_sched();
768 
769 	for (i = 0; i < nr_pages; i++) {
770 		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
771 			return;
772 		p = cpu_buffer->pages.next;
773 		bpage = list_entry(p, struct buffer_page, list);
774 		list_del_init(&bpage->list);
775 		free_buffer_page(bpage);
776 	}
777 	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
778 		return;
779 
780 	rb_reset_cpu(cpu_buffer);
781 
782 	rb_check_pages(cpu_buffer);
783 
784 	atomic_dec(&cpu_buffer->record_disabled);
785 
786 }
787 
788 static void
789 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
790 		struct list_head *pages, unsigned nr_pages)
791 {
792 	struct buffer_page *bpage;
793 	struct list_head *p;
794 	unsigned i;
795 
796 	atomic_inc(&cpu_buffer->record_disabled);
797 	synchronize_sched();
798 
799 	for (i = 0; i < nr_pages; i++) {
800 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
801 			return;
802 		p = pages->next;
803 		bpage = list_entry(p, struct buffer_page, list);
804 		list_del_init(&bpage->list);
805 		list_add_tail(&bpage->list, &cpu_buffer->pages);
806 	}
807 	rb_reset_cpu(cpu_buffer);
808 
809 	rb_check_pages(cpu_buffer);
810 
811 	atomic_dec(&cpu_buffer->record_disabled);
812 }
813 
814 /**
815  * ring_buffer_resize - resize the ring buffer
816  * @buffer: the buffer to resize.
817  * @size: the new size.
818  *
819  * The tracer is responsible for making sure that the buffer is
820  * not being used while changing the size.
821  * Note: We may be able to change the above requirement by using
822  *  RCU synchronizations.
823  *
824  * Minimum size is 2 * BUF_PAGE_SIZE.
825  *
826  * Returns -1 on failure.
827  */
828 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
829 {
830 	struct ring_buffer_per_cpu *cpu_buffer;
831 	unsigned nr_pages, rm_pages, new_pages;
832 	struct buffer_page *bpage, *tmp;
833 	unsigned long buffer_size;
834 	unsigned long addr;
835 	LIST_HEAD(pages);
836 	int i, cpu;
837 
838 	/*
839 	 * Always succeed at resizing a non-existent buffer:
840 	 */
841 	if (!buffer)
842 		return size;
843 
844 	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
845 	size *= BUF_PAGE_SIZE;
846 	buffer_size = buffer->pages * BUF_PAGE_SIZE;
847 
848 	/* we need a minimum of two pages */
849 	if (size < BUF_PAGE_SIZE * 2)
850 		size = BUF_PAGE_SIZE * 2;
851 
852 	if (size == buffer_size)
853 		return size;
854 
855 	mutex_lock(&buffer->mutex);
856 	get_online_cpus();
857 
858 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
859 
860 	if (size < buffer_size) {
861 
862 		/* easy case, just free pages */
863 		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
864 			goto out_fail;
865 
866 		rm_pages = buffer->pages - nr_pages;
867 
868 		for_each_buffer_cpu(buffer, cpu) {
869 			cpu_buffer = buffer->buffers[cpu];
870 			rb_remove_pages(cpu_buffer, rm_pages);
871 		}
872 		goto out;
873 	}
874 
875 	/*
876 	 * This is a bit more difficult. We only want to add pages
877 	 * when we can allocate enough for all CPUs. We do this
878 	 * by allocating all the pages and storing them on a local
879 	 * linked list. If we succeed in our allocation, then we
880 	 * add these pages to the cpu_buffers. Otherwise we just free
881 	 * them all and return -ENOMEM;
882 	 */
883 	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
884 		goto out_fail;
885 
886 	new_pages = nr_pages - buffer->pages;
887 
888 	for_each_buffer_cpu(buffer, cpu) {
889 		for (i = 0; i < new_pages; i++) {
890 			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
891 						  cache_line_size()),
892 					    GFP_KERNEL, cpu_to_node(cpu));
893 			if (!bpage)
894 				goto free_pages;
895 			list_add(&bpage->list, &pages);
896 			addr = __get_free_page(GFP_KERNEL);
897 			if (!addr)
898 				goto free_pages;
899 			bpage->page = (void *)addr;
900 			rb_init_page(bpage->page);
901 		}
902 	}
903 
904 	for_each_buffer_cpu(buffer, cpu) {
905 		cpu_buffer = buffer->buffers[cpu];
906 		rb_insert_pages(cpu_buffer, &pages, new_pages);
907 	}
908 
909 	if (RB_WARN_ON(buffer, !list_empty(&pages)))
910 		goto out_fail;
911 
912  out:
913 	buffer->pages = nr_pages;
914 	put_online_cpus();
915 	mutex_unlock(&buffer->mutex);
916 
917 	return size;
918 
919  free_pages:
920 	list_for_each_entry_safe(bpage, tmp, &pages, list) {
921 		list_del_init(&bpage->list);
922 		free_buffer_page(bpage);
923 	}
924 	put_online_cpus();
925 	mutex_unlock(&buffer->mutex);
926 	return -ENOMEM;
927 
928 	/*
929 	 * Something went totally wrong, and we are too paranoid
930 	 * to even clean up the mess.
931 	 */
932  out_fail:
933 	put_online_cpus();
934 	mutex_unlock(&buffer->mutex);
935 	return -1;
936 }
937 EXPORT_SYMBOL_GPL(ring_buffer_resize);
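
/*
 * Example (editorial sketch, not part of the original file): growing a
 * buffer to 4 MB per CPU.  On success the effective (page-rounded) size
 * is returned; failure is -ENOMEM if the extra pages could not be
 * allocated, or -1 on internal inconsistency.  The caller must make sure
 * the buffer is not in use while it is resized:
 *
 *	int ret;
 *
 *	ret = ring_buffer_resize(buffer, 4 * 1024 * 1024);
 *	if (ret < 0)
 *		return ret;
 */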
938 
939 static inline void *
940 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
941 {
942 	return bpage->data + index;
943 }
944 
945 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
946 {
947 	return bpage->page->data + index;
948 }
949 
950 static inline struct ring_buffer_event *
951 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
952 {
953 	return __rb_page_index(cpu_buffer->reader_page,
954 			       cpu_buffer->reader_page->read);
955 }
956 
957 static inline struct ring_buffer_event *
958 rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
959 {
960 	return __rb_page_index(cpu_buffer->head_page,
961 			       cpu_buffer->head_page->read);
962 }
963 
964 static inline struct ring_buffer_event *
965 rb_iter_head_event(struct ring_buffer_iter *iter)
966 {
967 	return __rb_page_index(iter->head_page, iter->head);
968 }
969 
970 static inline unsigned rb_page_write(struct buffer_page *bpage)
971 {
972 	return local_read(&bpage->write);
973 }
974 
975 static inline unsigned rb_page_commit(struct buffer_page *bpage)
976 {
977 	return local_read(&bpage->page->commit);
978 }
979 
980 /* Size is determined by what has been committed */
981 static inline unsigned rb_page_size(struct buffer_page *bpage)
982 {
983 	return rb_page_commit(bpage);
984 }
985 
986 static inline unsigned
987 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
988 {
989 	return rb_page_commit(cpu_buffer->commit_page);
990 }
991 
992 static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
993 {
994 	return rb_page_commit(cpu_buffer->head_page);
995 }
996 
997 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
998 			       struct buffer_page **bpage)
999 {
1000 	struct list_head *p = (*bpage)->list.next;
1001 
1002 	if (p == &cpu_buffer->pages)
1003 		p = p->next;
1004 
1005 	*bpage = list_entry(p, struct buffer_page, list);
1006 }
1007 
1008 static inline unsigned
1009 rb_event_index(struct ring_buffer_event *event)
1010 {
1011 	unsigned long addr = (unsigned long)event;
1012 
1013 	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
1014 }
1015 
1016 static inline int
1017 rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1018 	     struct ring_buffer_event *event)
1019 {
1020 	unsigned long addr = (unsigned long)event;
1021 	unsigned long index;
1022 
1023 	index = rb_event_index(event);
1024 	addr &= PAGE_MASK;
1025 
1026 	return cpu_buffer->commit_page->page == (void *)addr &&
1027 		rb_commit_index(cpu_buffer) == index;
1028 }
1029 
1030 static void
1031 rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
1032 		    struct ring_buffer_event *event)
1033 {
1034 	unsigned long addr = (unsigned long)event;
1035 	unsigned long index;
1036 
1037 	index = rb_event_index(event);
1038 	addr &= PAGE_MASK;
1039 
1040 	while (cpu_buffer->commit_page->page != (void *)addr) {
1041 		if (RB_WARN_ON(cpu_buffer,
1042 			  cpu_buffer->commit_page == cpu_buffer->tail_page))
1043 			return;
1044 		cpu_buffer->commit_page->page->commit =
1045 			cpu_buffer->commit_page->write;
1046 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1047 		cpu_buffer->write_stamp =
1048 			cpu_buffer->commit_page->page->time_stamp;
1049 	}
1050 
1051 	/* Now set the commit to the event's index */
1052 	local_set(&cpu_buffer->commit_page->page->commit, index);
1053 }
1054 
1055 static void
1056 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1057 {
1058 	/*
1059 	 * We only race with interrupts and NMIs on this CPU.
1060 	 * If we own the commit event, then we can commit
1061 	 * all others that interrupted us, since the interruptions
1062 	 * are in stack format (they finish before they come
1063 	 * back to us). This allows us to do a simple loop to
1064 	 * assign the commit to the tail.
1065 	 */
1066  again:
1067 	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1068 		cpu_buffer->commit_page->page->commit =
1069 			cpu_buffer->commit_page->write;
1070 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1071 		cpu_buffer->write_stamp =
1072 			cpu_buffer->commit_page->page->time_stamp;
1073 		/* add barrier to keep gcc from optimizing too much */
1074 		barrier();
1075 	}
1076 	while (rb_commit_index(cpu_buffer) !=
1077 	       rb_page_write(cpu_buffer->commit_page)) {
1078 		cpu_buffer->commit_page->page->commit =
1079 			cpu_buffer->commit_page->write;
1080 		barrier();
1081 	}
1082 
1083 	/* again, keep gcc from optimizing */
1084 	barrier();
1085 
1086 	/*
1087 	 * If an interrupt came in just after the first while loop
1088 	 * and pushed the tail page forward, we will be left with
1089 	 * a dangling commit that will never go forward.
1090 	 */
1091 	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1092 		goto again;
1093 }
1094 
1095 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1096 {
1097 	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1098 	cpu_buffer->reader_page->read = 0;
1099 }
1100 
1101 static void rb_inc_iter(struct ring_buffer_iter *iter)
1102 {
1103 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1104 
1105 	/*
1106 	 * The iterator could be on the reader page (it starts there).
1107 	 * But the head could have moved, since the reader was
1108 	 * found. Check for this case and assign the iterator
1109 	 * to the head page instead of next.
1110 	 */
1111 	if (iter->head_page == cpu_buffer->reader_page)
1112 		iter->head_page = cpu_buffer->head_page;
1113 	else
1114 		rb_inc_page(cpu_buffer, &iter->head_page);
1115 
1116 	iter->read_stamp = iter->head_page->page->time_stamp;
1117 	iter->head = 0;
1118 }
1119 
1120 /**
1121  * rb_update_event - update event type and data
1122  * @event: the event to update
1123  * @type: the type of event
1124  * @length: the size of the event field in the ring buffer
1125  *
1126  * Update the type and data fields of the event. The length
1127  * is the actual size that is written to the ring buffer,
1128  * and with this, we can determine what to place into the
1129  * data field.
1130  */
1131 static void
1132 rb_update_event(struct ring_buffer_event *event,
1133 			 unsigned type, unsigned length)
1134 {
1135 	event->type_len = type;
1136 
1137 	switch (type) {
1138 
1139 	case RINGBUF_TYPE_PADDING:
1140 	case RINGBUF_TYPE_TIME_EXTEND:
1141 	case RINGBUF_TYPE_TIME_STAMP:
1142 		break;
1143 
1144 	case 0:
1145 		length -= RB_EVNT_HDR_SIZE;
1146 		if (length > RB_MAX_SMALL_DATA)
1147 			event->array[0] = length;
1148 		else
1149 			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1150 		break;
1151 	default:
1152 		BUG();
1153 	}
1154 }
1155 
1156 static unsigned rb_calculate_event_length(unsigned length)
1157 {
1158 	struct ring_buffer_event event; /* Used only for sizeof array */
1159 
1160 	/* zero length can cause confusion */
1161 	if (!length)
1162 		length = 1;
1163 
1164 	if (length > RB_MAX_SMALL_DATA)
1165 		length += sizeof(event.array[0]);
1166 
1167 	length += RB_EVNT_HDR_SIZE;
1168 	length = ALIGN(length, RB_ALIGNMENT);
1169 
1170 	return length;
1171 }
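
/*
 * Worked example (editorial addition): a request for 10 bytes of data
 * becomes 10 + RB_EVNT_HDR_SIZE (4) = 14, which ALIGN() rounds up to 16
 * bytes reserved on the page.  rb_update_event() then encodes the 12 byte
 * payload area as type_len = DIV_ROUND_UP(12, 4) = 3, and
 * ring_buffer_event_length() reports those 12 bytes back to the reader.
 */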
1172 
1173 
1174 static struct ring_buffer_event *
1175 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1176 	     unsigned long length, unsigned long tail,
1177 	     struct buffer_page *commit_page,
1178 	     struct buffer_page *tail_page, u64 *ts)
1179 {
1180 	struct buffer_page *next_page, *head_page, *reader_page;
1181 	struct ring_buffer *buffer = cpu_buffer->buffer;
1182 	struct ring_buffer_event *event;
1183 	bool lock_taken = false;
1184 	unsigned long flags;
1185 
1186 	next_page = tail_page;
1187 
1188 	local_irq_save(flags);
1189 	/*
1190 	 * Since the write to the buffer is still not
1191 	 * fully lockless, we must be careful with NMIs.
1192 	 * The locks in the writers are taken when a write
1193 	 * crosses to a new page. The locks protect against
1194 	 * races with the readers (this will soon be fixed
1195 	 * with a lockless solution).
1196 	 *
1197 	 * Because we can not protect against NMIs, and we
1198 	 * want to keep traces reentrant, we need to manage
1199 	 * what happens when we are in an NMI.
1200 	 *
1201 	 * NMIs can happen after we take the lock.
1202 	 * If we are in an NMI, only take the lock
1203 	 * if it is not already taken. Otherwise
1204 	 * simply fail.
1205 	 */
1206 	if (unlikely(in_nmi())) {
1207 		if (!__raw_spin_trylock(&cpu_buffer->lock)) {
1208 			cpu_buffer->nmi_dropped++;
1209 			goto out_reset;
1210 		}
1211 	} else
1212 		__raw_spin_lock(&cpu_buffer->lock);
1213 
1214 	lock_taken = true;
1215 
1216 	rb_inc_page(cpu_buffer, &next_page);
1217 
1218 	head_page = cpu_buffer->head_page;
1219 	reader_page = cpu_buffer->reader_page;
1220 
1221 	/* we grabbed the lock before incrementing */
1222 	if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1223 		goto out_reset;
1224 
1225 	/*
1226 	 * If for some reason, we had an interrupt storm that made
1227 	 * it all the way around the buffer, bail, and warn
1228 	 * about it.
1229 	 */
1230 	if (unlikely(next_page == commit_page)) {
1231 		cpu_buffer->commit_overrun++;
1232 		goto out_reset;
1233 	}
1234 
1235 	if (next_page == head_page) {
1236 		if (!(buffer->flags & RB_FL_OVERWRITE))
1237 			goto out_reset;
1238 
1239 		/* tail_page has not moved yet? */
1240 		if (tail_page == cpu_buffer->tail_page) {
1241 			/* count overflows */
1242 			cpu_buffer->overrun +=
1243 				local_read(&head_page->entries);
1244 
1245 			rb_inc_page(cpu_buffer, &head_page);
1246 			cpu_buffer->head_page = head_page;
1247 			cpu_buffer->head_page->read = 0;
1248 		}
1249 	}
1250 
1251 	/*
1252 	 * If the tail page is still the same as what we think
1253 	 * it is, then it is up to us to update the tail
1254 	 * pointer.
1255 	 */
1256 	if (tail_page == cpu_buffer->tail_page) {
1257 		local_set(&next_page->write, 0);
1258 		local_set(&next_page->entries, 0);
1259 		local_set(&next_page->page->commit, 0);
1260 		cpu_buffer->tail_page = next_page;
1261 
1262 		/* reread the time stamp */
1263 		*ts = rb_time_stamp(buffer, cpu_buffer->cpu);
1264 		cpu_buffer->tail_page->page->time_stamp = *ts;
1265 	}
1266 
1267 	/*
1268 	 * The actual tail page has moved forward.
1269 	 */
1270 	if (tail < BUF_PAGE_SIZE) {
1271 		/* Mark the rest of the page with padding */
1272 		event = __rb_page_index(tail_page, tail);
1273 		rb_event_set_padding(event);
1274 	}
1275 
1276 	/* Set the write back to the previous setting */
1277 	local_sub(length, &tail_page->write);
1278 
1279 	/*
1280 	 * If this was a commit entry that failed,
1281 	 * increment that too
1282 	 */
1283 	if (tail_page == cpu_buffer->commit_page &&
1284 	    tail == rb_commit_index(cpu_buffer)) {
1285 		rb_set_commit_to_write(cpu_buffer);
1286 	}
1287 
1288 	__raw_spin_unlock(&cpu_buffer->lock);
1289 	local_irq_restore(flags);
1290 
1291 	/* fail and let the caller try again */
1292 	return ERR_PTR(-EAGAIN);
1293 
1294  out_reset:
1295 	/* reset write */
1296 	local_sub(length, &tail_page->write);
1297 
1298 	if (likely(lock_taken))
1299 		__raw_spin_unlock(&cpu_buffer->lock);
1300 	local_irq_restore(flags);
1301 	return NULL;
1302 }
1303 
1304 static struct ring_buffer_event *
1305 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1306 		  unsigned type, unsigned long length, u64 *ts)
1307 {
1308 	struct buffer_page *tail_page, *commit_page;
1309 	struct ring_buffer_event *event;
1310 	unsigned long tail, write;
1311 
1312 	commit_page = cpu_buffer->commit_page;
1313 	/* we just need to protect against interrupts */
1314 	barrier();
1315 	tail_page = cpu_buffer->tail_page;
1316 	write = local_add_return(length, &tail_page->write);
1317 	tail = write - length;
1318 
1319 	/* See if we shot past the end of this buffer page */
1320 	if (write > BUF_PAGE_SIZE)
1321 		return rb_move_tail(cpu_buffer, length, tail,
1322 				    commit_page, tail_page, ts);
1323 
1324 	/* We reserved something on the buffer */
1325 
1326 	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
1327 		return NULL;
1328 
1329 	event = __rb_page_index(tail_page, tail);
1330 	rb_update_event(event, type, length);
1331 
1332 	/* The passed in type is zero for DATA */
1333 	if (likely(!type))
1334 		local_inc(&tail_page->entries);
1335 
1336 	/*
1337 	 * If this is a commit and the tail is zero, then update
1338 	 * this page's time stamp.
1339 	 */
1340 	if (!tail && rb_is_commit(cpu_buffer, event))
1341 		cpu_buffer->commit_page->page->time_stamp = *ts;
1342 
1343 	return event;
1344 }
1345 
1346 static inline int
1347 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1348 		  struct ring_buffer_event *event)
1349 {
1350 	unsigned long new_index, old_index;
1351 	struct buffer_page *bpage;
1352 	unsigned long index;
1353 	unsigned long addr;
1354 
1355 	new_index = rb_event_index(event);
1356 	old_index = new_index + rb_event_length(event);
1357 	addr = (unsigned long)event;
1358 	addr &= PAGE_MASK;
1359 
1360 	bpage = cpu_buffer->tail_page;
1361 
1362 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
1363 		/*
1364 		 * This is on the tail page. It is possible that
1365 		 * a write could come in and move the tail page
1366 		 * and write to the next page. That is fine
1367 		 * because we just shorten what is on this page.
1368 		 */
1369 		index = local_cmpxchg(&bpage->write, old_index, new_index);
1370 		if (index == old_index)
1371 			return 1;
1372 	}
1373 
1374 	/* could not discard */
1375 	return 0;
1376 }
1377 
1378 static int
1379 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1380 		  u64 *ts, u64 *delta)
1381 {
1382 	struct ring_buffer_event *event;
1383 	static int once;
1384 	int ret;
1385 
1386 	if (unlikely(*delta > (1ULL << 59) && !once++)) {
1387 		printk(KERN_WARNING "Delta way too big! %llu"
1388 		       " ts=%llu write stamp = %llu\n",
1389 		       (unsigned long long)*delta,
1390 		       (unsigned long long)*ts,
1391 		       (unsigned long long)cpu_buffer->write_stamp);
1392 		WARN_ON(1);
1393 	}
1394 
1395 	/*
1396 	 * The delta is too big; we need to add a
1397 	 * new timestamp.
1398 	 */
1399 	event = __rb_reserve_next(cpu_buffer,
1400 				  RINGBUF_TYPE_TIME_EXTEND,
1401 				  RB_LEN_TIME_EXTEND,
1402 				  ts);
1403 	if (!event)
1404 		return -EBUSY;
1405 
1406 	if (PTR_ERR(event) == -EAGAIN)
1407 		return -EAGAIN;
1408 
1409 	/* Only a committed time event can update the write stamp */
1410 	if (rb_is_commit(cpu_buffer, event)) {
1411 		/*
1412 		 * If this is the first on the page, then we need to
1413 		 * update the page itself, and just put in a zero.
1414 		 */
1415 		if (rb_event_index(event)) {
1416 			event->time_delta = *delta & TS_MASK;
1417 			event->array[0] = *delta >> TS_SHIFT;
1418 		} else {
1419 			cpu_buffer->commit_page->page->time_stamp = *ts;
1420 			/* try to discard, since we do not need this */
1421 			if (!rb_try_to_discard(cpu_buffer, event)) {
1422 				/* nope, just zero it */
1423 				event->time_delta = 0;
1424 				event->array[0] = 0;
1425 			}
1426 		}
1427 		cpu_buffer->write_stamp = *ts;
1428 		/* let the caller know this was the commit */
1429 		ret = 1;
1430 	} else {
1431 		/* Try to discard the event */
1432 		if (!rb_try_to_discard(cpu_buffer, event)) {
1433 			/* Darn, this is just wasted space */
1434 			event->time_delta = 0;
1435 			event->array[0] = 0;
1436 		}
1437 		ret = 0;
1438 	}
1439 
1440 	*delta = 0;
1441 
1442 	return ret;
1443 }
1444 
1445 static struct ring_buffer_event *
1446 rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1447 		      unsigned long length)
1448 {
1449 	struct ring_buffer_event *event;
1450 	u64 ts, delta = 0;
1451 	int commit = 0;
1452 	int nr_loops = 0;
1453 
1454 	length = rb_calculate_event_length(length);
1455  again:
1456 	/*
1457 	 * We allow for interrupts to reenter here and do a trace.
1458 	 * If one does, it will cause this original code to loop
1459 	 * back here. Even with heavy interrupts happening, this
1460 	 * should only happen a few times in a row. If this happens
1461 	 * 1000 times in a row, there must be either an interrupt
1462 	 * storm or we have something buggy.
1463 	 * Bail!
1464 	 */
1465 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1466 		return NULL;
1467 
1468 	ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
1469 
1470 	/*
1471 	 * Only the first commit can update the timestamp.
1472 	 * Yes there is a race here. If an interrupt comes in
1473 	 * just after the conditional and it traces too, then it
1474 	 * will also check the deltas. More than one timestamp may
1475 	 * also be made. But only the entry that did the actual
1476 	 * commit will be something other than zero.
1477 	 */
1478 	if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
1479 		   rb_page_write(cpu_buffer->tail_page) ==
1480 		   rb_commit_index(cpu_buffer))) {
1481 		u64 diff;
1482 
1483 		diff = ts - cpu_buffer->write_stamp;
1484 
1485 		/* make sure this diff is calculated here */
1486 		barrier();
1487 
1488 		/* Did the write stamp get updated already? */
1489 		if (unlikely(ts < cpu_buffer->write_stamp))
1490 			goto get_event;
1491 
1492 		delta = diff;
1493 		if (unlikely(test_time_stamp(delta))) {
1494 
1495 			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1496 			if (commit == -EBUSY)
1497 				return NULL;
1498 
1499 			if (commit == -EAGAIN)
1500 				goto again;
1501 
1502 			RB_WARN_ON(cpu_buffer, commit < 0);
1503 		}
1504 	}
1505 
1506  get_event:
1507 	event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
1508 	if (unlikely(PTR_ERR(event) == -EAGAIN))
1509 		goto again;
1510 
1511 	if (!event) {
1512 		if (unlikely(commit))
1513 			/*
1514 			 * Ouch! We needed a timestamp and it was committed. But
1515 			 * we didn't get our event reserved.
1516 			 */
1517 			rb_set_commit_to_write(cpu_buffer);
1518 		return NULL;
1519 	}
1520 
1521 	/*
1522 	 * If the timestamp was committed, make the commit our entry
1523 	 * now so that we will update it when needed.
1524 	 */
1525 	if (unlikely(commit))
1526 		rb_set_commit_event(cpu_buffer, event);
1527 	else if (!rb_is_commit(cpu_buffer, event))
1528 		delta = 0;
1529 
1530 	event->time_delta = delta;
1531 
1532 	return event;
1533 }
1534 
1535 #define TRACE_RECURSIVE_DEPTH 16
1536 
1537 static int trace_recursive_lock(void)
1538 {
1539 	current->trace_recursion++;
1540 
1541 	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
1542 		return 0;
1543 
1544 	/* Disable all tracing before we do anything else */
1545 	tracing_off_permanent();
1546 
1547 	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
1548 		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
1549 		    current->trace_recursion,
1550 		    hardirq_count() >> HARDIRQ_SHIFT,
1551 		    softirq_count() >> SOFTIRQ_SHIFT,
1552 		    in_nmi());
1553 
1554 	WARN_ON_ONCE(1);
1555 	return -1;
1556 }
1557 
1558 static void trace_recursive_unlock(void)
1559 {
1560 	WARN_ON_ONCE(!current->trace_recursion);
1561 
1562 	current->trace_recursion--;
1563 }
1564 
1565 static DEFINE_PER_CPU(int, rb_need_resched);
1566 
1567 /**
1568  * ring_buffer_lock_reserve - reserve a part of the buffer
1569  * @buffer: the ring buffer to reserve from
1570  * @length: the length of the data to reserve (excluding event header)
1571  *
1572  * Returns a reserved event on the ring buffer to copy directly to.
1573  * The user of this interface will need to get the body to write into
1574  * and can use the ring_buffer_event_data() interface.
1575  *
1576  * The length is the length of the data needed, not the event length
1577  * which also includes the event header.
1578  *
1579  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1580  * If NULL is returned, then nothing has been allocated or locked.
1581  */
1582 struct ring_buffer_event *
1583 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
1584 {
1585 	struct ring_buffer_per_cpu *cpu_buffer;
1586 	struct ring_buffer_event *event;
1587 	int cpu, resched;
1588 
1589 	if (ring_buffer_flags != RB_BUFFERS_ON)
1590 		return NULL;
1591 
1592 	if (atomic_read(&buffer->record_disabled))
1593 		return NULL;
1594 
1595 	/* If we are tracing schedule, we don't want to recurse */
1596 	resched = ftrace_preempt_disable();
1597 
1598 	if (trace_recursive_lock())
1599 		goto out_nocheck;
1600 
1601 	cpu = raw_smp_processor_id();
1602 
1603 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1604 		goto out;
1605 
1606 	cpu_buffer = buffer->buffers[cpu];
1607 
1608 	if (atomic_read(&cpu_buffer->record_disabled))
1609 		goto out;
1610 
1611 	if (length > BUF_MAX_DATA_SIZE)
1612 		goto out;
1613 
1614 	event = rb_reserve_next_event(cpu_buffer, length);
1615 	if (!event)
1616 		goto out;
1617 
1618 	/*
1619 	 * Need to store resched state on this cpu.
1620 	 * Only the first needs to.
1621 	 */
1622 
1623 	if (preempt_count() == 1)
1624 		per_cpu(rb_need_resched, cpu) = resched;
1625 
1626 	return event;
1627 
1628  out:
1629 	trace_recursive_unlock();
1630 
1631  out_nocheck:
1632 	ftrace_preempt_enable(resched);
1633 	return NULL;
1634 }
1635 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
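
/*
 * Example (editorial sketch, not part of the original file): the usual
 * reserve/copy/commit sequence for a caller producing data of len bytes
 * (both hypothetical variables):
 *
 *	struct ring_buffer_event *event;
 *	void *body;
 *
 *	event = ring_buffer_lock_reserve(buffer, len);
 *	if (!event)
 *		return -EBUSY;
 *	body = ring_buffer_event_data(event);
 *	memcpy(body, data, len);
 *	ring_buffer_unlock_commit(buffer, event);
 *
 * ring_buffer_write() below wraps exactly this sequence for data that is
 * already available in one piece.
 */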
1636 
1637 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1638 		      struct ring_buffer_event *event)
1639 {
1640 	local_inc(&cpu_buffer->entries);
1641 
1642 	/* Only process further if we own the commit */
1643 	if (!rb_is_commit(cpu_buffer, event))
1644 		return;
1645 
1646 	cpu_buffer->write_stamp += event->time_delta;
1647 
1648 	rb_set_commit_to_write(cpu_buffer);
1649 }
1650 
1651 /**
1652  * ring_buffer_unlock_commit - commit a reserved event
1653  * @buffer: The buffer to commit to
1654  * @event: The event pointer to commit.
1655  *
1656  * This commits the data to the ring buffer, and releases any locks held.
1657  *
1658  * Must be paired with ring_buffer_lock_reserve.
1659  */
1660 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1661 			      struct ring_buffer_event *event)
1662 {
1663 	struct ring_buffer_per_cpu *cpu_buffer;
1664 	int cpu = raw_smp_processor_id();
1665 
1666 	cpu_buffer = buffer->buffers[cpu];
1667 
1668 	rb_commit(cpu_buffer, event);
1669 
1670 	trace_recursive_unlock();
1671 
1672 	/*
1673 	 * Only the last preempt count needs to restore preemption.
1674 	 */
1675 	if (preempt_count() == 1)
1676 		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1677 	else
1678 		preempt_enable_no_resched_notrace();
1679 
1680 	return 0;
1681 }
1682 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
1683 
1684 static inline void rb_event_discard(struct ring_buffer_event *event)
1685 {
1686 	/* array[0] holds the actual length for the discarded event */
1687 	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
1688 	event->type_len = RINGBUF_TYPE_PADDING;
1689 	/* time delta must be non zero */
1690 	if (!event->time_delta)
1691 		event->time_delta = 1;
1692 }
1693 
1694 /**
1695  * ring_buffer_event_discard - discard any event in the ring buffer
1696  * @event: the event to discard
1697  *
1698  * Sometimes an event that is in the ring buffer needs to be ignored.
1699  * This function lets the user discard an event in the ring buffer
1700  * and then that event will not be read later.
1701  *
1702  * Note, it is up to the user to be careful with this, and protect
1703  * against races. If the user discards an event that has been consumed
1704  * it is possible that it could corrupt the ring buffer.
1705  */
1706 void ring_buffer_event_discard(struct ring_buffer_event *event)
1707 {
1708 	rb_event_discard(event);
1709 }
1710 EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
1711 
1712 /**
1713  * ring_buffer_commit_discard - discard an event that has not been committed
1714  * @buffer: the ring buffer
1715  * @event: non committed event to discard
1716  *
1717  * This is similar to ring_buffer_event_discard but must only be
1718  * performed on an event that has not been committed yet. The difference
1719  * is that this will also try to free the event from the ring buffer
1720  * if another event has not been added behind it.
1721  *
1722  * If another event has been added behind it, it will set the event
1723  * up as discarded, and perform the commit.
1724  *
1725  * If this function is called, do not call ring_buffer_unlock_commit on
1726  * the event.
1727  */
1728 void ring_buffer_discard_commit(struct ring_buffer *buffer,
1729 				struct ring_buffer_event *event)
1730 {
1731 	struct ring_buffer_per_cpu *cpu_buffer;
1732 	int cpu;
1733 
1734 	/* The event is discarded regardless */
1735 	rb_event_discard(event);
1736 
1737 	/*
1738 	 * This must only be called if the event has not been
1739 	 * committed yet. Thus we can assume that preemption
1740 	 * is still disabled.
1741 	 */
1742 	RB_WARN_ON(buffer, preemptible());
1743 
1744 	cpu = smp_processor_id();
1745 	cpu_buffer = buffer->buffers[cpu];
1746 
1747 	if (!rb_try_to_discard(cpu_buffer, event))
1748 		goto out;
1749 
1750 	/*
1751 	 * The commit is still visible by the reader, so we
1752 	 * must increment entries.
1753 	 */
1754 	local_inc(&cpu_buffer->entries);
1755  out:
1756 	/*
1757 	 * If a write came in and pushed the tail page
1758 	 * we still need to update the commit pointer
1759 	 * if we were the commit.
1760 	 */
1761 	if (rb_is_commit(cpu_buffer, event))
1762 		rb_set_commit_to_write(cpu_buffer);
1763 
1764 	trace_recursive_unlock();
1765 
1766 	/*
1767 	 * Only the last preempt count needs to restore preemption.
1768 	 */
1769 	if (preempt_count() == 1)
1770 		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1771 	else
1772 		preempt_enable_no_resched_notrace();
1773 
1774 }
1775 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
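
/*
 * Example (editorial sketch, not part of the original file): backing out
 * of a reservation when the caller decides, after filling the event, that
 * it should not be recorded (fill_entry() and event_rejected() are
 * hypothetical helpers):
 *
 *	event = ring_buffer_lock_reserve(buffer, len);
 *	if (!event)
 *		return -EBUSY;
 *	entry = ring_buffer_event_data(event);
 *	fill_entry(entry);
 *	if (event_rejected(entry))
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer, event);
 */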
1776 
1777 /**
1778  * ring_buffer_write - write data to the buffer without reserving
1779  * @buffer: The ring buffer to write to.
1780  * @length: The length of the data being written (excluding the event header)
1781  * @data: The data to write to the buffer.
1782  *
1783  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1784  * one function. If you already have the data to write to the buffer, it
1785  * may be easier to simply call this function.
1786  *
1787  * Note, like ring_buffer_lock_reserve, the length is the length of the data
1788  * and not the length of the event which would hold the header.
1789  */
1790 int ring_buffer_write(struct ring_buffer *buffer,
1791 			unsigned long length,
1792 			void *data)
1793 {
1794 	struct ring_buffer_per_cpu *cpu_buffer;
1795 	struct ring_buffer_event *event;
1796 	void *body;
1797 	int ret = -EBUSY;
1798 	int cpu, resched;
1799 
1800 	if (ring_buffer_flags != RB_BUFFERS_ON)
1801 		return -EBUSY;
1802 
1803 	if (atomic_read(&buffer->record_disabled))
1804 		return -EBUSY;
1805 
1806 	resched = ftrace_preempt_disable();
1807 
1808 	cpu = raw_smp_processor_id();
1809 
1810 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1811 		goto out;
1812 
1813 	cpu_buffer = buffer->buffers[cpu];
1814 
1815 	if (atomic_read(&cpu_buffer->record_disabled))
1816 		goto out;
1817 
1818 	if (length > BUF_MAX_DATA_SIZE)
1819 		goto out;
1820 
1821 	event = rb_reserve_next_event(cpu_buffer, length);
1822 	if (!event)
1823 		goto out;
1824 
1825 	body = rb_event_data(event);
1826 
1827 	memcpy(body, data, length);
1828 
1829 	rb_commit(cpu_buffer, event);
1830 
1831 	ret = 0;
1832  out:
1833 	ftrace_preempt_enable(resched);
1834 
1835 	return ret;
1836 }
1837 EXPORT_SYMBOL_GPL(ring_buffer_write);
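
/*
 * Example (editorial sketch): writing a value in one call, with no
 * separate reserve/commit step:
 *
 *	u64 val = 42;
 *	int ret;
 *
 *	ret = ring_buffer_write(buffer, sizeof(val), &val);
 *
 * A non-zero return (-EBUSY) means the write was dropped, for instance
 * because recording is disabled or the length exceeds BUF_MAX_DATA_SIZE.
 */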
1838 
1839 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1840 {
1841 	struct buffer_page *reader = cpu_buffer->reader_page;
1842 	struct buffer_page *head = cpu_buffer->head_page;
1843 	struct buffer_page *commit = cpu_buffer->commit_page;
1844 
1845 	return reader->read == rb_page_commit(reader) &&
1846 		(commit == reader ||
1847 		 (commit == head &&
1848 		  head->read == rb_page_commit(commit)));
1849 }
1850 
1851 /**
1852  * ring_buffer_record_disable - stop all writes into the buffer
1853  * @buffer: The ring buffer to stop writes to.
1854  *
1855  * This prevents all writes to the buffer. Any attempt to write
1856  * to the buffer after this will fail and return NULL.
1857  *
1858  * The caller should call synchronize_sched() after this.
1859  */
1860 void ring_buffer_record_disable(struct ring_buffer *buffer)
1861 {
1862 	atomic_inc(&buffer->record_disabled);
1863 }
1864 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
1865 
1866 /**
1867  * ring_buffer_record_enable - enable writes to the buffer
1868  * @buffer: The ring buffer to enable writes
1869  *
1870  * Note, multiple disables will need the same number of enables
1871  * to truly enable the writing (much like preempt_disable).
1872  */
1873 void ring_buffer_record_enable(struct ring_buffer *buffer)
1874 {
1875 	atomic_dec(&buffer->record_disabled);
1876 }
1877 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
1878 
1879 /**
1880  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1881  * @buffer: The ring buffer to stop writes to.
1882  * @cpu: The CPU buffer to stop
1883  *
1884  * This prevents all writes to the buffer. Any attempt to write
1885  * to the buffer after this will fail and return NULL.
1886  *
1887  * The caller should call synchronize_sched() after this.
1888  */
1889 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1890 {
1891 	struct ring_buffer_per_cpu *cpu_buffer;
1892 
1893 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1894 		return;
1895 
1896 	cpu_buffer = buffer->buffers[cpu];
1897 	atomic_inc(&cpu_buffer->record_disabled);
1898 }
1899 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
1900 
1901 /**
1902  * ring_buffer_record_enable_cpu - enable writes to the buffer
1903  * @buffer: The ring buffer to enable writes
1904  * @cpu: The CPU to enable.
1905  *
1906  * Note, multiple disables will need the same number of enables
1907  * to truly enable the writing (much like preempt_disable).
1908  */
1909 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1910 {
1911 	struct ring_buffer_per_cpu *cpu_buffer;
1912 
1913 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1914 		return;
1915 
1916 	cpu_buffer = buffer->buffers[cpu];
1917 	atomic_dec(&cpu_buffer->record_disabled);
1918 }
1919 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1920 
1921 /**
1922  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1923  * @buffer: The ring buffer
1924  * @cpu: The per CPU buffer to get the entries from.
1925  */
1926 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1927 {
1928 	struct ring_buffer_per_cpu *cpu_buffer;
1929 	unsigned long ret;
1930 
1931 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1932 		return 0;
1933 
1934 	cpu_buffer = buffer->buffers[cpu];
1935 	ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
1936 		- cpu_buffer->read;
1937 
1938 	return ret;
1939 }
1940 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1941 
1942 /**
1943  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1944  * @buffer: The ring buffer
1945  * @cpu: The per CPU buffer to get the number of overruns from
1946  */
1947 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1948 {
1949 	struct ring_buffer_per_cpu *cpu_buffer;
1950 	unsigned long ret;
1951 
1952 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1953 		return 0;
1954 
1955 	cpu_buffer = buffer->buffers[cpu];
1956 	ret = cpu_buffer->overrun;
1957 
1958 	return ret;
1959 }
1960 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
1961 
1962 /**
1963  * ring_buffer_nmi_dropped_cpu - get the number of NMI writes that were dropped
1964  * @buffer: The ring buffer
1965  * @cpu: The per CPU buffer to get the number of dropped NMI writes from
1966  */
1967 unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
1968 {
1969 	struct ring_buffer_per_cpu *cpu_buffer;
1970 	unsigned long ret;
1971 
1972 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1973 		return 0;
1974 
1975 	cpu_buffer = buffer->buffers[cpu];
1976 	ret = cpu_buffer->nmi_dropped;
1977 
1978 	return ret;
1979 }
1980 EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);
1981 
1982 /**
1983  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
1984  * @buffer: The ring buffer
1985  * @cpu: The per CPU buffer to get the number of commit overruns from
1986  */
1987 unsigned long
1988 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
1989 {
1990 	struct ring_buffer_per_cpu *cpu_buffer;
1991 	unsigned long ret;
1992 
1993 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1994 		return 0;
1995 
1996 	cpu_buffer = buffer->buffers[cpu];
1997 	ret = cpu_buffer->commit_overrun;
1998 
1999 	return ret;
2000 }
2001 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2002 
2003 /**
2004  * ring_buffer_entries - get the number of entries in a buffer
2005  * @buffer: The ring buffer
2006  *
2007  * Returns the total number of entries in the ring buffer
2008  * (all CPU entries)
2009  */
2010 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2011 {
2012 	struct ring_buffer_per_cpu *cpu_buffer;
2013 	unsigned long entries = 0;
2014 	int cpu;
2015 
2016 	/* if you care about this being correct, lock the buffer */
2017 	for_each_buffer_cpu(buffer, cpu) {
2018 		cpu_buffer = buffer->buffers[cpu];
2019 		entries += (local_read(&cpu_buffer->entries) -
2020 			    cpu_buffer->overrun) - cpu_buffer->read;
2021 	}
2022 
2023 	return entries;
2024 }
2025 EXPORT_SYMBOL_GPL(ring_buffer_entries);
2026 
2027 /**
2028  * ring_buffer_overruns - get the number of overruns in the buffer
2029  * @buffer: The ring buffer
2030  *
2031  * Returns the total number of overruns in the ring buffer
2032  * (all CPU entries)
2033  */
2034 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2035 {
2036 	struct ring_buffer_per_cpu *cpu_buffer;
2037 	unsigned long overruns = 0;
2038 	int cpu;
2039 
2040 	/* if you care about this being correct, lock the buffer */
2041 	for_each_buffer_cpu(buffer, cpu) {
2042 		cpu_buffer = buffer->buffers[cpu];
2043 		overruns += cpu_buffer->overrun;
2044 	}
2045 
2046 	return overruns;
2047 }
2048 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
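
/*
 * Illustrative sketch that dumps the per cpu statistics exposed by the
 * accessors above. The printk format and the use of for_each_online_cpu()
 * are assumptions, not code from this file.
 *
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		printk(KERN_INFO "cpu %d: %lu entries, %lu overruns\n",
 *		       cpu,
 *		       ring_buffer_entries_cpu(buffer, cpu),
 *		       ring_buffer_overrun_cpu(buffer, cpu));
 */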
2049 
2050 static void rb_iter_reset(struct ring_buffer_iter *iter)
2051 {
2052 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2053 
2054 	/* Iterator usage is expected to have record disabled */
2055 	if (list_empty(&cpu_buffer->reader_page->list)) {
2056 		iter->head_page = cpu_buffer->head_page;
2057 		iter->head = cpu_buffer->head_page->read;
2058 	} else {
2059 		iter->head_page = cpu_buffer->reader_page;
2060 		iter->head = cpu_buffer->reader_page->read;
2061 	}
2062 	if (iter->head)
2063 		iter->read_stamp = cpu_buffer->read_stamp;
2064 	else
2065 		iter->read_stamp = iter->head_page->page->time_stamp;
2066 }
2067 
2068 /**
2069  * ring_buffer_iter_reset - reset an iterator
2070  * @iter: The iterator to reset
2071  *
2072  * Resets the iterator, so that it will start from the beginning
2073  * again.
2074  */
2075 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2076 {
2077 	struct ring_buffer_per_cpu *cpu_buffer;
2078 	unsigned long flags;
2079 
2080 	if (!iter)
2081 		return;
2082 
2083 	cpu_buffer = iter->cpu_buffer;
2084 
2085 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2086 	rb_iter_reset(iter);
2087 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2088 }
2089 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
2090 
2091 /**
2092  * ring_buffer_iter_empty - check if an iterator has no more to read
2093  * @iter: The iterator to check
2094  */
2095 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2096 {
2097 	struct ring_buffer_per_cpu *cpu_buffer;
2098 
2099 	cpu_buffer = iter->cpu_buffer;
2100 
2101 	return iter->head_page == cpu_buffer->commit_page &&
2102 		iter->head == rb_commit_index(cpu_buffer);
2103 }
2104 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
2105 
2106 static void
2107 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2108 		     struct ring_buffer_event *event)
2109 {
2110 	u64 delta;
2111 
2112 	switch (event->type_len) {
2113 	case RINGBUF_TYPE_PADDING:
2114 		return;
2115 
2116 	case RINGBUF_TYPE_TIME_EXTEND:
2117 		delta = event->array[0];
2118 		delta <<= TS_SHIFT;
2119 		delta += event->time_delta;
2120 		cpu_buffer->read_stamp += delta;
2121 		return;
2122 
2123 	case RINGBUF_TYPE_TIME_STAMP:
2124 		/* FIXME: not implemented */
2125 		return;
2126 
2127 	case RINGBUF_TYPE_DATA:
2128 		cpu_buffer->read_stamp += event->time_delta;
2129 		return;
2130 
2131 	default:
2132 		BUG();
2133 	}
2134 	return;
2135 }
2136 
2137 static void
2138 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2139 			  struct ring_buffer_event *event)
2140 {
2141 	u64 delta;
2142 
2143 	switch (event->type_len) {
2144 	case RINGBUF_TYPE_PADDING:
2145 		return;
2146 
2147 	case RINGBUF_TYPE_TIME_EXTEND:
2148 		delta = event->array[0];
2149 		delta <<= TS_SHIFT;
2150 		delta += event->time_delta;
2151 		iter->read_stamp += delta;
2152 		return;
2153 
2154 	case RINGBUF_TYPE_TIME_STAMP:
2155 		/* FIXME: not implemented */
2156 		return;
2157 
2158 	case RINGBUF_TYPE_DATA:
2159 		iter->read_stamp += event->time_delta;
2160 		return;
2161 
2162 	default:
2163 		BUG();
2164 	}
2165 	return;
2166 }
2167 
2168 static struct buffer_page *
2169 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2170 {
2171 	struct buffer_page *reader = NULL;
2172 	unsigned long flags;
2173 	int nr_loops = 0;
2174 
2175 	local_irq_save(flags);
2176 	__raw_spin_lock(&cpu_buffer->lock);
2177 
2178  again:
2179 	/*
2180 	 * This should normally only loop twice. But because the
2181 	 * start of the reader inserts an empty page, it causes
2182 	 * a case where we will loop three times. There should be no
2183 	 * reason to loop four times (that I know of).
2184 	 */
2185 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2186 		reader = NULL;
2187 		goto out;
2188 	}
2189 
2190 	reader = cpu_buffer->reader_page;
2191 
2192 	/* If there's more to read, return this page */
2193 	if (cpu_buffer->reader_page->read < rb_page_size(reader))
2194 		goto out;
2195 
2196 	/* Never should we have an index greater than the size */
2197 	if (RB_WARN_ON(cpu_buffer,
2198 		       cpu_buffer->reader_page->read > rb_page_size(reader)))
2199 		goto out;
2200 
2201 	/* check if we caught up to the tail */
2202 	reader = NULL;
2203 	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2204 		goto out;
2205 
2206 	/*
2207 	 * Splice the empty reader page into the list around the head.
2208 	 * Reset the reader page to size zero.
2209 	 */
2210 
2211 	reader = cpu_buffer->head_page;
2212 	cpu_buffer->reader_page->list.next = reader->list.next;
2213 	cpu_buffer->reader_page->list.prev = reader->list.prev;
2214 
2215 	local_set(&cpu_buffer->reader_page->write, 0);
2216 	local_set(&cpu_buffer->reader_page->entries, 0);
2217 	local_set(&cpu_buffer->reader_page->page->commit, 0);
2218 
2219 	/* Make the reader page now replace the head */
2220 	reader->list.prev->next = &cpu_buffer->reader_page->list;
2221 	reader->list.next->prev = &cpu_buffer->reader_page->list;
2222 
2223 	/*
2224 	 * If the tail is on the reader, then we must set the head
2225 	 * to the inserted page, otherwise we set it one before.
2226 	 */
2227 	cpu_buffer->head_page = cpu_buffer->reader_page;
2228 
2229 	if (cpu_buffer->commit_page != reader)
2230 		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2231 
2232 	/* Finally update the reader page to the new head */
2233 	cpu_buffer->reader_page = reader;
2234 	rb_reset_reader_page(cpu_buffer);
2235 
2236 	goto again;
2237 
2238  out:
2239 	__raw_spin_unlock(&cpu_buffer->lock);
2240 	local_irq_restore(flags);
2241 
2242 	return reader;
2243 }
2244 
2245 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2246 {
2247 	struct ring_buffer_event *event;
2248 	struct buffer_page *reader;
2249 	unsigned length;
2250 
2251 	reader = rb_get_reader_page(cpu_buffer);
2252 
2253 	/* This function should not be called when buffer is empty */
2254 	if (RB_WARN_ON(cpu_buffer, !reader))
2255 		return;
2256 
2257 	event = rb_reader_event(cpu_buffer);
2258 
2259 	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
2260 			|| rb_discarded_event(event))
2261 		cpu_buffer->read++;
2262 
2263 	rb_update_read_stamp(cpu_buffer, event);
2264 
2265 	length = rb_event_length(event);
2266 	cpu_buffer->reader_page->read += length;
2267 }
2268 
2269 static void rb_advance_iter(struct ring_buffer_iter *iter)
2270 {
2271 	struct ring_buffer *buffer;
2272 	struct ring_buffer_per_cpu *cpu_buffer;
2273 	struct ring_buffer_event *event;
2274 	unsigned length;
2275 
2276 	cpu_buffer = iter->cpu_buffer;
2277 	buffer = cpu_buffer->buffer;
2278 
2279 	/*
2280 	 * Check if we are at the end of the buffer.
2281 	 */
2282 	if (iter->head >= rb_page_size(iter->head_page)) {
2283 		/* discarded commits can make the page empty */
2284 		if (iter->head_page == cpu_buffer->commit_page)
2285 			return;
2286 		rb_inc_iter(iter);
2287 		return;
2288 	}
2289 
2290 	event = rb_iter_head_event(iter);
2291 
2292 	length = rb_event_length(event);
2293 
2294 	/*
2295 	 * This should not be called to advance the iterator head if we are
2296 	 * at the tail of the buffer.
2297 	 */
2298 	if (RB_WARN_ON(cpu_buffer,
2299 		       (iter->head_page == cpu_buffer->commit_page) &&
2300 		       (iter->head + length > rb_commit_index(cpu_buffer))))
2301 		return;
2302 
2303 	rb_update_iter_read_stamp(iter, event);
2304 
2305 	iter->head += length;
2306 
2307 	/* check for end of page padding */
2308 	if ((iter->head >= rb_page_size(iter->head_page)) &&
2309 	    (iter->head_page != cpu_buffer->commit_page))
2310 		rb_advance_iter(iter);
2311 }
2312 
2313 static struct ring_buffer_event *
2314 rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2315 {
2316 	struct ring_buffer_per_cpu *cpu_buffer;
2317 	struct ring_buffer_event *event;
2318 	struct buffer_page *reader;
2319 	int nr_loops = 0;
2320 
2321 	cpu_buffer = buffer->buffers[cpu];
2322 
2323  again:
2324 	/*
2325 	 * We repeat when a timestamp is encountered. It is possible
2326 	 * to get multiple timestamps from an interrupt entering just
2327 	 * as one timestamp is about to be written, or from discarded
2328 	 * commits. The most that we can have is the number on a single page.
2329 	 */
2330 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
2331 		return NULL;
2332 
2333 	reader = rb_get_reader_page(cpu_buffer);
2334 	if (!reader)
2335 		return NULL;
2336 
2337 	event = rb_reader_event(cpu_buffer);
2338 
2339 	switch (event->type_len) {
2340 	case RINGBUF_TYPE_PADDING:
2341 		if (rb_null_event(event))
2342 			RB_WARN_ON(cpu_buffer, 1);
2343 		/*
2344 		 * Because the writer could be discarding every
2345 		 * event it creates (which would probably be bad)
2346 		 * if we were to go back to "again" then we may never
2347 		 * catch up, and will trigger the warn on, or lock
2348 		 * the box. Return the padding, and we will release
2349 		 * the current locks, and try again.
2350 		 */
2351 		rb_advance_reader(cpu_buffer);
2352 		return event;
2353 
2354 	case RINGBUF_TYPE_TIME_EXTEND:
2355 		/* Internal data, OK to advance */
2356 		rb_advance_reader(cpu_buffer);
2357 		goto again;
2358 
2359 	case RINGBUF_TYPE_TIME_STAMP:
2360 		/* FIXME: not implemented */
2361 		rb_advance_reader(cpu_buffer);
2362 		goto again;
2363 
2364 	case RINGBUF_TYPE_DATA:
2365 		if (ts) {
2366 			*ts = cpu_buffer->read_stamp + event->time_delta;
2367 			ring_buffer_normalize_time_stamp(buffer,
2368 							 cpu_buffer->cpu, ts);
2369 		}
2370 		return event;
2371 
2372 	default:
2373 		BUG();
2374 	}
2375 
2376 	return NULL;
2377 }
2378 EXPORT_SYMBOL_GPL(ring_buffer_peek);
2379 
2380 static struct ring_buffer_event *
2381 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2382 {
2383 	struct ring_buffer *buffer;
2384 	struct ring_buffer_per_cpu *cpu_buffer;
2385 	struct ring_buffer_event *event;
2386 	int nr_loops = 0;
2387 
2388 	if (ring_buffer_iter_empty(iter))
2389 		return NULL;
2390 
2391 	cpu_buffer = iter->cpu_buffer;
2392 	buffer = cpu_buffer->buffer;
2393 
2394  again:
2395 	/*
2396 	 * We repeat when a timestamp is encountered.
2397 	 * We can get multiple timestamps by nested interrupts or also
2398 	 * if filtering is on (discarding commits). Since discarding
2399 	 * commits can be frequent we can get a lot of timestamps.
2400 	 * But we limit them by not adding timestamps if they begin
2401 	 * at the start of a page.
2402 	 */
2403 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
2404 		return NULL;
2405 
2406 	if (rb_per_cpu_empty(cpu_buffer))
2407 		return NULL;
2408 
2409 	event = rb_iter_head_event(iter);
2410 
2411 	switch (event->type_len) {
2412 	case RINGBUF_TYPE_PADDING:
2413 		if (rb_null_event(event)) {
2414 			rb_inc_iter(iter);
2415 			goto again;
2416 		}
2417 		rb_advance_iter(iter);
2418 		return event;
2419 
2420 	case RINGBUF_TYPE_TIME_EXTEND:
2421 		/* Internal data, OK to advance */
2422 		rb_advance_iter(iter);
2423 		goto again;
2424 
2425 	case RINGBUF_TYPE_TIME_STAMP:
2426 		/* FIXME: not implemented */
2427 		rb_advance_iter(iter);
2428 		goto again;
2429 
2430 	case RINGBUF_TYPE_DATA:
2431 		if (ts) {
2432 			*ts = iter->read_stamp + event->time_delta;
2433 			ring_buffer_normalize_time_stamp(buffer,
2434 							 cpu_buffer->cpu, ts);
2435 		}
2436 		return event;
2437 
2438 	default:
2439 		BUG();
2440 	}
2441 
2442 	return NULL;
2443 }
2444 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
2445 
2446 /**
2447  * ring_buffer_peek - peek at the next event to be read
2448  * @buffer: The ring buffer to read
2449  * @cpu: The cpu to peek at
2450  * @ts: The timestamp counter of this event.
2451  *
2452  * This will return the event that will be read next, but does
2453  * not consume the data.
2454  */
2455 struct ring_buffer_event *
2456 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2457 {
2458 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2459 	struct ring_buffer_event *event;
2460 	unsigned long flags;
2461 
2462 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2463 		return NULL;
2464 
2465  again:
2466 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2467 	event = rb_buffer_peek(buffer, cpu, ts);
2468 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2469 
2470 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2471 		cpu_relax();
2472 		goto again;
2473 	}
2474 
2475 	return event;
2476 }
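
/*
 * Since ring_buffer_peek() does not consume the event, two peeks with no
 * consuming read in between return the same event. A minimal sketch,
 * assuming "buffer" and "cpu" are valid:
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts);
 *	if (event)
 *		... look at ring_buffer_event_data(event), nothing is consumed ...
 */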
2477 
2478 /**
2479  * ring_buffer_iter_peek - peek at the next event to be read
2480  * @iter: The ring buffer iterator
2481  * @ts: The timestamp counter of this event.
2482  *
2483  * This will return the event that will be read next, but does
2484  * not increment the iterator.
2485  */
2486 struct ring_buffer_event *
2487 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2488 {
2489 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2490 	struct ring_buffer_event *event;
2491 	unsigned long flags;
2492 
2493  again:
2494 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2495 	event = rb_iter_peek(iter, ts);
2496 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2497 
2498 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2499 		cpu_relax();
2500 		goto again;
2501 	}
2502 
2503 	return event;
2504 }
2505 
2506 /**
2507  * ring_buffer_consume - return an event and consume it
2508  * @buffer: The ring buffer to get the next event from
2509  *
2510  * Returns the next event in the ring buffer, and that event is consumed.
2511  * Meaning, that sequential reads will keep returning a different event,
2512  * and eventually empty the ring buffer if the producer is slower.
2513  */
2514 struct ring_buffer_event *
2515 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2516 {
2517 	struct ring_buffer_per_cpu *cpu_buffer;
2518 	struct ring_buffer_event *event = NULL;
2519 	unsigned long flags;
2520 
2521  again:
2522 	/* might be called in atomic */
2523 	preempt_disable();
2524 
2525 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2526 		goto out;
2527 
2528 	cpu_buffer = buffer->buffers[cpu];
2529 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2530 
2531 	event = rb_buffer_peek(buffer, cpu, ts);
2532 	if (!event)
2533 		goto out_unlock;
2534 
2535 	rb_advance_reader(cpu_buffer);
2536 
2537  out_unlock:
2538 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2539 
2540  out:
2541 	preempt_enable();
2542 
2543 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2544 		cpu_relax();
2545 		goto again;
2546 	}
2547 
2548 	return event;
2549 }
2550 EXPORT_SYMBOL_GPL(ring_buffer_consume);
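
/*
 * Typical consuming-read loop, as a sketch; the process() call is an
 * illustrative assumption:
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
 *		process(ring_buffer_event_data(event), ts);
 */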
2551 
2552 /**
2553  * ring_buffer_read_start - start a non consuming read of the buffer
2554  * @buffer: The ring buffer to read from
2555  * @cpu: The cpu buffer to iterate over
2556  *
2557  * This starts up an iteration through the buffer. It also disables
2558  * the recording to the buffer until the reading is finished.
2559  * This prevents the reading from being corrupted. This is not
2560  * a consuming read, so a producer is not expected.
2561  *
2562  * Must be paired with ring_buffer_read_finish.
2563  */
2564 struct ring_buffer_iter *
2565 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2566 {
2567 	struct ring_buffer_per_cpu *cpu_buffer;
2568 	struct ring_buffer_iter *iter;
2569 	unsigned long flags;
2570 
2571 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2572 		return NULL;
2573 
2574 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2575 	if (!iter)
2576 		return NULL;
2577 
2578 	cpu_buffer = buffer->buffers[cpu];
2579 
2580 	iter->cpu_buffer = cpu_buffer;
2581 
2582 	atomic_inc(&cpu_buffer->record_disabled);
2583 	synchronize_sched();
2584 
2585 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2586 	__raw_spin_lock(&cpu_buffer->lock);
2587 	rb_iter_reset(iter);
2588 	__raw_spin_unlock(&cpu_buffer->lock);
2589 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2590 
2591 	return iter;
2592 }
2593 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
2594 
2595 /**
2596  * ring_buffer_read_finish - finish reading the iterator of the buffer
2597  * @iter: The iterator retrieved by ring_buffer_read_start
2598  *
2599  * This re-enables the recording to the buffer, and frees the
2600  * iterator.
2601  */
2602 void
2603 ring_buffer_read_finish(struct ring_buffer_iter *iter)
2604 {
2605 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2606 
2607 	atomic_dec(&cpu_buffer->record_disabled);
2608 	kfree(iter);
2609 }
2610 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
2611 
2612 /**
2613  * ring_buffer_read - read the next item in the ring buffer by the iterator
2614  * @iter: The ring buffer iterator
2615  * @ts: The time stamp of the event read.
2616  *
2617  * This reads the next event in the ring buffer and increments the iterator.
2618  */
2619 struct ring_buffer_event *
2620 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2621 {
2622 	struct ring_buffer_event *event;
2623 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2624 	unsigned long flags;
2625 
2626  again:
2627 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2628 	event = rb_iter_peek(iter, ts);
2629 	if (!event)
2630 		goto out;
2631 
2632 	rb_advance_iter(iter);
2633  out:
2634 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2635 
2636 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2637 		cpu_relax();
2638 		goto again;
2639 	}
2640 
2641 	return event;
2642 }
2643 EXPORT_SYMBOL_GPL(ring_buffer_read);
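
/*
 * Non consuming iteration, as a sketch. ring_buffer_read_start() disables
 * recording on that cpu buffer, so pairing it with
 * ring_buffer_read_finish() matters; handle() is an illustrative
 * assumption.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)) != NULL)
 *		handle(event, ts);
 *	ring_buffer_read_finish(iter);
 */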
2644 
2645 /**
2646  * ring_buffer_size - return the size of the ring buffer (in bytes)
2647  * @buffer: The ring buffer.
2648  */
2649 unsigned long ring_buffer_size(struct ring_buffer *buffer)
2650 {
2651 	return BUF_PAGE_SIZE * buffer->pages;
2652 }
2653 EXPORT_SYMBOL_GPL(ring_buffer_size);
2654 
2655 static void
2656 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2657 {
2658 	cpu_buffer->head_page
2659 		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2660 	local_set(&cpu_buffer->head_page->write, 0);
2661 	local_set(&cpu_buffer->head_page->entries, 0);
2662 	local_set(&cpu_buffer->head_page->page->commit, 0);
2663 
2664 	cpu_buffer->head_page->read = 0;
2665 
2666 	cpu_buffer->tail_page = cpu_buffer->head_page;
2667 	cpu_buffer->commit_page = cpu_buffer->head_page;
2668 
2669 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2670 	local_set(&cpu_buffer->reader_page->write, 0);
2671 	local_set(&cpu_buffer->reader_page->entries, 0);
2672 	local_set(&cpu_buffer->reader_page->page->commit, 0);
2673 	cpu_buffer->reader_page->read = 0;
2674 
2675 	cpu_buffer->nmi_dropped = 0;
2676 	cpu_buffer->commit_overrun = 0;
2677 	cpu_buffer->overrun = 0;
2678 	cpu_buffer->read = 0;
2679 	local_set(&cpu_buffer->entries, 0);
2680 
2681 	cpu_buffer->write_stamp = 0;
2682 	cpu_buffer->read_stamp = 0;
2683 }
2684 
2685 /**
2686  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2687  * @buffer: The ring buffer to reset a per cpu buffer of
2688  * @cpu: The CPU buffer to be reset
2689  */
2690 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2691 {
2692 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2693 	unsigned long flags;
2694 
2695 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2696 		return;
2697 
2698 	atomic_inc(&cpu_buffer->record_disabled);
2699 
2700 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2701 
2702 	__raw_spin_lock(&cpu_buffer->lock);
2703 
2704 	rb_reset_cpu(cpu_buffer);
2705 
2706 	__raw_spin_unlock(&cpu_buffer->lock);
2707 
2708 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2709 
2710 	atomic_dec(&cpu_buffer->record_disabled);
2711 }
2712 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
2713 
2714 /**
2715  * ring_buffer_reset - reset a ring buffer
2716  * @buffer: The ring buffer to reset all cpu buffers
2717  */
2718 void ring_buffer_reset(struct ring_buffer *buffer)
2719 {
2720 	int cpu;
2721 
2722 	for_each_buffer_cpu(buffer, cpu)
2723 		ring_buffer_reset_cpu(buffer, cpu);
2724 }
2725 EXPORT_SYMBOL_GPL(ring_buffer_reset);
2726 
2727 /**
2728  * ring_buffer_empty - is the ring buffer empty?
2729  * @buffer: The ring buffer to test
2730  */
2731 int ring_buffer_empty(struct ring_buffer *buffer)
2732 {
2733 	struct ring_buffer_per_cpu *cpu_buffer;
2734 	int cpu;
2735 
2736 	/* yes this is racy, but if you don't like the race, lock the buffer */
2737 	for_each_buffer_cpu(buffer, cpu) {
2738 		cpu_buffer = buffer->buffers[cpu];
2739 		if (!rb_per_cpu_empty(cpu_buffer))
2740 			return 0;
2741 	}
2742 
2743 	return 1;
2744 }
2745 EXPORT_SYMBOL_GPL(ring_buffer_empty);
2746 
2747 /**
2748  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2749  * @buffer: The ring buffer
2750  * @cpu: The CPU buffer to test
2751  */
2752 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2753 {
2754 	struct ring_buffer_per_cpu *cpu_buffer;
2755 	int ret;
2756 
2757 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2758 		return 1;
2759 
2760 	cpu_buffer = buffer->buffers[cpu];
2761 	ret = rb_per_cpu_empty(cpu_buffer);
2762 
2763 
2764 	return ret;
2765 }
2766 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
2767 
2768 /**
2769  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2770  * @buffer_a: One buffer to swap with
2771  * @buffer_b: The other buffer to swap with
2772  *
2773  * This function is useful for tracers that want to take a "snapshot"
2774  * of a CPU buffer and have another backup buffer lying around.
2775  * It is expected that the tracer handles the cpu buffer not being
2776  * used at the moment.
2777  */
2778 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2779 			 struct ring_buffer *buffer_b, int cpu)
2780 {
2781 	struct ring_buffer_per_cpu *cpu_buffer_a;
2782 	struct ring_buffer_per_cpu *cpu_buffer_b;
2783 	int ret = -EINVAL;
2784 
2785 	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2786 	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
2787 		goto out;
2788 
2789 	/* At least make sure the two buffers are somewhat the same */
2790 	if (buffer_a->pages != buffer_b->pages)
2791 		goto out;
2792 
2793 	ret = -EAGAIN;
2794 
2795 	if (ring_buffer_flags != RB_BUFFERS_ON)
2796 		goto out;
2797 
2798 	if (atomic_read(&buffer_a->record_disabled))
2799 		goto out;
2800 
2801 	if (atomic_read(&buffer_b->record_disabled))
2802 		goto out;
2803 
2804 	cpu_buffer_a = buffer_a->buffers[cpu];
2805 	cpu_buffer_b = buffer_b->buffers[cpu];
2806 
2807 	if (atomic_read(&cpu_buffer_a->record_disabled))
2808 		goto out;
2809 
2810 	if (atomic_read(&cpu_buffer_b->record_disabled))
2811 		goto out;
2812 
2813 	/*
2814 	 * We can't do a synchronize_sched here because this
2815 	 * function can be called in atomic context.
2816 	 * Normally this will be called from the same CPU as cpu.
2817 	 * If not it's up to the caller to protect this.
2818 	 */
2819 	atomic_inc(&cpu_buffer_a->record_disabled);
2820 	atomic_inc(&cpu_buffer_b->record_disabled);
2821 
2822 	buffer_a->buffers[cpu] = cpu_buffer_b;
2823 	buffer_b->buffers[cpu] = cpu_buffer_a;
2824 
2825 	cpu_buffer_b->buffer = buffer_a;
2826 	cpu_buffer_a->buffer = buffer_b;
2827 
2828 	atomic_dec(&cpu_buffer_a->record_disabled);
2829 	atomic_dec(&cpu_buffer_b->record_disabled);
2830 
2831 	ret = 0;
2832 out:
2833 	return ret;
2834 }
2835 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
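
/*
 * Snapshot sketch based on the description above: the tracer keeps a
 * spare buffer around and swaps the live cpu buffer into it, then reads
 * the snapshot at leisure. "snapshot_buffer" is an illustrative
 * assumption.
 *
 *	if (ring_buffer_swap_cpu(snapshot_buffer, buffer, cpu) == 0) {
 *		... consume or iterate over snapshot_buffer for this cpu,
 *		    while "buffer" keeps recording into the cpu buffer it
 *		    received in exchange ...
 *	}
 */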
2836 
2837 /**
2838  * ring_buffer_alloc_read_page - allocate a page to read from buffer
2839  * @buffer: the buffer to allocate for.
2840  *
2841  * This function is used in conjunction with ring_buffer_read_page.
2842  * When reading a full page from the ring buffer, these functions
2843  * can be used to speed up the process. The calling function should
2844  * allocate a few pages first with this function. Then when it
2845  * needs to get pages from the ring buffer, it passes the result
2846  * of this function into ring_buffer_read_page, which will swap
2847  * the page that was allocated, with the read page of the buffer.
2848  *
2849  * Returns:
2850  *  The page allocated, or NULL on error.
2851  */
2852 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2853 {
2854 	struct buffer_data_page *bpage;
2855 	unsigned long addr;
2856 
2857 	addr = __get_free_page(GFP_KERNEL);
2858 	if (!addr)
2859 		return NULL;
2860 
2861 	bpage = (void *)addr;
2862 
2863 	rb_init_page(bpage);
2864 
2865 	return bpage;
2866 }
2867 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
2868 
2869 /**
2870  * ring_buffer_free_read_page - free an allocated read page
2871  * @buffer: the buffer the page was allocated for
2872  * @data: the page to free
2873  *
2874  * Free a page allocated from ring_buffer_alloc_read_page.
2875  */
2876 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2877 {
2878 	free_page((unsigned long)data);
2879 }
2880 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
2881 
2882 /**
2883  * ring_buffer_read_page - extract a page from the ring buffer
2884  * @buffer: buffer to extract from
2885  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2886  * @len: amount to extract
2887  * @cpu: the cpu of the buffer to extract
2888  * @full: should the extraction only happen when the page is full.
2889  *
2890  * This function will pull out a page from the ring buffer and consume it.
2891  * @data_page must be the address of the variable that was returned
2892  * from ring_buffer_alloc_read_page. This is because the page might be used
2893  * to swap with a page in the ring buffer.
2894  *
2895  * for example:
2896  *	rpage = ring_buffer_alloc_read_page(buffer);
2897  *	if (!rpage)
2898  *		return error;
2899  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
2900  *	if (ret >= 0)
2901  *		process_page(rpage, ret);
2902  *
2903  * When @full is set, the function will only succeed (return >= 0) when
2904  * the writer is off the reader page.
2905  *
2906  * Note: it is up to the calling functions to handle sleeps and wakeups.
2907  *  The ring buffer can be used anywhere in the kernel and can not
2908  *  blindly call wake_up. The layer that uses the ring buffer must be
2909  *  responsible for that.
2910  *
2911  * Returns:
2912  *  >=0 if data has been transferred, returns the offset of consumed data.
2913  *  <0 if no data has been transferred.
2914  */
2915 int ring_buffer_read_page(struct ring_buffer *buffer,
2916 			  void **data_page, size_t len, int cpu, int full)
2917 {
2918 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2919 	struct ring_buffer_event *event;
2920 	struct buffer_data_page *bpage;
2921 	struct buffer_page *reader;
2922 	unsigned long flags;
2923 	unsigned int commit;
2924 	unsigned int read;
2925 	u64 save_timestamp;
2926 	int ret = -1;
2927 
2928 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2929 		goto out;
2930 
2931 	/*
2932 	 * If len is not big enough to hold the page header, then
2933 	 * we can not copy anything.
2934 	 */
2935 	if (len <= BUF_PAGE_HDR_SIZE)
2936 		goto out;
2937 
2938 	len -= BUF_PAGE_HDR_SIZE;
2939 
2940 	if (!data_page)
2941 		goto out;
2942 
2943 	bpage = *data_page;
2944 	if (!bpage)
2945 		goto out;
2946 
2947 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2948 
2949 	reader = rb_get_reader_page(cpu_buffer);
2950 	if (!reader)
2951 		goto out_unlock;
2952 
2953 	event = rb_reader_event(cpu_buffer);
2954 
2955 	read = reader->read;
2956 	commit = rb_page_commit(reader);
2957 
2958 	/*
2959 	 * If this page has been partially read or
2960 	 * if len is not big enough to read the rest of the page or
2961 	 * a writer is still on the page, then
2962 	 * we must copy the data from the page to the buffer.
2963 	 * Otherwise, we can simply swap the page with the one passed in.
2964 	 */
2965 	if (read || (len < (commit - read)) ||
2966 	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
2967 		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
2968 		unsigned int rpos = read;
2969 		unsigned int pos = 0;
2970 		unsigned int size;
2971 
2972 		if (full)
2973 			goto out_unlock;
2974 
2975 		if (len > (commit - read))
2976 			len = (commit - read);
2977 
2978 		size = rb_event_length(event);
2979 
2980 		if (len < size)
2981 			goto out_unlock;
2982 
2983 		/* save the current timestamp, since the user will need it */
2984 		save_timestamp = cpu_buffer->read_stamp;
2985 
2986 		/* Need to copy one event at a time */
2987 		do {
2988 			memcpy(bpage->data + pos, rpage->data + rpos, size);
2989 
2990 			len -= size;
2991 
2992 			rb_advance_reader(cpu_buffer);
2993 			rpos = reader->read;
2994 			pos += size;
2995 
2996 			event = rb_reader_event(cpu_buffer);
2997 			size = rb_event_length(event);
2998 		} while (len > size);
2999 
3000 		/* update bpage */
3001 		local_set(&bpage->commit, pos);
3002 		bpage->time_stamp = save_timestamp;
3003 
3004 		/* we copied everything to the beginning */
3005 		read = 0;
3006 	} else {
3007 		/* update the entry counter */
3008 		cpu_buffer->read += local_read(&reader->entries);
3009 
3010 		/* swap the pages */
3011 		rb_init_page(bpage);
3012 		bpage = reader->page;
3013 		reader->page = *data_page;
3014 		local_set(&reader->write, 0);
3015 		local_set(&reader->entries, 0);
3016 		reader->read = 0;
3017 		*data_page = bpage;
3018 	}
3019 	ret = read;
3020 
3021  out_unlock:
3022 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3023 
3024  out:
3025 	return ret;
3026 }
3027 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
3028 
3029 static ssize_t
3030 rb_simple_read(struct file *filp, char __user *ubuf,
3031 	       size_t cnt, loff_t *ppos)
3032 {
3033 	unsigned long *p = filp->private_data;
3034 	char buf[64];
3035 	int r;
3036 
3037 	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3038 		r = sprintf(buf, "permanently disabled\n");
3039 	else
3040 		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
3041 
3042 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3043 }
3044 
3045 static ssize_t
3046 rb_simple_write(struct file *filp, const char __user *ubuf,
3047 		size_t cnt, loff_t *ppos)
3048 {
3049 	unsigned long *p = filp->private_data;
3050 	char buf[64];
3051 	unsigned long val;
3052 	int ret;
3053 
3054 	if (cnt >= sizeof(buf))
3055 		return -EINVAL;
3056 
3057 	if (copy_from_user(&buf, ubuf, cnt))
3058 		return -EFAULT;
3059 
3060 	buf[cnt] = 0;
3061 
3062 	ret = strict_strtoul(buf, 10, &val);
3063 	if (ret < 0)
3064 		return ret;
3065 
3066 	if (val)
3067 		set_bit(RB_BUFFERS_ON_BIT, p);
3068 	else
3069 		clear_bit(RB_BUFFERS_ON_BIT, p);
3070 
3071 	(*ppos)++;
3072 
3073 	return cnt;
3074 }
3075 
3076 static const struct file_operations rb_simple_fops = {
3077 	.open		= tracing_open_generic,
3078 	.read		= rb_simple_read,
3079 	.write		= rb_simple_write,
3080 };
3081 
3082 
3083 static __init int rb_init_debugfs(void)
3084 {
3085 	struct dentry *d_tracer;
3086 
3087 	d_tracer = tracing_init_dentry();
3088 
3089 	trace_create_file("tracing_on", 0644, d_tracer,
3090 			    &ring_buffer_flags, &rb_simple_fops);
3091 
3092 	return 0;
3093 }
3094 
3095 fs_initcall(rb_init_debugfs);
3096 
3097 #ifdef CONFIG_HOTPLUG_CPU
3098 static int rb_cpu_notify(struct notifier_block *self,
3099 			 unsigned long action, void *hcpu)
3100 {
3101 	struct ring_buffer *buffer =
3102 		container_of(self, struct ring_buffer, cpu_notify);
3103 	long cpu = (long)hcpu;
3104 
3105 	switch (action) {
3106 	case CPU_UP_PREPARE:
3107 	case CPU_UP_PREPARE_FROZEN:
3108 		if (cpu_isset(cpu, *buffer->cpumask))
3109 			return NOTIFY_OK;
3110 
3111 		buffer->buffers[cpu] =
3112 			rb_allocate_cpu_buffer(buffer, cpu);
3113 		if (!buffer->buffers[cpu]) {
3114 			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3115 			     cpu);
3116 			return NOTIFY_OK;
3117 		}
3118 		smp_wmb();
3119 		cpu_set(cpu, *buffer->cpumask);
3120 		break;
3121 	case CPU_DOWN_PREPARE:
3122 	case CPU_DOWN_PREPARE_FROZEN:
3123 		/*
3124 		 * Do nothing.
3125 		 *  If we were to free the buffer, then the user would
3126 		 *  lose any trace that was in the buffer.
3127 		 */
3128 		break;
3129 	default:
3130 		break;
3131 	}
3132 	return NOTIFY_OK;
3133 }
3134 #endif
3135