ring_buffer.c: 918143e8b7d6153d7a83a3f854323407939f4a7e -> 1f8a6a10fb9437eac3f516ea4324a19087872f30
 /*
  * Generic ring buffer
  *
  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  */
 #include <linux/ring_buffer.h>
 #include <linux/trace_clock.h>
 #include <linux/ftrace_irq.h>

--- 412 unchanged lines hidden ---

 struct ring_buffer {
 	unsigned			pages;
 	unsigned			flags;
 	int				cpus;
 	atomic_t			record_disabled;
 	cpumask_var_t			cpumask;

+	struct lock_class_key		*reader_lock_key;
+
 	struct mutex			mutex;

 	struct ring_buffer_per_cpu	**buffers;

 #ifdef CONFIG_HOTPLUG_CPU
 	struct notifier_block		cpu_notify;
 #endif
 	u64				(*clock)(void);

--- 123 unchanged lines hidden ---

 	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
 				  GFP_KERNEL, cpu_to_node(cpu));
 	if (!cpu_buffer)
 		return NULL;

 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
+	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	INIT_LIST_HEAD(&cpu_buffer->pages);

 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			     GFP_KERNEL, cpu_to_node(cpu));
 	if (!bpage)
 		goto fail_free_buffer;
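
A note on the lockdep_set_class() call added above: lockdep identifies a lock class by the address of a struct lock_class_key, and every lock initialized at the same spin_lock_init() call site normally falls into one class. Because every per-CPU reader_lock of every ring buffer is initialized right here, locks belonging to different buffers would otherwise share a class, and nesting them could be reported as recursive locking. Reassigning each lock to the buffer's caller-supplied key splits the classes per allocating call site. A minimal sketch of the general pattern, with all demo_* names hypothetical:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct demo_obj {
	spinlock_t lock;
};

/*
 * Hypothetical init: @key is supplied by the caller, typically the
 * address of a static struct lock_class_key naming the class.
 */
static void demo_obj_init(struct demo_obj *obj, struct lock_class_key *key)
{
	spin_lock_init(&obj->lock);
	/* Move the lock out of the class spin_lock_init() assigned. */
	lockdep_set_class(&obj->lock, key);
}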

--- 54 unchanged lines hidden ---

  * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
  * flag. This flag means that the buffer will overwrite old data
  * when the buffer wraps. If this flag is not set, the buffer will
  * drop data when the tail hits the head.
  */
-struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
+struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+					struct lock_class_key *key)
 {
 	struct ring_buffer *buffer;
 	int bsize;
 	int cpu;

 	/* Paranoid! Optimizes out when all is well */
 	if (sizeof(struct buffer_page) > sizeof(struct page))
 		ring_buffer_page_too_big();

--- 6 unchanged lines hidden ---

 		return NULL;

 	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
 		goto fail_free_buffer;

 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	buffer->flags = flags;
 	buffer->clock = trace_clock_local;
+	buffer->reader_lock_key = key;

 	/* need at least two pages */
 	if (buffer->pages == 1)
 		buffer->pages++;

 	/*
 	 * In case of non-hotplug cpu, if the ring-buffer is allocated
 	 * in early initcall, it will not be notified of secondary cpus.

--- 41 unchanged lines hidden ---

  fail_free_cpumask:
 	free_cpumask_var(buffer->cpumask);
 	put_online_cpus();

  fail_free_buffer:
 	kfree(buffer);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(ring_buffer_alloc);
+EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
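
The export rename suggests ring_buffer_alloc lives on as a wrapper around __ring_buffer_alloc, presumably in include/linux/ring_buffer.h (not shown in this hunk). A sketch of the conventional shape of such a wrapper, assuming the usual kernel idiom of one static key per call site:

/*
 * Hypothetical wrapper: the static __key gives each allocating call
 * site its own lockdep class for the buffer's reader_lock.
 */
#define ring_buffer_alloc(size, flags)				\
({								\
	static struct lock_class_key __key;			\
	__ring_buffer_alloc((size), (flags), &__key);		\
})

With this shape, existing callers keep compiling unchanged while each call site silently contributes a distinct key.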

 /**
  * ring_buffer_free - free a ring buffer.
  * @buffer: the buffer to free.
  */
 void
 ring_buffer_free(struct ring_buffer *buffer)
 {

--- 2403 unchanged lines hidden ---
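
For completeness, a hypothetical caller pairing the two entry points visible in this diff; RB_FL_OVERWRITE is the flag described in the kernel-doc above, and the size is arbitrary:

/*
 * Hypothetical usage: a 1 MiB-per-cpu buffer that overwrites old
 * events on wrap, torn down with ring_buffer_free().
 */
static int demo_setup(void)
{
	struct ring_buffer *rb;

	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!rb)
		return -ENOMEM;

	/* ... produce and consume events ... */

	ring_buffer_free(rb);
	return 0;
}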