Lines matching full:rb (kernel/events/ring_buffer.c)

22 	atomic_set(&handle->rb->poll, EPOLLIN | EPOLLRDNORM);  in perf_output_wakeup()
38 struct perf_buffer *rb = handle->rb; in perf_output_get_handle() local
46 (*(volatile unsigned int *)&rb->nest)++; in perf_output_get_handle()
47 handle->wakeup = local_read(&rb->wakeup); in perf_output_get_handle()
52 struct perf_buffer *rb = handle->rb; in perf_output_put_handle() local
58 * @rb->user_page->data_head. in perf_output_put_handle()
60 nest = READ_ONCE(rb->nest); in perf_output_put_handle()
62 WRITE_ONCE(rb->nest, nest - 1); in perf_output_put_handle()
69 * we must ensure the load of @rb->head happens after we've in perf_output_put_handle()
70 * incremented @rb->nest. in perf_output_put_handle()
72 * Otherwise we can observe a @rb->head value before one published in perf_output_put_handle()
76 head = local_read(&rb->head); in perf_output_put_handle()
79 * IRQ/NMI can happen here and advance @rb->head, causing our in perf_output_put_handle()
110 WRITE_ONCE(rb->user_page->data_head, head); in perf_output_put_handle()
118 WRITE_ONCE(rb->nest, 0); in perf_output_put_handle()
121 * Ensure we decrement @rb->nest before we validate the @rb->head. in perf_output_put_handle()
125 if (unlikely(head != local_read(&rb->head))) { in perf_output_put_handle()
126 WRITE_ONCE(rb->nest, 1); in perf_output_put_handle()
130 if (handle->wakeup != local_read(&rb->wakeup)) in perf_output_put_handle()
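
The put_handle lines above (38-130) implement the lockless nested-writer scheme the comments describe: only the outermost writer publishes rb->head to the user page, and after dropping rb->nest it must re-read rb->head because an IRQ or NMI may have nested in and advanced it. Below is a minimal userspace model of that scheme in C11 atomics; struct buf, put_handle() and the release/seq_cst orderings are illustrative stand-ins for the kernel's local_t ops and explicit barriers, and the wakeup check (line 130) is omitted.

    #include <stdatomic.h>
    #include <stdint.h>

    struct buf {
        _Atomic unsigned int nest;  /* writer recursion depth (task, IRQ, NMI) */
        _Atomic uint64_t head;      /* private write head */
        _Atomic uint64_t data_head; /* head as published to the reader */
    };

    static void put_handle(struct buf *b)
    {
        unsigned int nest = atomic_load_explicit(&b->nest, memory_order_relaxed);
        uint64_t head;

        if (nest > 1) {
            /* Nested writer: the outermost writer publishes for everyone. */
            atomic_store_explicit(&b->nest, nest - 1, memory_order_relaxed);
            return;
        }

    again:
        head = atomic_load_explicit(&b->head, memory_order_relaxed);

        /* Release-publish: all record data written so far becomes visible
           to a reader that acquire-loads data_head. */
        atomic_store_explicit(&b->data_head, head, memory_order_release);

        /* Drop nest, then re-check head: an interrupt that nested after the
           head load may have advanced it, and its data must not be lost. */
        atomic_store_explicit(&b->nest, 0, memory_order_seq_cst);
        if (head != atomic_load_explicit(&b->head, memory_order_relaxed)) {
            atomic_store_explicit(&b->nest, 1, memory_order_relaxed);
            goto again;
        }
    }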
154 struct perf_buffer *rb; in __perf_output_begin() local
170 rb = rcu_dereference(event->rb); in __perf_output_begin()
171 if (unlikely(!rb)) in __perf_output_begin()
174 if (unlikely(rb->paused)) { in __perf_output_begin()
175 if (rb->nr_pages) { in __perf_output_begin()
176 local_inc(&rb->lost); in __perf_output_begin()
182 handle->rb = rb; in __perf_output_begin()
186 have_lost = local_read(&rb->lost); in __perf_output_begin()
195 offset = local_read(&rb->head); in __perf_output_begin()
198 tail = READ_ONCE(rb->user_page->data_tail); in __perf_output_begin()
199 if (!rb->overwrite) { in __perf_output_begin()
201 perf_data_size(rb), in __perf_output_begin()
222 } while (!local_try_cmpxchg(&rb->head, &offset, head)); in __perf_output_begin()
234 if (unlikely(head - local_read(&rb->wakeup) > rb->watermark)) in __perf_output_begin()
235 local_add(rb->watermark, &rb->wakeup); in __perf_output_begin()
237 page_shift = PAGE_SHIFT + page_order(rb); in __perf_output_begin()
239 handle->page = (offset >> page_shift) & (rb->nr_pages - 1); in __perf_output_begin()
241 handle->addr = rb->data_pages[handle->page] + offset; in __perf_output_begin()
249 lost_event.lost = local_xchg(&rb->lost, 0); in __perf_output_begin()
260 local_inc(&rb->lost); in __perf_output_begin()
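
__perf_output_begin() (lines 154-260) reserves space by advancing rb->head in a compare-and-swap loop against the reader's data_tail, counts a lost record on failure, and arms a wakeup each time head crosses another watermark (lines 234-235). A hedged sketch of just the reservation loop; struct ring and reserve() are illustrative names, the fullness test simplifies the kernel's CIRC_SPACE-based check, and the lost-event accounting is left out.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct ring {
        _Atomic uint64_t head;      /* free-running write offset */
        _Atomic uint64_t data_tail; /* reader progress, from the user page */
        uint64_t size;              /* power-of-two data area size */
        bool overwrite;
    };

    /* Reserve len bytes; returns the start offset or UINT64_MAX if full.
       The caller writes at (offset & (size - 1)) and later publishes head. */
    static uint64_t reserve(struct ring *r, uint64_t len)
    {
        uint64_t offset = atomic_load_explicit(&r->head, memory_order_relaxed);
        uint64_t head;

        do {
            uint64_t tail = atomic_load_explicit(&r->data_tail,
                                                 memory_order_acquire);
            head = offset + len;
            /* In non-overwrite mode, refuse to lap the reader: the record
               would clobber data user space has not consumed yet. */
            if (!r->overwrite && head - tail > r->size)
                return UINT64_MAX;  /* caller bumps its lost-record count */
        } while (!atomic_compare_exchange_weak_explicit(&r->head, &offset,
                                                        head,
                                                        memory_order_relaxed,
                                                        memory_order_relaxed));
        return offset;
    }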
311 ring_buffer_init(struct perf_buffer *rb, long watermark, int flags) in ring_buffer_init() argument
313 long max_size = perf_data_size(rb); in ring_buffer_init()
316 rb->watermark = min(max_size, watermark); in ring_buffer_init()
318 if (!rb->watermark) in ring_buffer_init()
319 rb->watermark = max_size / 2; in ring_buffer_init()
322 rb->overwrite = 0; in ring_buffer_init()
324 rb->overwrite = 1; in ring_buffer_init()
326 refcount_set(&rb->refcount, 1); in ring_buffer_init()
328 INIT_LIST_HEAD(&rb->event_list); in ring_buffer_init()
329 spin_lock_init(&rb->event_lock); in ring_buffer_init()
332 * perf_output_begin() only checks rb->paused, therefore in ring_buffer_init()
333 * rb->paused must be true if we have no pages for output. in ring_buffer_init()
335 if (!rb->nr_pages) in ring_buffer_init()
336 rb->paused = 1; in ring_buffer_init()
338 mutex_init(&rb->aux_mutex); in ring_buffer_init()
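
ring_buffer_init() (lines 311-338) clamps the requested watermark to the buffer size and treats zero as "wake the reader at half full" (lines 316-319). Restated as a standalone helper; pick_watermark is an illustrative name:

    /* Watermark selection as in ring_buffer_init(), where max_size is
       perf_data_size(rb): clamp the request, default to half the buffer. */
    static long pick_watermark(long requested, long max_size)
    {
        long w = requested < max_size ? requested : max_size;

        return w ? w : max_size / 2;
    }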
373 struct perf_buffer *rb; in perf_aux_output_begin() local
384 rb = ring_buffer_get(output_event); in perf_aux_output_begin()
385 if (!rb) in perf_aux_output_begin()
388 if (!rb_has_aux(rb)) in perf_aux_output_begin()
395 * Checking rb::aux_mmap_count and rb::refcount has to be done in in perf_aux_output_begin()
399 if (!atomic_read(&rb->aux_mmap_count)) in perf_aux_output_begin()
402 if (!refcount_inc_not_zero(&rb->aux_refcount)) in perf_aux_output_begin()
405 nest = READ_ONCE(rb->aux_nest); in perf_aux_output_begin()
413 WRITE_ONCE(rb->aux_nest, nest + 1); in perf_aux_output_begin()
415 aux_head = rb->aux_head; in perf_aux_output_begin()
417 handle->rb = rb; in perf_aux_output_begin()
428 if (!rb->aux_overwrite) { in perf_aux_output_begin()
429 aux_tail = READ_ONCE(rb->user_page->aux_tail); in perf_aux_output_begin()
430 handle->wakeup = rb->aux_wakeup + rb->aux_watermark; in perf_aux_output_begin()
431 if (aux_head - aux_tail < perf_aux_size(rb)) in perf_aux_output_begin()
432 handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb)); in perf_aux_output_begin()
442 WRITE_ONCE(rb->aux_nest, 0); in perf_aux_output_begin()
447 return handle->rb->aux_priv; in perf_aux_output_begin()
451 rb_free_aux(rb); in perf_aux_output_begin()
454 ring_buffer_put(rb); in perf_aux_output_begin()
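
In non-overwrite mode the AUX handle is sized with CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb)) (line 432). The macro below is the actual definition from include/linux/circ_buf.h: free space in a power-of-two ring, with one byte kept unused so a full ring is distinguishable from an empty one. A quick demonstration of the values it yields:

    #include <stdio.h>

    /* CIRC_SPACE() from include/linux/circ_buf.h: free bytes in a
       power-of-two ring, leaving one byte unused so full != empty. */
    #define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))

    int main(void)
    {
        unsigned long size = 4096;

        printf("empty ring:  %lu free\n", CIRC_SPACE(0UL, 0UL, size));     /* 4095 */
        printf("50 unread:   %lu free\n", CIRC_SPACE(150UL, 100UL, size)); /* 4045 */
        printf("nearly full: %lu free\n", CIRC_SPACE(4095UL, 0UL, size));  /* 0 */
        return 0;
    }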
461 static __always_inline bool rb_need_aux_wakeup(struct perf_buffer *rb) in rb_need_aux_wakeup() argument
463 if (rb->aux_overwrite) in rb_need_aux_wakeup()
466 if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) { in rb_need_aux_wakeup()
467 rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark); in rb_need_aux_wakeup()
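
rb_need_aux_wakeup() (lines 461-467) fires at most once per watermark of new AUX data and snaps aux_wakeup down to a watermark multiple, so one large write produces a single wakeup rather than several. A userspace model; struct aux_state and need_aux_wakeup are illustrative names, and the division stands in for the kernel's rounddown():

    #include <stdbool.h>
    #include <stdint.h>

    struct aux_state {
        uint64_t head;      /* free-running AUX write position */
        uint64_t wakeup;    /* position of the last wakeup, watermark-aligned */
        uint64_t watermark; /* wake the reader every this many bytes */
        bool overwrite;
    };

    static bool need_aux_wakeup(struct aux_state *a)
    {
        if (a->overwrite)
            return false;   /* no flow control in overwrite mode */

        if (a->head - a->wakeup >= a->watermark) {
            /* Snap to a watermark boundary: one big write, one wakeup. */
            a->wakeup = (a->head / a->watermark) * a->watermark;
            return true;
        }
        return false;
    }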
487 struct perf_buffer *rb = handle->rb; in perf_aux_output_end() local
491 if (rb->aux_overwrite) { in perf_aux_output_end()
495 rb->aux_head = aux_head; in perf_aux_output_end()
499 aux_head = rb->aux_head; in perf_aux_output_end()
500 rb->aux_head += size; in perf_aux_output_end()
519 WRITE_ONCE(rb->user_page->aux_head, rb->aux_head); in perf_aux_output_end()
520 if (rb_need_aux_wakeup(rb)) in perf_aux_output_end()
531 WRITE_ONCE(rb->aux_nest, 0); in perf_aux_output_end()
533 rb_free_aux(rb); in perf_aux_output_end()
534 ring_buffer_put(rb); in perf_aux_output_end()
544 struct perf_buffer *rb = handle->rb; in perf_aux_output_skip() local
549 rb->aux_head += size; in perf_aux_output_skip()
551 WRITE_ONCE(rb->user_page->aux_head, rb->aux_head); in perf_aux_output_skip()
552 if (rb_need_aux_wakeup(rb)) { in perf_aux_output_skip()
554 handle->wakeup = rb->aux_wakeup + rb->aux_watermark; in perf_aux_output_skip()
557 handle->head = rb->aux_head; in perf_aux_output_skip()
570 return handle->rb->aux_priv; in perf_get_aux()
581 struct perf_buffer *rb = aux_handle->rb; in perf_output_copy_aux() local
585 from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1; in perf_output_copy_aux()
586 to &= (rb->aux_nr_pages << PAGE_SHIFT) - 1; in perf_output_copy_aux()
595 addr = rb->aux_pages[from >> PAGE_SHIFT]; in perf_output_copy_aux()
604 from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1; in perf_output_copy_aux()
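
perf_output_copy_aux() (lines 581-604) works on free-running offsets and masks them with the AUX area size (aux_nr_pages << PAGE_SHIFT, a power of two) after every advance so the copy wraps. A sketch of that masking, chunked at the wrap boundary rather than per page as the kernel does; copy_from_aux is an illustrative name:

    #include <stdint.h>
    #include <string.h>

    /* Copy len bytes starting at free-running offset `from` out of a
       power-of-two AUX area of `size` bytes, wrapping as needed. */
    static void copy_from_aux(uint8_t *dst, const uint8_t *aux, uint64_t size,
                              uint64_t from, uint64_t len)
    {
        while (len) {
            uint64_t off = from & (size - 1);  /* wrap the running offset */
            uint64_t chunk = size - off;       /* bytes until the buffer end */

            if (chunk > len)
                chunk = len;
            memcpy(dst, aux + off, chunk);
            dst += chunk;
            from += chunk;                     /* offset keeps free-running */
            len -= chunk;
        }
    }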
638 static void rb_free_aux_page(struct perf_buffer *rb, int idx) in rb_free_aux_page() argument
640 struct page *page = virt_to_page(rb->aux_pages[idx]); in rb_free_aux_page()
647 static void __rb_free_aux(struct perf_buffer *rb) in __rb_free_aux() argument
659 if (rb->aux_priv) { in __rb_free_aux()
660 rb->free_aux(rb->aux_priv); in __rb_free_aux()
661 rb->free_aux = NULL; in __rb_free_aux()
662 rb->aux_priv = NULL; in __rb_free_aux()
665 if (rb->aux_nr_pages) { in __rb_free_aux()
666 for (pg = 0; pg < rb->aux_nr_pages; pg++) in __rb_free_aux()
667 rb_free_aux_page(rb, pg); in __rb_free_aux()
669 kfree(rb->aux_pages); in __rb_free_aux()
670 rb->aux_nr_pages = 0; in __rb_free_aux()
674 int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event, in rb_alloc_aux() argument
714 rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL, in rb_alloc_aux()
716 if (!rb->aux_pages) in rb_alloc_aux()
719 rb->free_aux = event->pmu->free_aux; in rb_alloc_aux()
720 for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) { in rb_alloc_aux()
724 order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages)); in rb_alloc_aux()
729 for (last = rb->aux_nr_pages + (1 << page_private(page)); in rb_alloc_aux()
730 last > rb->aux_nr_pages; rb->aux_nr_pages++) in rb_alloc_aux()
731 rb->aux_pages[rb->aux_nr_pages] = page_address(page++); in rb_alloc_aux()
742 struct page *page = virt_to_page(rb->aux_pages[0]); in rb_alloc_aux()
748 rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages, in rb_alloc_aux()
750 if (!rb->aux_priv) in rb_alloc_aux()
761 refcount_set(&rb->aux_refcount, 1); in rb_alloc_aux()
763 rb->aux_overwrite = overwrite; in rb_alloc_aux()
764 rb->aux_watermark = watermark; in rb_alloc_aux()
768 rb->aux_pgoff = pgoff; in rb_alloc_aux()
770 __rb_free_aux(rb); in rb_alloc_aux()
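
The loop at lines 720-731 fills aux_pages[] using as few allocations as possible: each pass grabs the largest order that still fits, min(max_order, ilog2(remaining)), and records every page-sized piece of the compound allocation in its own slot. A sketch of the loop shape with malloc() standing in for alloc_pages(); alloc_aux_pages, ilog2_ and PAGE_SIZE_ are illustrative, and the kernel's retry at a lower order after a failed allocation is omitted:

    #include <stdlib.h>

    #define PAGE_SIZE_ 4096  /* illustrative stand-in for PAGE_SIZE */

    /* ilog2() for a non-zero value. */
    static int ilog2_(unsigned long v)
    {
        int l = -1;

        while (v) {
            v >>= 1;
            l++;
        }
        return l;
    }

    static int alloc_aux_pages(void **pages, int nr_pages, int max_order)
    {
        int got = 0;

        while (got < nr_pages) {
            int order = ilog2_((unsigned long)(nr_pages - got));
            char *chunk;

            if (order > max_order)
                order = max_order;  /* min(max_order, ilog2(remaining)) */
            chunk = malloc((size_t)PAGE_SIZE_ << order);
            if (!chunk)
                return -1;          /* caller unwinds, like __rb_free_aux() */
            for (int last = got + (1 << order); got < last; got++) {
                pages[got] = chunk; /* every sub-page gets its own slot */
                chunk += PAGE_SIZE_;
            }
        }
        return 0;
    }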
775 void rb_free_aux(struct perf_buffer *rb) in rb_free_aux() argument
777 if (refcount_dec_and_test(&rb->aux_refcount)) in rb_free_aux()
778 __rb_free_aux(rb); in rb_free_aux()
788 __perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff) in __perf_mmap_to_page() argument
790 if (pgoff > rb->nr_pages) in __perf_mmap_to_page()
794 return virt_to_page(rb->user_page); in __perf_mmap_to_page()
796 return virt_to_page(rb->data_pages[pgoff - 1]); in __perf_mmap_to_page()
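
Lines 788-796 encode the mmap layout of the non-vmalloc buffer: the perf_event_mmap_page sits at pgoff 0 and the data pages follow at pgoff 1..nr_pages, which is why data_pages[pgoff - 1] is returned. A direct model; struct rb_model and mmap_to_page are illustrative names:

    #include <stddef.h>

    struct rb_model {
        void *user_page;        /* pgoff 0: the control/meta page */
        void **data_pages;      /* pgoff 1..nr_pages: data, in order */
        unsigned long nr_pages;
    };

    static void *mmap_to_page(struct rb_model *rb, unsigned long pgoff)
    {
        if (pgoff > rb->nr_pages)
            return NULL;                    /* beyond the mapping */
        if (pgoff == 0)
            return rb->user_page;           /* meta page first */
        return rb->data_pages[pgoff - 1];   /* then the data pages */
    }

The CONFIG_PERF_USE_VMALLOC variant at lines 878-884 resolves the same layout with plain pointer arithmetic, since there the entire buffer is one contiguous allocation.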
822 struct perf_buffer *rb; in rb_alloc() local
833 rb = kzalloc_node(size, GFP_KERNEL, node); in rb_alloc()
834 if (!rb) in rb_alloc()
837 rb->user_page = perf_mmap_alloc_page(cpu); in rb_alloc()
838 if (!rb->user_page) in rb_alloc()
842 rb->data_pages[i] = perf_mmap_alloc_page(cpu); in rb_alloc()
843 if (!rb->data_pages[i]) in rb_alloc()
847 rb->nr_pages = nr_pages; in rb_alloc()
849 ring_buffer_init(rb, watermark, flags); in rb_alloc()
851 return rb; in rb_alloc()
855 perf_mmap_free_page(rb->data_pages[i]); in rb_alloc()
857 perf_mmap_free_page(rb->user_page); in rb_alloc()
860 kfree(rb); in rb_alloc()
866 void rb_free(struct perf_buffer *rb) in rb_free() argument
870 perf_mmap_free_page(rb->user_page); in rb_free()
871 for (i = 0; i < rb->nr_pages; i++) in rb_free()
872 perf_mmap_free_page(rb->data_pages[i]); in rb_free()
873 kfree(rb); in rb_free()
878 __perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff) in __perf_mmap_to_page() argument
881 if (pgoff > data_page_nr(rb)) in __perf_mmap_to_page()
884 return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE); in __perf_mmap_to_page()
896 struct perf_buffer *rb; in rb_free_work() local
900 rb = container_of(work, struct perf_buffer, work); in rb_free_work()
901 nr = data_page_nr(rb); in rb_free_work()
903 base = rb->user_page; in rb_free_work()
909 kfree(rb); in rb_free_work()
912 void rb_free(struct perf_buffer *rb) in rb_free() argument
914 schedule_work(&rb->work); in rb_free()
919 struct perf_buffer *rb; in rb_alloc() local
928 rb = kzalloc_node(size, GFP_KERNEL, node); in rb_alloc()
929 if (!rb) in rb_alloc()
932 INIT_WORK(&rb->work, rb_free_work); in rb_alloc()
938 rb->user_page = all_buf; in rb_alloc()
939 rb->data_pages[0] = all_buf + PAGE_SIZE; in rb_alloc()
941 rb->nr_pages = 1; in rb_alloc()
942 rb->page_order = ilog2(nr_pages); in rb_alloc()
945 ring_buffer_init(rb, watermark, flags); in rb_alloc()
947 return rb; in rb_alloc()
950 kfree(rb); in rb_alloc()
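
Lines 941-942 present the single vmalloc'd region as one data "page" of order ilog2(nr_pages), so the generic offset math in __perf_output_begin() (lines 237-241) still works: page_shift then spans the whole buffer and the page index is always 0. A sketch of that shared computation; PAGE_SHIFT_ and offset_to_addr are illustrative stand-ins:

    #include <stdint.h>

    #define PAGE_SHIFT_ 12  /* illustrative stand-in for PAGE_SHIFT */

    /* Offset-to-address math as at lines 237-241: with the vmalloc layout
       nr_pages is 1 and page_order covers the whole buffer, so page is
       always 0 and the mask spans the entire contiguous area. */
    static void *offset_to_addr(void **data_pages, unsigned long nr_pages,
                                unsigned long page_order, uint64_t offset)
    {
        unsigned long page_shift = PAGE_SHIFT_ + page_order;
        unsigned long page = (offset >> page_shift) & (nr_pages - 1);

        offset &= (1UL << page_shift) - 1;
        return (char *)data_pages[page] + offset;
    }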
959 perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff) in perf_mmap_to_page() argument
961 if (rb->aux_nr_pages) { in perf_mmap_to_page()
963 if (pgoff > rb->aux_pgoff + rb->aux_nr_pages) in perf_mmap_to_page()
967 if (pgoff >= rb->aux_pgoff) { in perf_mmap_to_page()
968 int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages); in perf_mmap_to_page()
969 return virt_to_page(rb->aux_pages[aux_pgoff]); in perf_mmap_to_page()
973 return __perf_mmap_to_page(rb, pgoff); in perf_mmap_to_page()
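
perf_mmap_to_page() (lines 959-973) routes faulting offsets: anything inside the AUX window resolves to aux_pages[], with array_index_nospec() clamping the index so a mispredicted bounds check cannot be turned into a Spectre-v1 out-of-bounds read, while all other offsets fall through to __perf_mmap_to_page(). A hedged model in which index_nospec is a plain bounds check standing in for the kernel's branchless mask:

    #include <stddef.h>

    struct rb_aux {
        void **aux_pages;            /* AUX page array */
        unsigned long aux_pgoff;     /* start of the AUX window */
        unsigned long aux_nr_pages;
    };

    /* Stand-in for array_index_nospec(): the kernel version forces the
       index into range without a branch, even under misspeculation. */
    static unsigned long index_nospec(unsigned long idx, unsigned long size)
    {
        return idx < size ? idx : 0;
    }

    /* NULL means "not an AUX page: fall back to __perf_mmap_to_page()". */
    static void *aux_mmap_to_page(struct rb_aux *rb, unsigned long pgoff)
    {
        if (rb->aux_nr_pages && pgoff >= rb->aux_pgoff &&
            pgoff < rb->aux_pgoff + rb->aux_nr_pages) {
            unsigned long i = index_nospec(pgoff - rb->aux_pgoff,
                                           rb->aux_nr_pages);
            return rb->aux_pages[i];
        }
        return NULL;
    }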