Lines Matching refs:rb

78 	struct bpf_ringbuf *rb;  member
95 struct bpf_ringbuf *rb; in bpf_ringbuf_area_alloc() local
132 rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages, in bpf_ringbuf_area_alloc()
134 if (rb) { in bpf_ringbuf_area_alloc()
136 rb->pages = pages; in bpf_ringbuf_area_alloc()
137 rb->nr_pages = nr_pages; in bpf_ringbuf_area_alloc()
138 return rb; in bpf_ringbuf_area_alloc()
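
The "2 * nr_data_pages" passed to vmap() in bpf_ringbuf_area_alloc() is what makes wrap-around transparent: the data pages are mapped twice, back to back, so a record that crosses the end of the ring is still virtually contiguous. A userspace analogue of the same trick, as a hedged sketch (demo code, not part of the kernel sources), built on memfd_create() and two fixed mappings of one file:

#define _GNU_SOURCE              /* memfd_create() */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t size = 4 * page;          /* power-of-two data area */
	int fd = memfd_create("ring-data", 0);
	char *area;

	if (fd < 0 || ftruncate(fd, size))
		return 1;

	/* Reserve 2 * size of address space, then map the same file into
	 * both halves (error checks trimmed for brevity). */
	area = mmap(NULL, 2 * size, PROT_NONE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	mmap(area, size, PROT_READ | PROT_WRITE,
	     MAP_SHARED | MAP_FIXED, fd, 0);
	mmap(area + size, size, PROT_READ | PROT_WRITE,
	     MAP_SHARED | MAP_FIXED, fd, 0);

	/* A write that runs past the end reappears at the start. */
	memcpy(area + size - 3, "wrapped", 7);
	printf("%.4s\n", area);          /* prints "pped" */

	close(fd);
	return 0;
}
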
150 struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work); in bpf_ringbuf_notify() local
152 wake_up_all(&rb->waitq); in bpf_ringbuf_notify()
168 struct bpf_ringbuf *rb; in bpf_ringbuf_alloc() local
170 rb = bpf_ringbuf_area_alloc(data_sz, numa_node); in bpf_ringbuf_alloc()
171 if (!rb) in bpf_ringbuf_alloc()
174 spin_lock_init(&rb->spinlock); in bpf_ringbuf_alloc()
175 atomic_set(&rb->busy, 0); in bpf_ringbuf_alloc()
176 init_waitqueue_head(&rb->waitq); in bpf_ringbuf_alloc()
177 init_irq_work(&rb->work, bpf_ringbuf_notify); in bpf_ringbuf_alloc()
179 rb->mask = data_sz - 1; in bpf_ringbuf_alloc()
180 rb->consumer_pos = 0; in bpf_ringbuf_alloc()
181 rb->producer_pos = 0; in bpf_ringbuf_alloc()
183 return rb; in bpf_ringbuf_alloc()
204 rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node); in ringbuf_map_alloc()
205 if (!rb_map->rb) { in ringbuf_map_alloc()
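
ringbuf_map_alloc() hands attr->max_entries to bpf_ringbuf_alloc() as the data size, and the mask = data_sz - 1 assignment above only works because that size must be a power-of-two multiple of the page size. On the BPF program side the map is declared in the usual libbpf style (a sketch; the map name "events" is made up):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);  /* data size: power-of-two multiple of PAGE_SIZE */
} events SEC(".maps");
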
213 static void bpf_ringbuf_free(struct bpf_ringbuf *rb) in bpf_ringbuf_free() argument
218 struct page **pages = rb->pages; in bpf_ringbuf_free()
219 int i, nr_pages = rb->nr_pages; in bpf_ringbuf_free()
221 vunmap(rb); in bpf_ringbuf_free()
232 bpf_ringbuf_free(rb_map->rb); in ringbuf_map_free()
272 return remap_vmalloc_range(vma, rb_map->rb, in ringbuf_map_mmap_kern()
293 return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF); in ringbuf_map_mmap_user()
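
The two mmap callbacks split the area into a consumer page the reader may write (to publish consumer_pos) and a read-only view of the producer page plus the double-mapped data. A consumer such as libbpf's ring_buffer__add() maps the fd roughly like this (hedged sketch; data_sz stands for the map's max_entries):

#include <sys/mman.h>
#include <unistd.h>

static int map_ringbuf(int map_fd, size_t data_sz, void **cons, void **prod)
{
	long page = sysconf(_SC_PAGESIZE);

	/* Consumer page: writable, so the reader can advance consumer_pos. */
	*cons = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
	if (*cons == MAP_FAILED)
		return -1;

	/* Producer page plus twice the data area: read-only for the reader. */
	*prod = mmap(NULL, page + 2 * data_sz, PROT_READ, MAP_SHARED,
		     map_fd, page);
	if (*prod == MAP_FAILED) {
		munmap(*cons, page);
		return -1;
	}
	return 0;
}
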
296 static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb) in ringbuf_avail_data_sz() argument
300 cons_pos = smp_load_acquire(&rb->consumer_pos); in ringbuf_avail_data_sz()
301 prod_pos = smp_load_acquire(&rb->producer_pos); in ringbuf_avail_data_sz()
305 static u32 ringbuf_total_data_sz(const struct bpf_ringbuf *rb) in ringbuf_total_data_sz() argument
307 return rb->mask + 1; in ringbuf_total_data_sz()
316 poll_wait(filp, &rb_map->rb->waitq, pts); in ringbuf_map_poll_kern()
318 if (ringbuf_avail_data_sz(rb_map->rb)) in ringbuf_map_poll_kern()
329 poll_wait(filp, &rb_map->rb->waitq, pts); in ringbuf_map_poll_user()
331 if (ringbuf_avail_data_sz(rb_map->rb) < ringbuf_total_data_sz(rb_map->rb)) in ringbuf_map_poll_user()
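
ringbuf_map_poll_kern() reports EPOLLIN whenever ringbuf_avail_data_sz() is non-zero, while the user-ringbuf variant reports writability as long as the ring is not full; that is what lets consumers sleep in epoll instead of spinning on producer_pos. A typical consumer loop on top of libbpf (sketch; handle_event and the map fd are placeholders):

#include <bpf/libbpf.h>

static int handle_event(void *ctx, void *data, size_t len)
{
	/* one committed record per call */
	return 0;
}

int consume(int ringbuf_map_fd)
{
	struct ring_buffer *rb;
	int err;

	rb = ring_buffer__new(ringbuf_map_fd, handle_event, NULL, NULL);
	if (!rb)
		return -1;

	do {
		/* Blocks in epoll_wait() until the kernel's wakeup fires. */
		err = ring_buffer__poll(rb, 100 /* timeout, ms */);
	} while (err >= 0);

	ring_buffer__free(rb);
	return err;
}
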
338 struct bpf_ringbuf *rb; in ringbuf_map_mem_usage() local
343 rb = container_of(map, struct bpf_ringbuf_map, map)->rb; in ringbuf_map_mem_usage()
344 usage += (u64)rb->nr_pages << PAGE_SHIFT; in ringbuf_map_mem_usage()
387 static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb, in bpf_ringbuf_rec_pg_off() argument
390 return ((void *)hdr - (void *)rb) >> PAGE_SHIFT; in bpf_ringbuf_rec_pg_off()
405 static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size) in __bpf_ringbuf_reserve() argument
415 if (len > ringbuf_total_data_sz(rb)) in __bpf_ringbuf_reserve()
418 cons_pos = smp_load_acquire(&rb->consumer_pos); in __bpf_ringbuf_reserve()
421 if (!spin_trylock_irqsave(&rb->spinlock, flags)) in __bpf_ringbuf_reserve()
424 spin_lock_irqsave(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
427 prod_pos = rb->producer_pos; in __bpf_ringbuf_reserve()
433 if (new_prod_pos - cons_pos > rb->mask) { in __bpf_ringbuf_reserve()
434 spin_unlock_irqrestore(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
438 hdr = (void *)rb->data + (prod_pos & rb->mask); in __bpf_ringbuf_reserve()
439 pg_off = bpf_ringbuf_rec_pg_off(rb, hdr); in __bpf_ringbuf_reserve()
444 smp_store_release(&rb->producer_pos, new_prod_pos); in __bpf_ringbuf_reserve()
446 spin_unlock_irqrestore(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
459 return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size); in BPF_CALL_3()
474 struct bpf_ringbuf *rb; in bpf_ringbuf_commit() local
478 rb = bpf_ringbuf_restore_from_rec(hdr); in bpf_ringbuf_commit()
489 rec_pos = (void *)hdr - (void *)rb->data; in bpf_ringbuf_commit()
490 cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask; in bpf_ringbuf_commit()
493 irq_work_queue(&rb->work); in bpf_ringbuf_commit()
495 irq_work_queue(&rb->work); in bpf_ringbuf_commit()
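
__bpf_ringbuf_reserve() and bpf_ringbuf_commit() are the two halves behind the bpf_ringbuf_reserve()/bpf_ringbuf_submit() (or bpf_ringbuf_discard()) helper pairs: reservation claims space under the spinlock, commit clears the busy bit in the record header (optionally setting the discard bit) and decides, from the consumer position and the BPF_RB_NO_WAKEUP/BPF_RB_FORCE_WAKEUP flags, whether to queue the wakeup irq_work. A minimal producer as seen from a BPF program (sketch; continues the hypothetical "events" map above, and the struct event layout is made up):

char _license[] SEC("license") = "GPL";

struct event {
	__u32 pid;
	char comm[16];
};

SEC("tracepoint/sched/sched_process_exec")
int log_exec(void *ctx)
{
	struct event *e;

	/* Claims sizeof(*e) bytes in the ring; NULL if the ring is full or
	 * the reservation spinlock cannot be taken (e.g. in NMI context). */
	e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
	if (!e)
		return 0;

	e->pid = bpf_get_current_pid_tgid() >> 32;
	bpf_get_current_comm(e->comm, sizeof(e->comm));

	/* Commit makes the record visible to the consumer; pass
	 * BPF_RB_NO_WAKEUP or BPF_RB_FORCE_WAKEUP to override the default
	 * "wake only if the consumer has caught up" heuristic, or call
	 * bpf_ringbuf_discard(e, 0) to drop the reservation instead. */
	bpf_ringbuf_submit(e, 0);
	return 0;
}
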
534 rec = __bpf_ringbuf_reserve(rb_map->rb, size); in BPF_CALL_4()
554 struct bpf_ringbuf *rb; in BPF_CALL_2() local
556 rb = container_of(map, struct bpf_ringbuf_map, map)->rb; in BPF_CALL_2()
560 return ringbuf_avail_data_sz(rb); in BPF_CALL_2()
562 return ringbuf_total_data_sz(rb); in BPF_CALL_2()
564 return smp_load_acquire(&rb->consumer_pos); in BPF_CALL_2()
566 return smp_load_acquire(&rb->producer_pos); in BPF_CALL_2()
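
The four branches correspond to the flags accepted by the bpf_ringbuf_query() helper: BPF_RB_AVAIL_DATA, BPF_RB_RING_SIZE, BPF_RB_CONS_POS and BPF_RB_PROD_POS (the positions are acquire-loaded snapshots, so they may be stale by the time the program uses them). One common use is shedding load when the ring is nearly full (sketch, reusing the hypothetical "events" map):

static __always_inline int ring_mostly_full(void)
{
	__u64 avail = bpf_ringbuf_query(&events, BPF_RB_AVAIL_DATA);
	__u64 size  = bpf_ringbuf_query(&events, BPF_RB_RING_SIZE);

	return avail > size / 2;        /* nonzero: more than half full, skip the event */
}
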
599 sample = __bpf_ringbuf_reserve(rb_map->rb, size); in BPF_CALL_4()
657 static int __bpf_user_ringbuf_peek(struct bpf_ringbuf *rb, void **sample, u32 *size) in __bpf_user_ringbuf_peek() argument
664 prod_pos = smp_load_acquire(&rb->producer_pos); in __bpf_user_ringbuf_peek()
669 cons_pos = smp_load_acquire(&rb->consumer_pos); in __bpf_user_ringbuf_peek()
673 hdr = (u32 *)((uintptr_t)rb->data + (uintptr_t)(cons_pos & rb->mask)); in __bpf_user_ringbuf_peek()
685 if (total_len > ringbuf_total_data_sz(rb)) in __bpf_user_ringbuf_peek()
699 smp_store_release(&rb->consumer_pos, cons_pos + total_len); in __bpf_user_ringbuf_peek()
706 *sample = (void *)((uintptr_t)rb->data + in __bpf_user_ringbuf_peek()
707 (uintptr_t)((cons_pos + BPF_RINGBUF_HDR_SZ) & rb->mask)); in __bpf_user_ringbuf_peek()
712 static void __bpf_user_ringbuf_sample_release(struct bpf_ringbuf *rb, size_t size, u64 flags) in __bpf_user_ringbuf_sample_release() argument
721 consumer_pos = rb->consumer_pos; in __bpf_user_ringbuf_sample_release()
723 smp_store_release(&rb->consumer_pos, consumer_pos + rounded_size); in __bpf_user_ringbuf_sample_release()
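
For BPF_MAP_TYPE_USER_RINGBUF the roles are reversed: userspace produces samples and the kernel consumes them, so __bpf_user_ringbuf_peek() has to trust nothing in the user-written header. The 32-bit length word carries a busy bit while the producer is still writing and a discard bit for abandoned samples, and lengths are validated against the ring size before a sample is surfaced. A simplified model of that loop (illustrative only; the constants mirror the UAPI values BPF_RINGBUF_BUSY_BIT, BPF_RINGBUF_DISCARD_BIT and BPF_RINGBUF_HDR_SZ, and the kernel uses acquire/release accesses where this sketch uses plain loads and stores):

#define RB_BUSY_BIT	(1U << 31)
#define RB_DISCARD_BIT	(1U << 30)
#define RB_HDR_SZ	8u

/* Returns 0 with *sample/*size set, a negative value if the next sample is
 * still being written or malformed, or 1 if the ring is empty. */
static int user_ringbuf_peek(unsigned char *data, unsigned long mask,
			     unsigned long *cons_pos, unsigned long prod_pos,
			     void **sample, unsigned int *size)
{
	while (*cons_pos < prod_pos) {
		unsigned int hdr, flags, len, total;

		hdr   = *(unsigned int *)(data + (*cons_pos & mask));
		flags = hdr & (RB_BUSY_BIT | RB_DISCARD_BIT);
		len   = hdr & ~flags;
		total = (len + RB_HDR_SZ + 7) & ~7u;	/* round up to 8 bytes */

		if (total > mask + 1)
			return -1;			/* bogus length from userspace */

		if (flags & RB_DISCARD_BIT) {
			*cons_pos += total;		/* skip abandoned sample */
			continue;
		}
		if (flags & RB_BUSY_BIT)
			return -1;			/* producer still writing */

		*sample = data + ((*cons_pos + RB_HDR_SZ) & mask);
		*size = len;
		return 0;
	}
	return 1;					/* ring is empty */
}
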
729 struct bpf_ringbuf *rb; in BPF_CALL_4() local
738 rb = container_of(map, struct bpf_ringbuf_map, map)->rb; in BPF_CALL_4()
741 if (!atomic_try_cmpxchg(&rb->busy, &busy, 1)) in BPF_CALL_4()
750 err = __bpf_user_ringbuf_peek(rb, &sample, &size); in BPF_CALL_4()
765 __bpf_user_ringbuf_sample_release(rb, size, flags); in BPF_CALL_4()
774 atomic_set(&rb->busy, 0); in BPF_CALL_4()
777 irq_work_queue(&rb->work); in BPF_CALL_4()
779 irq_work_queue(&rb->work); in BPF_CALL_4()
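
bpf_user_ringbuf_drain() wraps the peek/release pair: it takes rb->busy so only one program drains at a time, invokes a callback per sample, releases each one, and finally queues the wakeup irq_work so a blocked producer can be woken. Rough BPF-side usage (a sketch; "user_events" is a hypothetical BPF_MAP_TYPE_USER_RINGBUF map and the attach point is arbitrary):

struct {
	__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
	__uint(max_entries, 256 * 1024);
} user_events SEC(".maps");

static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
{
	/* read the sample via bpf_dynptr_read()/bpf_dynptr_data() */
	return 0;			/* 0: keep draining, 1: stop early */
}

SEC("fentry/__x64_sys_getpid")		/* any program type allowed to call the helper */
int drain_user_samples(void *ctx)
{
	/* Returns the number of samples consumed, or a negative error. */
	long n = bpf_user_ringbuf_drain(&user_events, handle_sample, NULL, 0);

	return n < 0 ? 0 : 0;
}

On the user side, libbpf's user_ring_buffer__new()/__reserve()/__submit() implement the producer half: reserve writes the busy-bit header that __bpf_user_ringbuf_peek() checks, and submit clears it and advances producer_pos.
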