#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/irq_work.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>

#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)

/* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */
#define RINGBUF_PGOFF \
	(offsetof(struct bpf_ringbuf, consumer_pos) >> PAGE_SHIFT)
/* consumer page and producer page */
#define RINGBUF_POS_PAGES 2
#define RINGBUF_NR_META_PAGES (RINGBUF_PGOFF + RINGBUF_POS_PAGES)

#define RINGBUF_MAX_RECORD_SZ (UINT_MAX/4)

struct bpf_ringbuf {
	wait_queue_head_t waitq;
	struct irq_work work;
	u64 mask;
	struct page **pages;
	int nr_pages;
	spinlock_t spinlock ____cacheline_aligned_in_smp;
	/* For user-space producer ring buffers, an atomic_t busy bit is used
	 * to synchronize access to the ring buffers in the kernel, rather than
	 * the spinlock that is used for kernel-producer ring buffers. This is
	 * done because the ring buffer must hold a lock across a BPF program's
	 * callback:
	 *
	 *    __bpf_user_ringbuf_peek() // lock acquired
	 * -> program callback_fn()
	 * -> __bpf_user_ringbuf_sample_release() // lock released
	 *
	 * It is unsafe and incorrect to hold an IRQ spinlock across what could
	 * be a long execution window, so we instead simply disallow concurrent
	 * access to the ring buffer by kernel consumers, and return -EBUSY from
	 * __bpf_user_ringbuf_peek() if the busy bit is held by another task.
	 */
	atomic_t busy ____cacheline_aligned_in_smp;
	/* Consumer and producer counters are put into separate pages to
	 * allow each position to be mapped with different permissions.
	 * This prevents a user-space application from modifying the
	 * position and ruining in-kernel tracking. The permissions of the
	 * pages depend on who is producing samples: user-space or the
	 * kernel.
	 *
	 * Kernel-producer
	 * ---------------
	 * The producer position and data pages are mapped as r/o in
	 * userspace. For this approach, bits in the header of samples are
	 * used to signal to user-space, and to other producers, whether a
	 * sample is currently being written.
	 *
	 * User-space producer
	 * -------------------
	 * Only the page containing the consumer position is mapped r/o in
	 * user-space. User-space producers also use bits of the header to
	 * communicate to the kernel, but the kernel must carefully check and
	 * validate each sample to ensure that they're correctly formatted, and
	 * fully contained within the ring buffer.
	 */
	unsigned long consumer_pos __aligned(PAGE_SIZE);
	unsigned long producer_pos __aligned(PAGE_SIZE);
	char data[] __aligned(PAGE_SIZE);
};
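
/* A sketch (not part of this file's logic) of how user-space typically maps
 * these pages for a kernel-producer ring buffer, mirroring what libbpf's
 * ring buffer consumer does. Variable names are illustrative:
 *
 *	// consumer_pos page: the only writable mapping user-space may have
 *	cons = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    map_fd, 0);
 *	// producer_pos page followed by the double-mapped data area, r/o
 *	prod = mmap(NULL, page_size + 2 * max_entries, PROT_READ, MAP_SHARED,
 *		    map_fd, page_size);
 *
 * The kernel translates these file offsets by RINGBUF_PGOFF, so the
 * non-mmap()'able bookkeeping fields above are never exposed.
 */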

struct bpf_ringbuf_map {
	struct bpf_map map;
	struct bpf_ringbuf *rb;
};

/* 8-byte ring buffer record header structure */
struct bpf_ringbuf_hdr {
	u32 len;
	u32 pg_off;
};

static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
{
	const gfp_t flags = GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL |
			    __GFP_NOWARN | __GFP_ZERO;
	int nr_meta_pages = RINGBUF_NR_META_PAGES;
	int nr_data_pages = data_sz >> PAGE_SHIFT;
	int nr_pages = nr_meta_pages + nr_data_pages;
	struct page **pages, *page;
	struct bpf_ringbuf *rb;
	size_t array_size;
	int i;

	/* Each data page is mapped twice to allow "virtual"
	 * contiguous reads of samples that wrap around the end of the
	 * ring buffer area:
	 * ------------------------------------------------------
	 * | meta pages |  real data pages  |  same data pages  |
	 * ------------------------------------------------------
	 * |            | 1 2 3 4 5 6 7 8 9 | 1 2 3 4 5 6 7 8 9 |
	 * ------------------------------------------------------
	 * |            | TA             DA | TA             DA |
	 * ------------------------------------------------------
	 *                               ^^^^^^^
	 *                                  |
	 * Here, there is no need to worry about special handling of
	 * wrapped-around data, thanks to the double-mapped data pages. This
	 * works both in kernel and when mmap()'ed in user-space, simplifying
	 * both kernel and user-space implementations significantly.
	 */
	array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
	pages = bpf_map_area_alloc(array_size, numa_node);
	if (!pages)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_pages_node(numa_node, flags, 0);
		if (!page) {
			nr_pages = i;
			goto err_free_pages;
		}
		pages[i] = page;
		if (i >= nr_meta_pages)
			pages[nr_data_pages + i] = page;
	}

	rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
		  VM_MAP | VM_USERMAP, PAGE_KERNEL);
	if (rb) {
		kmemleak_not_leak(pages);
		rb->pages = pages;
		rb->nr_pages = nr_pages;
		return rb;
	}

err_free_pages:
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	bpf_map_area_free(pages);
	return NULL;
}
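
/* A worked example of the double-mapping above, under assumed sizes: with an
 * 8KB data area (mask == 0x1fff), a record whose payload starts at data
 * offset 0x1ff8 runs past the nominal end of the buffer. Because
 * pages[nr_data_pages + i] aliases pages[i], the bytes at data[0x2000] and
 * beyond are the same physical memory as data[0], so producers and consumers
 * can copy records linearly without any wrap-around splitting logic.
 */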

static void bpf_ringbuf_notify(struct irq_work *work)
{
	struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);

	wake_up_all(&rb->waitq);
}

/* Maximum size of ring buffer area is limited by 32-bit page offset within
 * record header, counted in pages. Reserving 8 bits for extensibility and
 * taking into account a few extra pages for consumer/producer pages and
 * non-mmap()'able parts, the current maximum size would be:
 *
 *	(((1ULL << 24) - RINGBUF_POS_PAGES - RINGBUF_PGOFF) * PAGE_SIZE)
 *
 * This gives a 64GB limit, which seems plenty for a single ring buffer. Now
 * considering that the maximum value of data_sz is (4GB - 1), there will be
 * no overflow, so just note the size limit in the comments.
 */
static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
{
	struct bpf_ringbuf *rb;

	rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
	if (!rb)
		return NULL;

	spin_lock_init(&rb->spinlock);
	atomic_set(&rb->busy, 0);
	init_waitqueue_head(&rb->waitq);
	init_irq_work(&rb->work, bpf_ringbuf_notify);

	rb->mask = data_sz - 1;
	rb->consumer_pos = 0;
	rb->producer_pos = 0;

	return rb;
}

static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
{
	struct bpf_ringbuf_map *rb_map;

	if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	if (attr->key_size || attr->value_size ||
	    !is_power_of_2(attr->max_entries) ||
	    !PAGE_ALIGNED(attr->max_entries))
		return ERR_PTR(-EINVAL);

	rb_map = bpf_map_area_alloc(sizeof(*rb_map), NUMA_NO_NODE);
	if (!rb_map)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&rb_map->map, attr);

	rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node);
	if (!rb_map->rb) {
		bpf_map_area_free(rb_map);
		return ERR_PTR(-ENOMEM);
	}

	return &rb_map->map;
}
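
/* Example (illustrative, assuming libbpf): creating a ring buffer map from
 * user-space. max_entries is the data area size and must be a power-of-2
 * multiple of the page size, per the checks in ringbuf_map_alloc() above:
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, "events",
 *				    0, 0, 256 * 4096, NULL);
 */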

static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
{
	/* copy pages pointer and nr_pages to local variable, as we are going
	 * to unmap rb itself with vunmap() below
	 */
	struct page **pages = rb->pages;
	int i, nr_pages = rb->nr_pages;

	vunmap(rb);
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	bpf_map_area_free(pages);
}

static void ringbuf_map_free(struct bpf_map *map)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	bpf_ringbuf_free(rb_map->rb);
	bpf_map_area_free(rb_map);
}

static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-ENOTSUPP);
}

static long ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 flags)
{
	return -ENOTSUPP;
}

static long ringbuf_map_delete_elem(struct bpf_map *map, void *key)
{
	return -ENOTSUPP;
}

static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
				    void *next_key)
{
	return -ENOTSUPP;
}

static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	if (vma->vm_flags & VM_WRITE) {
		/* allow writable mapping for the consumer_pos only */
		if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EPERM;
	} else {
		vm_flags_clear(vma, VM_MAYWRITE);
	}
	/* remap_vmalloc_range() checks size and offset constraints */
	return remap_vmalloc_range(vma, rb_map->rb,
				   vma->vm_pgoff + RINGBUF_PGOFF);
}

static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	if (vma->vm_flags & VM_WRITE) {
		if (vma->vm_pgoff == 0)
			/* Disallow writable mappings to the consumer pointer,
			 * and allow writable mappings to both the producer
			 * position, and the ring buffer data itself.
			 */
			return -EPERM;
	} else {
		vm_flags_clear(vma, VM_MAYWRITE);
	}
	/* remap_vmalloc_range() checks size and offset constraints */
	return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
}

static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
{
	unsigned long cons_pos, prod_pos;

	cons_pos = smp_load_acquire(&rb->consumer_pos);
	prod_pos = smp_load_acquire(&rb->producer_pos);
	return prod_pos - cons_pos;
}

static u32 ringbuf_total_data_sz(const struct bpf_ringbuf *rb)
{
	return rb->mask + 1;
}

static __poll_t ringbuf_map_poll_kern(struct bpf_map *map, struct file *filp,
				      struct poll_table_struct *pts)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	poll_wait(filp, &rb_map->rb->waitq, pts);

	if (ringbuf_avail_data_sz(rb_map->rb))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}
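
/* Example (illustrative): a user-space consumer blocking on the map fd.
 * poll()/epoll on the fd wakes up when the kernel queues
 * bpf_ringbuf_notify() via irq_work after a producer commits a sample:
 *
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, map_fd, &ev);
 *	epoll_wait(epfd, &ev, 1, -1);	// returns once data is available
 */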

static __poll_t ringbuf_map_poll_user(struct bpf_map *map, struct file *filp,
				      struct poll_table_struct *pts)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	poll_wait(filp, &rb_map->rb->waitq, pts);

	if (ringbuf_avail_data_sz(rb_map->rb) < ringbuf_total_data_sz(rb_map->rb))
		return EPOLLOUT | EPOLLWRNORM;
	return 0;
}

static u64 ringbuf_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_ringbuf *rb;
	int nr_data_pages;
	int nr_meta_pages;
	u64 usage = sizeof(struct bpf_ringbuf_map);

	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
	usage += (u64)rb->nr_pages << PAGE_SHIFT;
	nr_meta_pages = RINGBUF_NR_META_PAGES;
	nr_data_pages = map->max_entries >> PAGE_SHIFT;
	usage += (nr_meta_pages + 2 * nr_data_pages) * sizeof(struct page *);
	return usage;
}

BTF_ID_LIST_SINGLE(ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
const struct bpf_map_ops ringbuf_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = ringbuf_map_alloc,
	.map_free = ringbuf_map_free,
	.map_mmap = ringbuf_map_mmap_kern,
	.map_poll = ringbuf_map_poll_kern,
	.map_lookup_elem = ringbuf_map_lookup_elem,
	.map_update_elem = ringbuf_map_update_elem,
	.map_delete_elem = ringbuf_map_delete_elem,
	.map_get_next_key = ringbuf_map_get_next_key,
	.map_mem_usage = ringbuf_map_mem_usage,
	.map_btf_id = &ringbuf_map_btf_ids[0],
};

BTF_ID_LIST_SINGLE(user_ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
const struct bpf_map_ops user_ringbuf_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = ringbuf_map_alloc,
	.map_free = ringbuf_map_free,
	.map_mmap = ringbuf_map_mmap_user,
	.map_poll = ringbuf_map_poll_user,
	.map_lookup_elem = ringbuf_map_lookup_elem,
	.map_update_elem = ringbuf_map_update_elem,
	.map_delete_elem = ringbuf_map_delete_elem,
	.map_get_next_key = ringbuf_map_get_next_key,
	.map_mem_usage = ringbuf_map_mem_usage,
	.map_btf_id = &user_ringbuf_map_btf_ids[0],
};

/* Given a pointer to ring buffer record metadata and the struct bpf_ringbuf
 * itself, calculate the offset from record metadata to the ring buffer in
 * pages, rounded down. This page offset is stored as part of record
 * metadata and allows restoring a struct bpf_ringbuf * from a record
 * pointer. It is stored at offset 4 of the record metadata header.
 */
static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb,
				     struct bpf_ringbuf_hdr *hdr)
{
	return ((void *)hdr - (void *)rb) >> PAGE_SHIFT;
}

/* Given a pointer to a ring buffer record header, restore the pointer to
 * the struct bpf_ringbuf itself by using the page offset stored at offset 4
 */
static struct bpf_ringbuf *
bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
{
	unsigned long addr = (unsigned long)(void *)hdr;
	unsigned long off = (unsigned long)hdr->pg_off << PAGE_SHIFT;

	return (void *)((addr & PAGE_MASK) - off);
}
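
/* A quick arithmetic sketch of the round trip above, assuming 4KB pages: if
 * a record header lives 5 pages plus 200 bytes past the start of the struct
 * bpf_ringbuf, bpf_ringbuf_rec_pg_off() stores pg_off = 5 (rounded down). On
 * commit, (addr & PAGE_MASK) strips the 200-byte remainder, and subtracting
 * 5 << PAGE_SHIFT lands exactly back on rb. This works because rb itself is
 * page-aligned (it comes from vmap()).
 */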

static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
{
	unsigned long cons_pos, prod_pos, new_prod_pos, flags;
	u32 len, pg_off;
	struct bpf_ringbuf_hdr *hdr;

	if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
		return NULL;

	len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
	if (len > ringbuf_total_data_sz(rb))
		return NULL;

	cons_pos = smp_load_acquire(&rb->consumer_pos);

	if (in_nmi()) {
		if (!spin_trylock_irqsave(&rb->spinlock, flags))
			return NULL;
	} else {
		spin_lock_irqsave(&rb->spinlock, flags);
	}

	prod_pos = rb->producer_pos;
	new_prod_pos = prod_pos + len;

	/* check for out of ringbuf space by ensuring producer position
	 * doesn't advance more than (ringbuf_size - 1) ahead
	 */
	if (new_prod_pos - cons_pos > rb->mask) {
		spin_unlock_irqrestore(&rb->spinlock, flags);
		return NULL;
	}

	hdr = (void *)rb->data + (prod_pos & rb->mask);
	pg_off = bpf_ringbuf_rec_pg_off(rb, hdr);
	hdr->len = size | BPF_RINGBUF_BUSY_BIT;
	hdr->pg_off = pg_off;

	/* pairs with consumer's smp_load_acquire() */
	smp_store_release(&rb->producer_pos, new_prod_pos);

	spin_unlock_irqrestore(&rb->spinlock, flags);

	return (void *)hdr + BPF_RINGBUF_HDR_SZ;
}

BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags)
{
	struct bpf_ringbuf_map *rb_map;

	if (unlikely(flags))
		return 0;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size);
}

const struct bpf_func_proto bpf_ringbuf_reserve_proto = {
	.func		= bpf_ringbuf_reserve,
	.ret_type	= RET_PTR_TO_RINGBUF_MEM_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
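
/* Example (illustrative): the reserve/submit pattern as seen from a BPF
 * program. "events" is an assumed BPF_MAP_TYPE_RINGBUF map and "struct
 * event" an assumed sample layout; the helper names and flags are real:
 *
 *	struct event *e;
 *
 *	e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
 *	if (!e)
 *		return 0;	// buffer full, sample dropped
 *	e->pid = bpf_get_current_pid_tgid() >> 32;
 *	bpf_ringbuf_submit(e, 0);	// or bpf_ringbuf_discard(e, 0)
 */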

static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard)
{
	unsigned long rec_pos, cons_pos;
	struct bpf_ringbuf_hdr *hdr;
	struct bpf_ringbuf *rb;
	u32 new_len;

	hdr = sample - BPF_RINGBUF_HDR_SZ;
	rb = bpf_ringbuf_restore_from_rec(hdr);
	new_len = hdr->len ^ BPF_RINGBUF_BUSY_BIT;
	if (discard)
		new_len |= BPF_RINGBUF_DISCARD_BIT;

	/* update record header with correct final size prefix */
	xchg(&hdr->len, new_len);

	/* if consumer caught up and is waiting for our record, notify about
	 * new data availability
	 */
	rec_pos = (void *)hdr - (void *)rb->data;
	cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;

	if (flags & BPF_RB_FORCE_WAKEUP)
		irq_work_queue(&rb->work);
	else if (cons_pos == rec_pos && !(flags & BPF_RB_NO_WAKEUP))
		irq_work_queue(&rb->work);
}
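
/* Sketch of the consumer side of this handoff (roughly what libbpf's
 * ring_buffer__poll() loop does, shown informally): the consumer
 * acquire-loads the record's len word published by the xchg() above and
 * interprets the top bits:
 *
 *	len = smp_load_acquire(len_ptr);	// pairs with xchg() above
 *	if (len & BPF_RINGBUF_BUSY_BIT)
 *		break;				// producer not done yet
 *	if (!(len & BPF_RINGBUF_DISCARD_BIT))
 *		process(sample);		// payload follows the header
 *	len &= ~(BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT);
 *	cons_pos += round_up(len + BPF_RINGBUF_HDR_SZ, 8);
 *	smp_store_release(&consumer_pos, cons_pos);
 */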

BPF_CALL_2(bpf_ringbuf_submit, void *, sample, u64, flags)
{
	bpf_ringbuf_commit(sample, flags, false /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_submit_proto = {
	.func		= bpf_ringbuf_submit,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_RINGBUF_MEM | OBJ_RELEASE,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_discard, void *, sample, u64, flags)
{
	bpf_ringbuf_commit(sample, flags, true /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_discard_proto = {
	.func		= bpf_ringbuf_discard,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_RINGBUF_MEM | OBJ_RELEASE,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_ringbuf_output, struct bpf_map *, map, void *, data, u64, size,
	   u64, flags)
{
	struct bpf_ringbuf_map *rb_map;
	void *rec;

	if (unlikely(flags & ~(BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP)))
		return -EINVAL;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	rec = __bpf_ringbuf_reserve(rb_map->rb, size);
	if (!rec)
		return -EAGAIN;

	memcpy(rec, data, size);
	bpf_ringbuf_commit(rec, flags, false /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_output_proto = {
	.func		= bpf_ringbuf_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags)
{
	struct bpf_ringbuf *rb;

	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;

	switch (flags) {
	case BPF_RB_AVAIL_DATA:
		return ringbuf_avail_data_sz(rb);
	case BPF_RB_RING_SIZE:
		return ringbuf_total_data_sz(rb);
	case BPF_RB_CONS_POS:
		return smp_load_acquire(&rb->consumer_pos);
	case BPF_RB_PROD_POS:
		return smp_load_acquire(&rb->producer_pos);
	default:
		return 0;
	}
}

const struct bpf_func_proto bpf_ringbuf_query_proto = {
	.func		= bpf_ringbuf_query,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
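
/* Example (illustrative): a BPF program can use the query helper for
 * best-effort backpressure, e.g. shedding low-priority samples when the
 * buffer is mostly full. The returned values are momentary snapshots:
 *
 *	if (bpf_ringbuf_query(&events, BPF_RB_AVAIL_DATA) >
 *	    bpf_ringbuf_query(&events, BPF_RB_RING_SIZE) / 2)
 *		return 0;	// skip this low-priority event
 */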

BPF_CALL_4(bpf_ringbuf_reserve_dynptr, struct bpf_map *, map, u32, size, u64, flags,
	   struct bpf_dynptr_kern *, ptr)
{
	struct bpf_ringbuf_map *rb_map;
	void *sample;
	int err;

	if (unlikely(flags)) {
		bpf_dynptr_set_null(ptr);
		return -EINVAL;
	}

	err = bpf_dynptr_check_size(size);
	if (err) {
		bpf_dynptr_set_null(ptr);
		return err;
	}

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	sample = __bpf_ringbuf_reserve(rb_map->rb, size);
	if (!sample) {
		bpf_dynptr_set_null(ptr);
		return -EINVAL;
	}

	bpf_dynptr_init(ptr, sample, BPF_DYNPTR_TYPE_RINGBUF, 0, size);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto = {
	.func		= bpf_ringbuf_reserve_dynptr,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT,
};
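
/* Example (illustrative): the dynptr variant of the reserve/submit pattern
 * from a BPF program, useful when the sample size isn't a build-time
 * constant. "events", "buf" and "sz" are assumed names; the helpers are
 * real, and the submit/discard helpers below tolerate a null dynptr:
 *
 *	struct bpf_dynptr ptr;
 *
 *	if (bpf_ringbuf_reserve_dynptr(&events, sz, 0, &ptr)) {
 *		bpf_ringbuf_discard_dynptr(&ptr, 0);	// safe on failure
 *		return 0;
 *	}
 *	bpf_dynptr_write(&ptr, 0, buf, sz, 0);
 *	bpf_ringbuf_submit_dynptr(&ptr, 0);
 */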

BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
{
	if (!ptr->data)
		return 0;

	bpf_ringbuf_commit(ptr->data, flags, false /* discard */);

	bpf_dynptr_set_null(ptr);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto = {
	.func		= bpf_ringbuf_submit_dynptr,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_discard_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
{
	if (!ptr->data)
		return 0;

	bpf_ringbuf_commit(ptr->data, flags, true /* discard */);

	bpf_dynptr_set_null(ptr);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto = {
	.func		= bpf_ringbuf_discard_dynptr,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
	.arg2_type	= ARG_ANYTHING,
};

static int __bpf_user_ringbuf_peek(struct bpf_ringbuf *rb, void **sample, u32 *size)
{
	int err;
	u32 hdr_len, sample_len, total_len, flags, *hdr;
	u64 cons_pos, prod_pos;

	/* Synchronizes with smp_store_release() in user-space producer. */
	prod_pos = smp_load_acquire(&rb->producer_pos);
	if (prod_pos % 8)
		return -EINVAL;

	/* Synchronizes with smp_store_release() in __bpf_user_ringbuf_sample_release() */
	cons_pos = smp_load_acquire(&rb->consumer_pos);
	if (cons_pos >= prod_pos)
		return -ENODATA;

	hdr = (u32 *)((uintptr_t)rb->data + (uintptr_t)(cons_pos & rb->mask));
	/* Synchronizes with smp_store_release() in user-space producer. */
	hdr_len = smp_load_acquire(hdr);
	flags = hdr_len & (BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT);
	sample_len = hdr_len & ~flags;
	total_len = round_up(sample_len + BPF_RINGBUF_HDR_SZ, 8);

	/* The sample must fit within the region advertised by the producer position. */
	if (total_len > prod_pos - cons_pos)
		return -EINVAL;

	/* The sample must fit within the data region of the ring buffer. */
	if (total_len > ringbuf_total_data_sz(rb))
		return -E2BIG;

	/* The sample must fit into a struct bpf_dynptr. */
	err = bpf_dynptr_check_size(sample_len);
	if (err)
		return -E2BIG;

	if (flags & BPF_RINGBUF_DISCARD_BIT) {
		/* If the discard bit is set, the sample should be skipped.
		 *
		 * Update the consumer pos, and return -EAGAIN so the caller
		 * knows to skip this sample and try to read the next one.
		 */
		smp_store_release(&rb->consumer_pos, cons_pos + total_len);
		return -EAGAIN;
	}

	if (flags & BPF_RINGBUF_BUSY_BIT)
		return -ENODATA;

	*sample = (void *)((uintptr_t)rb->data +
			   (uintptr_t)((cons_pos + BPF_RINGBUF_HDR_SZ) & rb->mask));
	*size = sample_len;
	return 0;
}

static void __bpf_user_ringbuf_sample_release(struct bpf_ringbuf *rb, size_t size, u64 flags)
{
	u64 consumer_pos;
	u32 rounded_size = round_up(size + BPF_RINGBUF_HDR_SZ, 8);

	/* Using smp_load_acquire() is unnecessary here, as the busy-bit
	 * prevents another task from writing to consumer_pos after it was read
	 * by this task with smp_load_acquire() in __bpf_user_ringbuf_peek().
	 */
	consumer_pos = rb->consumer_pos;
	/* Synchronizes with smp_load_acquire() in user-space producer. */
	smp_store_release(&rb->consumer_pos, consumer_pos + rounded_size);
}
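
/* Sketch of the user-space producer protocol the two functions above assume
 * (roughly what libbpf's user_ring_buffer__reserve()/__submit() implement;
 * shown informally with illustrative names, not exact libbpf code):
 *
 *	// reserve: publish a busy record, then advance producer_pos
 *	hdr->len = size | BPF_RINGBUF_BUSY_BIT;
 *	smp_store_release(&producer_pos,
 *			  prod_pos + round_up(size + BPF_RINGBUF_HDR_SZ, 8));
 *
 *	// ... fill in the sample payload ...
 *
 *	// submit: clear the busy bit with release semantics, which pairs
 *	// with smp_load_acquire(hdr) in __bpf_user_ringbuf_peek()
 *	smp_store_release(hdr, size);
 */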

BPF_CALL_4(bpf_user_ringbuf_drain, struct bpf_map *, map,
	   void *, callback_fn, void *, callback_ctx, u64, flags)
{
	struct bpf_ringbuf *rb;
	long samples, discarded_samples = 0, ret = 0;
	bpf_callback_t callback = (bpf_callback_t)callback_fn;
	u64 wakeup_flags = BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP;
	int busy = 0;

	if (unlikely(flags & ~wakeup_flags))
		return -EINVAL;

	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;

	/* If another consumer is already consuming a sample, wait for them to finish. */
	if (!atomic_try_cmpxchg(&rb->busy, &busy, 1))
		return -EBUSY;

	for (samples = 0; samples < BPF_MAX_USER_RINGBUF_SAMPLES && ret == 0; samples++) {
		int err;
		u32 size;
		void *sample;
		struct bpf_dynptr_kern dynptr;

		err = __bpf_user_ringbuf_peek(rb, &sample, &size);
		if (err) {
			if (err == -ENODATA) {
				break;
			} else if (err == -EAGAIN) {
				discarded_samples++;
				continue;
			} else {
				ret = err;
				goto schedule_work_return;
			}
		}

		bpf_dynptr_init(&dynptr, sample, BPF_DYNPTR_TYPE_LOCAL, 0, size);
		ret = callback((uintptr_t)&dynptr, (uintptr_t)callback_ctx, 0, 0, 0);
		__bpf_user_ringbuf_sample_release(rb, size, flags);
	}
	ret = samples - discarded_samples;

schedule_work_return:
	/* Prevent the clearing of the busy-bit from being reordered before the
	 * storing of any rb consumer or producer positions.
	 */
	smp_mb__before_atomic();
	atomic_set(&rb->busy, 0);

	if (flags & BPF_RB_FORCE_WAKEUP)
		irq_work_queue(&rb->work);
	else if (!(flags & BPF_RB_NO_WAKEUP) && samples > 0)
		irq_work_queue(&rb->work);
	return ret;
}

const struct bpf_func_proto bpf_user_ringbuf_drain_proto = {
	.func		= bpf_user_ringbuf_drain,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_FUNC,
	.arg3_type	= ARG_PTR_TO_STACK_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};
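
/* Example (illustrative): draining a BPF_MAP_TYPE_USER_RINGBUF from a BPF
 * program. "user_ringbuf", "struct msg" and handle_sample() are assumed
 * names; the callback contract (return 0 to continue, non-zero to stop)
 * matches the ret == 0 loop condition above:
 *
 *	static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
 *	{
 *		struct msg m;
 *
 *		if (bpf_dynptr_read(&m, sizeof(m), dynptr, 0, 0))
 *			return 1;	// stop draining
 *		// ... act on m ...
 *		return 0;		// continue with the next sample
 *	}
 *
 *	long n = bpf_user_ringbuf_drain(&user_ringbuf, handle_sample, NULL, 0);
 */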