xref: /openbmc/linux/kernel/bpf/ringbuf.c (revision d37cf9b63113f13d742713881ce691fc615d8b3b)
1 #include <linux/bpf.h>
2 #include <linux/btf.h>
3 #include <linux/err.h>
4 #include <linux/irq_work.h>
5 #include <linux/slab.h>
6 #include <linux/filter.h>
7 #include <linux/mm.h>
8 #include <linux/vmalloc.h>
9 #include <linux/wait.h>
10 #include <linux/poll.h>
11 #include <linux/kmemleak.h>
12 #include <uapi/linux/btf.h>
13 #include <linux/btf_ids.h>
14 
15 #define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)
16 
17 /* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */
18 #define RINGBUF_PGOFF \
19 	(offsetof(struct bpf_ringbuf, consumer_pos) >> PAGE_SHIFT)
20 /* consumer page and producer page */
21 #define RINGBUF_POS_PAGES 2
22 #define RINGBUF_NR_META_PAGES (RINGBUF_PGOFF + RINGBUF_POS_PAGES)
23 
24 #define RINGBUF_MAX_RECORD_SZ (UINT_MAX/4)
25 
26 struct bpf_ringbuf {
27 	wait_queue_head_t waitq;
28 	struct irq_work work;
29 	u64 mask;
30 	struct page **pages;
31 	int nr_pages;
32 	spinlock_t spinlock ____cacheline_aligned_in_smp;
33 	/* For user-space producer ring buffers, an atomic_t busy bit is used
34 	 * to synchronize access to the ring buffers in the kernel, rather than
35 	 * the spinlock that is used for kernel-producer ring buffers. This is
36 	 * done because the ring buffer must hold a lock across a BPF program's
37 	 * callback:
38 	 *
39 	 *    __bpf_user_ringbuf_peek() // lock acquired
40 	 * -> program callback_fn()
41 	 * -> __bpf_user_ringbuf_sample_release() // lock released
42 	 *
43 	 * It is unsafe and incorrect to hold an IRQ spinlock across what could
44 	 * be a long execution window, so we instead simply disallow concurrent
45 	 * access to the ring buffer by kernel consumers, and return -EBUSY from
46 	 * bpf_user_ringbuf_drain() if the busy bit is held by another task.
47 	 */
48 	atomic_t busy ____cacheline_aligned_in_smp;
49 	/* Consumer and producer counters are put into separate pages to
50 	 * allow each position to be mapped with different permissions.
51 	 * This prevents a user-space application from modifying the
52 	 * position and ruining in-kernel tracking. The permissions of the
53 	 * pages depend on who is producing samples: user-space or the
54 	 * kernel. Note that the pending counter is placed in the same
55 	 * page as the producer, so that it shares the same cache line.
56 	 *
57 	 * Kernel-producer
58 	 * ---------------
59 	 * The producer position and data pages are mapped as r/o in
60 	 * userspace. For this approach, bits in the header of samples are
61 	 * used to signal to user-space, and to other producers, whether a
62 	 * sample is currently being written.
63 	 *
64 	 * User-space producer
65 	 * -------------------
66 	 * Only the page containing the consumer position is mapped r/o in
67 	 * user-space. User-space producers also use bits of the header to
68 	 * communicate to the kernel, but the kernel must carefully check and
69 	 * validate each sample to ensure that they're correctly formatted, and
70 	 * fully contained within the ring buffer.
71 	 */
72 	unsigned long consumer_pos __aligned(PAGE_SIZE);
73 	unsigned long producer_pos __aligned(PAGE_SIZE);
74 	unsigned long pending_pos;
75 	char data[] __aligned(PAGE_SIZE);
76 };
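/* A sketch of the resulting layout, assuming 4KB pages and that the fields
 * before consumer_pos fit into a single page (so RINGBUF_PGOFF == 1):
 *
 *   page 0                 : struct bpf_ringbuf header (never mmap()'able)
 *   page 1 (RINGBUF_PGOFF) : consumer_pos
 *   page 2                 : producer_pos, pending_pos
 *   page 3 ..              : data[], double-mapped (see bpf_ringbuf_area_alloc())
 *
 * mmap() offsets passed by user-space are shifted by RINGBUF_PGOFF in the
 * ->map_mmap callbacks below, so map page offset 0 is the consumer_pos page.
 */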
77 
78 struct bpf_ringbuf_map {
79 	struct bpf_map map;
80 	struct bpf_ringbuf *rb;
81 };
82 
83 /* 8-byte ring buffer record header structure */
84 struct bpf_ringbuf_hdr {
85 	u32 len;
86 	u32 pg_off;
87 };
88 
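/* The top two bits of 'len' carry record state flags defined in
 * include/uapi/linux/bpf.h: BPF_RINGBUF_BUSY_BIT (record still being written)
 * and BPF_RINGBUF_DISCARD_BIT (record committed but discarded). An
 * illustrative consumer-side check (sketch only, not code from this file):
 *
 *	u32 len = smp_load_acquire(&hdr->len);
 *
 *	if (len & BPF_RINGBUF_BUSY_BIT)
 *		return -ENODATA;		// producer has not committed yet
 *	len &= ~BPF_RINGBUF_DISCARD_BIT;	// payload length in bytes
 */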
89 static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
90 {
91 	const gfp_t flags = GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL |
92 			    __GFP_NOWARN | __GFP_ZERO;
93 	int nr_meta_pages = RINGBUF_NR_META_PAGES;
94 	int nr_data_pages = data_sz >> PAGE_SHIFT;
95 	int nr_pages = nr_meta_pages + nr_data_pages;
96 	struct page **pages, *page;
97 	struct bpf_ringbuf *rb;
98 	size_t array_size;
99 	int i;
100 
101 	/* Each data page is mapped twice to allow a "virtually
102 	 * contiguous" read of samples that wrap around the end of the
103 	 * ring buffer area:
104 	 * ------------------------------------------------------
105 	 * | meta pages |  real data pages  |  same data pages  |
106 	 * ------------------------------------------------------
107 	 * |            | 1 2 3 4 5 6 7 8 9 | 1 2 3 4 5 6 7 8 9 |
108 	 * ------------------------------------------------------
109 	 * |            | TA             DA | TA             DA |
110 	 * ------------------------------------------------------
111 	 *                               ^^^^^^^
112 	 *                                  |
113 	 * Thanks to the double-mapped data pages, there is no need for any
114 	 * special handling of wrapped-around data. This works both in the
115 	 * kernel and when mmap()'ed in user-space, simplifying both kernel
116 	 * and user-space implementations significantly.
117 	 */
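	/* For example, with hypothetical numbers and 4KB pages: an 8-page
	 * (32KB) data area is followed by the same 8 pages mapped again. A
	 * 4KB record that starts at position 30KB would cross the end of the
	 * first mapping, but since pages 0-1 reappear at 32KB-40KB it can
	 * still be read with one linear access at data + (30KB & mask).
	 */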
118 	array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
119 	pages = bpf_map_area_alloc(array_size, numa_node);
120 	if (!pages)
121 		return NULL;
122 
123 	for (i = 0; i < nr_pages; i++) {
124 		page = alloc_pages_node(numa_node, flags, 0);
125 		if (!page) {
126 			nr_pages = i;
127 			goto err_free_pages;
128 		}
129 		pages[i] = page;
130 		if (i >= nr_meta_pages)
131 			pages[nr_data_pages + i] = page;
132 	}
133 
134 	rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
135 		  VM_MAP | VM_USERMAP, PAGE_KERNEL);
136 	if (rb) {
137 		kmemleak_not_leak(pages);
138 		rb->pages = pages;
139 		rb->nr_pages = nr_pages;
140 		return rb;
141 	}
142 
143 err_free_pages:
144 	for (i = 0; i < nr_pages; i++)
145 		__free_page(pages[i]);
146 	bpf_map_area_free(pages);
147 	return NULL;
148 }
149 
150 static void bpf_ringbuf_notify(struct irq_work *work)
151 {
152 	struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);
153 
154 	wake_up_all(&rb->waitq);
155 }
156 
157 /* Maximum size of the ring buffer area is limited by the 32-bit page offset
158  * within the record header, counted in pages. Reserving 8 bits for
159  * extensibility and taking into account a few extra pages for the
160  * consumer/producer pages and non-mmap()'able parts, the current maximum size would be:
161  *
162  *     (((1ULL << 24) - RINGBUF_POS_PAGES - RINGBUF_PGOFF) * PAGE_SIZE)
163  *
164  * This gives a 64GB limit, which seems plenty for a single ring buffer. Given
165  * that the maximum value of data_sz is (4GB - 1), there will be no
166  * overflow, so we just note the size limit in this comment.
167  */
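/* Concretely, assuming 4KB pages: (1ULL << 24) * 4096 = 2^36 bytes = 64GB,
 * minus the handful of meta pages accounted for above.
 */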
168 static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
169 {
170 	struct bpf_ringbuf *rb;
171 
172 	rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
173 	if (!rb)
174 		return NULL;
175 
176 	spin_lock_init(&rb->spinlock);
177 	atomic_set(&rb->busy, 0);
178 	init_waitqueue_head(&rb->waitq);
179 	init_irq_work(&rb->work, bpf_ringbuf_notify);
180 
181 	rb->mask = data_sz - 1;
182 	rb->consumer_pos = 0;
183 	rb->producer_pos = 0;
184 	rb->pending_pos = 0;
185 
186 	return rb;
187 }
188 
189 static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
190 {
191 	struct bpf_ringbuf_map *rb_map;
192 
193 	if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK)
194 		return ERR_PTR(-EINVAL);
195 
196 	if (attr->key_size || attr->value_size ||
197 	    !is_power_of_2(attr->max_entries) ||
198 	    !PAGE_ALIGNED(attr->max_entries))
199 		return ERR_PTR(-EINVAL);
200 
201 	rb_map = bpf_map_area_alloc(sizeof(*rb_map), NUMA_NO_NODE);
202 	if (!rb_map)
203 		return ERR_PTR(-ENOMEM);
204 
205 	bpf_map_init_from_attr(&rb_map->map, attr);
206 
207 	rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node);
208 	if (!rb_map->rb) {
209 		bpf_map_area_free(rb_map);
210 		return ERR_PTR(-ENOMEM);
211 	}
212 
213 	return &rb_map->map;
214 }
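/* From user-space, a map that passes the checks above could be created with
 * libbpf's bpf_map_create() (illustrative sketch, assuming libbpf >= 0.7):
 *
 *	// key_size and value_size must be 0; max_entries must be a
 *	// power-of-2 multiple of the page size (here 256KB).
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, "rb", 0, 0,
 *				    256 * 1024, NULL);
 */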
215 
216 static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
217 {
218 	/* copy the pages pointer and nr_pages to local variables, as we are
219 	 * going to unmap rb itself with vunmap() below
220 	 */
221 	struct page **pages = rb->pages;
222 	int i, nr_pages = rb->nr_pages;
223 
224 	vunmap(rb);
225 	for (i = 0; i < nr_pages; i++)
226 		__free_page(pages[i]);
227 	bpf_map_area_free(pages);
228 }
229 
230 static void ringbuf_map_free(struct bpf_map *map)
231 {
232 	struct bpf_ringbuf_map *rb_map;
233 
234 	rb_map = container_of(map, struct bpf_ringbuf_map, map);
235 	bpf_ringbuf_free(rb_map->rb);
236 	bpf_map_area_free(rb_map);
237 }
238 
239 static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key)
240 {
241 	return ERR_PTR(-ENOTSUPP);
242 }
243 
244 static long ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
245 				    u64 flags)
246 {
247 	return -ENOTSUPP;
248 }
249 
250 static long ringbuf_map_delete_elem(struct bpf_map *map, void *key)
251 {
252 	return -ENOTSUPP;
253 }
254 
255 static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
256 				    void *next_key)
257 {
258 	return -ENOTSUPP;
259 }
260 
261 static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma)
262 {
263 	struct bpf_ringbuf_map *rb_map;
264 
265 	rb_map = container_of(map, struct bpf_ringbuf_map, map);
266 
267 	if (vma->vm_flags & VM_WRITE) {
268 		/* allow writable mapping for the consumer_pos only */
269 		if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
270 			return -EPERM;
271 	}
272 	/* remap_vmalloc_range() checks size and offset constraints */
273 	return remap_vmalloc_range(vma, rb_map->rb,
274 				   vma->vm_pgoff + RINGBUF_PGOFF);
275 }
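/* A user-space consumer of a kernel-producer ring buffer typically maps the
 * fd in two pieces that match the policy above (sketch assuming 4KB pages,
 * similar to what libbpf's ring buffer consumer does):
 *
 *	// writable: consumer_pos page only (map page offset 0)
 *	void *cons = mmap(NULL, page_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  map_fd, 0);
 *	// read-only: producer_pos page followed by the double-mapped data
 *	void *prod = mmap(NULL, page_sz + 2 * data_sz, PROT_READ, MAP_SHARED,
 *			  map_fd, page_sz);
 */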
276 
277 static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma)
278 {
279 	struct bpf_ringbuf_map *rb_map;
280 
281 	rb_map = container_of(map, struct bpf_ringbuf_map, map);
282 
283 	if (vma->vm_flags & VM_WRITE) {
284 		if (vma->vm_pgoff == 0)
285 			/* Disallow writable mappings to the consumer pointer,
286 			 * and allow writable mappings to both the producer
287 			 * position and the ring buffer data itself.
288 			 */
289 			return -EPERM;
290 	}
291 	/* remap_vmalloc_range() checks size and offset constraints */
292 	return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
293 }
294 
295 static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
296 {
297 	unsigned long cons_pos, prod_pos;
298 
299 	cons_pos = smp_load_acquire(&rb->consumer_pos);
300 	prod_pos = smp_load_acquire(&rb->producer_pos);
301 	return prod_pos - cons_pos;
302 }
303 
304 static u32 ringbuf_total_data_sz(const struct bpf_ringbuf *rb)
305 {
306 	return rb->mask + 1;
307 }
308 
309 static __poll_t ringbuf_map_poll_kern(struct bpf_map *map, struct file *filp,
310 				      struct poll_table_struct *pts)
311 {
312 	struct bpf_ringbuf_map *rb_map;
313 
314 	rb_map = container_of(map, struct bpf_ringbuf_map, map);
315 	poll_wait(filp, &rb_map->rb->waitq, pts);
316 
317 	if (ringbuf_avail_data_sz(rb_map->rb))
318 		return EPOLLIN | EPOLLRDNORM;
319 	return 0;
320 }
321 
322 static __poll_t ringbuf_map_poll_user(struct bpf_map *map, struct file *filp,
323 				      struct poll_table_struct *pts)
324 {
325 	struct bpf_ringbuf_map *rb_map;
326 
327 	rb_map = container_of(map, struct bpf_ringbuf_map, map);
328 	poll_wait(filp, &rb_map->rb->waitq, pts);
329 
330 	if (ringbuf_avail_data_sz(rb_map->rb) < ringbuf_total_data_sz(rb_map->rb))
331 		return EPOLLOUT | EPOLLWRNORM;
332 	return 0;
333 }
334 
335 static u64 ringbuf_map_mem_usage(const struct bpf_map *map)
336 {
337 	struct bpf_ringbuf *rb;
338 	int nr_data_pages;
339 	int nr_meta_pages;
340 	u64 usage = sizeof(struct bpf_ringbuf_map);
341 
342 	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
343 	usage += (u64)rb->nr_pages << PAGE_SHIFT;
344 	nr_meta_pages = RINGBUF_NR_META_PAGES;
345 	nr_data_pages = map->max_entries >> PAGE_SHIFT;
346 	usage += (nr_meta_pages + 2 * nr_data_pages) * sizeof(struct page *);
347 	return usage;
348 }
349 
350 BTF_ID_LIST_SINGLE(ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
351 const struct bpf_map_ops ringbuf_map_ops = {
352 	.map_meta_equal = bpf_map_meta_equal,
353 	.map_alloc = ringbuf_map_alloc,
354 	.map_free = ringbuf_map_free,
355 	.map_mmap = ringbuf_map_mmap_kern,
356 	.map_poll = ringbuf_map_poll_kern,
357 	.map_lookup_elem = ringbuf_map_lookup_elem,
358 	.map_update_elem = ringbuf_map_update_elem,
359 	.map_delete_elem = ringbuf_map_delete_elem,
360 	.map_get_next_key = ringbuf_map_get_next_key,
361 	.map_mem_usage = ringbuf_map_mem_usage,
362 	.map_btf_id = &ringbuf_map_btf_ids[0],
363 };
364 
365 BTF_ID_LIST_SINGLE(user_ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
366 const struct bpf_map_ops user_ringbuf_map_ops = {
367 	.map_meta_equal = bpf_map_meta_equal,
368 	.map_alloc = ringbuf_map_alloc,
369 	.map_free = ringbuf_map_free,
370 	.map_mmap = ringbuf_map_mmap_user,
371 	.map_poll = ringbuf_map_poll_user,
372 	.map_lookup_elem = ringbuf_map_lookup_elem,
373 	.map_update_elem = ringbuf_map_update_elem,
374 	.map_delete_elem = ringbuf_map_delete_elem,
375 	.map_get_next_key = ringbuf_map_get_next_key,
376 	.map_mem_usage = ringbuf_map_mem_usage,
377 	.map_btf_id = &user_ringbuf_map_btf_ids[0],
378 };
379 
380 /* Given a pointer to the ring buffer record metadata and the struct
381  * bpf_ringbuf itself, calculate the offset from the record metadata to the
382  * ring buffer in pages, rounded down. This page offset is stored as part of
383  * the record metadata (at offset 4 of the record header) and allows
384  * restoring the struct bpf_ringbuf * from a record pointer.
385  */
386 static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb,
387 				     struct bpf_ringbuf_hdr *hdr)
388 {
389 	return ((void *)hdr - (void *)rb) >> PAGE_SHIFT;
390 }
391 
392 /* Given a pointer to a ring buffer record header, restore the pointer to the
393  * struct bpf_ringbuf itself by using the page offset stored at offset 4
394  */
395 static struct bpf_ringbuf *
396 bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
397 {
398 	unsigned long addr = (unsigned long)(void *)hdr;
399 	unsigned long off = (unsigned long)hdr->pg_off << PAGE_SHIFT;
400 
401 	return (void*)((addr & PAGE_MASK) - off);
402 }
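/* Round-trip sketch with hypothetical numbers (4KB pages): a record header
 * that lives 3 pages plus 256 bytes past rb gets pg_off = 3. On restore,
 * (addr & PAGE_MASK) drops the 256-byte in-page offset and subtracting
 * (3 << PAGE_SHIFT) walks back exactly 3 pages to rb. This relies on rb
 * being page-aligned, which holds because it is the start of the vmap()'d
 * area.
 */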
403 
404 static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
405 {
406 	unsigned long cons_pos, prod_pos, new_prod_pos, pend_pos, flags;
407 	struct bpf_ringbuf_hdr *hdr;
408 	u32 len, pg_off, tmp_size, hdr_len;
409 
410 	if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
411 		return NULL;
412 
413 	len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
414 	if (len > ringbuf_total_data_sz(rb))
415 		return NULL;
416 
417 	cons_pos = smp_load_acquire(&rb->consumer_pos);
418 
419 	if (in_nmi()) {
420 		if (!spin_trylock_irqsave(&rb->spinlock, flags))
421 			return NULL;
422 	} else {
423 		spin_lock_irqsave(&rb->spinlock, flags);
424 	}
425 
426 	pend_pos = rb->pending_pos;
427 	prod_pos = rb->producer_pos;
428 	new_prod_pos = prod_pos + len;
429 
430 	while (pend_pos < prod_pos) {
431 		hdr = (void *)rb->data + (pend_pos & rb->mask);
432 		hdr_len = READ_ONCE(hdr->len);
433 		if (hdr_len & BPF_RINGBUF_BUSY_BIT)
434 			break;
435 		tmp_size = hdr_len & ~BPF_RINGBUF_DISCARD_BIT;
436 		tmp_size = round_up(tmp_size + BPF_RINGBUF_HDR_SZ, 8);
437 		pend_pos += tmp_size;
438 	}
439 	rb->pending_pos = pend_pos;
440 
441 	/* check for out of ringbuf space:
442 	 * - by ensuring the producer position doesn't advance more than
443 	 *   (ringbuf_size - 1) ahead of the consumer position
444 	 * - by ensuring the span from the oldest not-yet-committed record
445 	 *   to the newest record doesn't exceed (ringbuf_size - 1)
446 	 */
447 	if (new_prod_pos - cons_pos > rb->mask ||
448 	    new_prod_pos - pend_pos > rb->mask) {
449 		spin_unlock_irqrestore(&rb->spinlock, flags);
450 		return NULL;
451 	}
452 
453 	hdr = (void *)rb->data + (prod_pos & rb->mask);
454 	pg_off = bpf_ringbuf_rec_pg_off(rb, hdr);
455 	hdr->len = size | BPF_RINGBUF_BUSY_BIT;
456 	hdr->pg_off = pg_off;
457 
458 	/* pairs with consumer's smp_load_acquire() */
459 	smp_store_release(&rb->producer_pos, new_prod_pos);
460 
461 	spin_unlock_irqrestore(&rb->spinlock, flags);
462 
463 	return (void *)hdr + BPF_RINGBUF_HDR_SZ;
464 }
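/* A numeric illustration of the space check above (hypothetical numbers):
 * with an 8KB ring (mask = 0x1fff), cons_pos = 0, prod_pos = pend_pos = 6KB,
 * reserving a 3KB sample needs len = 3080 and new_prod_pos = 9224, so
 * new_prod_pos - cons_pos > mask and the reservation fails with NULL: at
 * most (size - 1) bytes may ever sit between the consumer position and the
 * newest producer position, and the same bound against pend_pos keeps a
 * stalled, uncommitted record from being overwritten.
 */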
465 
466 BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags)
467 {
468 	struct bpf_ringbuf_map *rb_map;
469 
470 	if (unlikely(flags))
471 		return 0;
472 
473 	rb_map = container_of(map, struct bpf_ringbuf_map, map);
474 	return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size);
475 }
476 
477 const struct bpf_func_proto bpf_ringbuf_reserve_proto = {
478 	.func		= bpf_ringbuf_reserve,
479 	.ret_type	= RET_PTR_TO_RINGBUF_MEM_OR_NULL,
480 	.arg1_type	= ARG_CONST_MAP_PTR,
481 	.arg2_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
482 	.arg3_type	= ARG_ANYTHING,
483 };
484 
485 static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard)
486 {
487 	unsigned long rec_pos, cons_pos;
488 	struct bpf_ringbuf_hdr *hdr;
489 	struct bpf_ringbuf *rb;
490 	u32 new_len;
491 
492 	hdr = sample - BPF_RINGBUF_HDR_SZ;
493 	rb = bpf_ringbuf_restore_from_rec(hdr);
494 	new_len = hdr->len ^ BPF_RINGBUF_BUSY_BIT;
495 	if (discard)
496 		new_len |= BPF_RINGBUF_DISCARD_BIT;
497 
498 	/* update record header with correct final size prefix */
499 	xchg(&hdr->len, new_len);
500 
501 	/* if consumer caught up and is waiting for our record, notify about
502 	 * new data availability
503 	 */
504 	rec_pos = (void *)hdr - (void *)rb->data;
505 	cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;
506 
507 	if (flags & BPF_RB_FORCE_WAKEUP)
508 		irq_work_queue(&rb->work);
509 	else if (cons_pos == rec_pos && !(flags & BPF_RB_NO_WAKEUP))
510 		irq_work_queue(&rb->work);
511 }
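/* The consumer side pairs with the xchg()/smp_store_release() above roughly
 * as follows (illustrative sketch of the protocol; handle_sample() is a
 * hypothetical callback, not code from this file):
 *
 *	cons_pos = smp_load_acquire(&consumer_pos);
 *	while (cons_pos < smp_load_acquire(&producer_pos)) {
 *		hdr = data + (cons_pos & mask);
 *		len = smp_load_acquire(&hdr->len);
 *		if (len & BPF_RINGBUF_BUSY_BIT)
 *			break;			// not committed yet
 *		sz = len & ~BPF_RINGBUF_DISCARD_BIT;
 *		if (!(len & BPF_RINGBUF_DISCARD_BIT))
 *			handle_sample((void *)hdr + BPF_RINGBUF_HDR_SZ, sz);
 *		cons_pos += round_up(sz + BPF_RINGBUF_HDR_SZ, 8);
 *		smp_store_release(&consumer_pos, cons_pos);
 *	}
 */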
512 
513 BPF_CALL_2(bpf_ringbuf_submit, void *, sample, u64, flags)
514 {
515 	bpf_ringbuf_commit(sample, flags, false /* discard */);
516 	return 0;
517 }
518 
519 const struct bpf_func_proto bpf_ringbuf_submit_proto = {
520 	.func		= bpf_ringbuf_submit,
521 	.ret_type	= RET_VOID,
522 	.arg1_type	= ARG_PTR_TO_RINGBUF_MEM | OBJ_RELEASE,
523 	.arg2_type	= ARG_ANYTHING,
524 };
525 
526 BPF_CALL_2(bpf_ringbuf_discard, void *, sample, u64, flags)
527 {
528 	bpf_ringbuf_commit(sample, flags, true /* discard */);
529 	return 0;
530 }
531 
532 const struct bpf_func_proto bpf_ringbuf_discard_proto = {
533 	.func		= bpf_ringbuf_discard,
534 	.ret_type	= RET_VOID,
535 	.arg1_type	= ARG_PTR_TO_RINGBUF_MEM | OBJ_RELEASE,
536 	.arg2_type	= ARG_ANYTHING,
537 };
538 
539 BPF_CALL_4(bpf_ringbuf_output, struct bpf_map *, map, void *, data, u64, size,
540 	   u64, flags)
541 {
542 	struct bpf_ringbuf_map *rb_map;
543 	void *rec;
544 
545 	if (unlikely(flags & ~(BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP)))
546 		return -EINVAL;
547 
548 	rb_map = container_of(map, struct bpf_ringbuf_map, map);
549 	rec = __bpf_ringbuf_reserve(rb_map->rb, size);
550 	if (!rec)
551 		return -EAGAIN;
552 
553 	memcpy(rec, data, size);
554 	bpf_ringbuf_commit(rec, flags, false /* discard */);
555 	return 0;
556 }
557 
558 const struct bpf_func_proto bpf_ringbuf_output_proto = {
559 	.func		= bpf_ringbuf_output,
560 	.ret_type	= RET_INTEGER,
561 	.arg1_type	= ARG_CONST_MAP_PTR,
562 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
563 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
564 	.arg4_type	= ARG_ANYTHING,
565 };
566 
567 BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags)
568 {
569 	struct bpf_ringbuf *rb;
570 
571 	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
572 
573 	switch (flags) {
574 	case BPF_RB_AVAIL_DATA:
575 		return ringbuf_avail_data_sz(rb);
576 	case BPF_RB_RING_SIZE:
577 		return ringbuf_total_data_sz(rb);
578 	case BPF_RB_CONS_POS:
579 		return smp_load_acquire(&rb->consumer_pos);
580 	case BPF_RB_PROD_POS:
581 		return smp_load_acquire(&rb->producer_pos);
582 	default:
583 		return 0;
584 	}
585 }
586 
587 const struct bpf_func_proto bpf_ringbuf_query_proto = {
588 	.func		= bpf_ringbuf_query,
589 	.ret_type	= RET_INTEGER,
590 	.arg1_type	= ARG_CONST_MAP_PTR,
591 	.arg2_type	= ARG_ANYTHING,
592 };
593 
594 BPF_CALL_4(bpf_ringbuf_reserve_dynptr, struct bpf_map *, map, u32, size, u64, flags,
595 	   struct bpf_dynptr_kern *, ptr)
596 {
597 	struct bpf_ringbuf_map *rb_map;
598 	void *sample;
599 	int err;
600 
601 	if (unlikely(flags)) {
602 		bpf_dynptr_set_null(ptr);
603 		return -EINVAL;
604 	}
605 
606 	err = bpf_dynptr_check_size(size);
607 	if (err) {
608 		bpf_dynptr_set_null(ptr);
609 		return err;
610 	}
611 
612 	rb_map = container_of(map, struct bpf_ringbuf_map, map);
613 
614 	sample = __bpf_ringbuf_reserve(rb_map->rb, size);
615 	if (!sample) {
616 		bpf_dynptr_set_null(ptr);
617 		return -EINVAL;
618 	}
619 
620 	bpf_dynptr_init(ptr, sample, BPF_DYNPTR_TYPE_RINGBUF, 0, size);
621 
622 	return 0;
623 }
624 
625 const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto = {
626 	.func		= bpf_ringbuf_reserve_dynptr,
627 	.ret_type	= RET_INTEGER,
628 	.arg1_type	= ARG_CONST_MAP_PTR,
629 	.arg2_type	= ARG_ANYTHING,
630 	.arg3_type	= ARG_ANYTHING,
631 	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT | MEM_WRITE,
632 };
633 
634 BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
635 {
636 	if (!ptr->data)
637 		return 0;
638 
639 	bpf_ringbuf_commit(ptr->data, flags, false /* discard */);
640 
641 	bpf_dynptr_set_null(ptr);
642 
643 	return 0;
644 }
645 
646 const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto = {
647 	.func		= bpf_ringbuf_submit_dynptr,
648 	.ret_type	= RET_VOID,
649 	.arg1_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
650 	.arg2_type	= ARG_ANYTHING,
651 };
652 
653 BPF_CALL_2(bpf_ringbuf_discard_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
654 {
655 	if (!ptr->data)
656 		return 0;
657 
658 	bpf_ringbuf_commit(ptr->data, flags, true /* discard */);
659 
660 	bpf_dynptr_set_null(ptr);
661 
662 	return 0;
663 }
664 
665 const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto = {
666 	.func		= bpf_ringbuf_discard_dynptr,
667 	.ret_type	= RET_VOID,
668 	.arg1_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
669 	.arg2_type	= ARG_ANYTHING,
670 };
671 
672 static int __bpf_user_ringbuf_peek(struct bpf_ringbuf *rb, void **sample, u32 *size)
673 {
674 	int err;
675 	u32 hdr_len, sample_len, total_len, flags, *hdr;
676 	u64 cons_pos, prod_pos;
677 
678 	/* Synchronizes with smp_store_release() in user-space producer. */
679 	prod_pos = smp_load_acquire(&rb->producer_pos);
680 	if (prod_pos % 8)
681 		return -EINVAL;
682 
683 	/* Synchronizes with smp_store_release() in __bpf_user_ringbuf_sample_release() */
684 	cons_pos = smp_load_acquire(&rb->consumer_pos);
685 	if (cons_pos >= prod_pos)
686 		return -ENODATA;
687 
688 	hdr = (u32 *)((uintptr_t)rb->data + (uintptr_t)(cons_pos & rb->mask));
689 	/* Synchronizes with smp_store_release() in user-space producer. */
690 	hdr_len = smp_load_acquire(hdr);
691 	flags = hdr_len & (BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT);
692 	sample_len = hdr_len & ~flags;
693 	total_len = round_up(sample_len + BPF_RINGBUF_HDR_SZ, 8);
694 
695 	/* The sample must fit within the region advertised by the producer position. */
696 	if (total_len > prod_pos - cons_pos)
697 		return -EINVAL;
698 
699 	/* The sample must fit within the data region of the ring buffer. */
700 	if (total_len > ringbuf_total_data_sz(rb))
701 		return -E2BIG;
702 
703 	/* The sample must fit into a struct bpf_dynptr. */
704 	err = bpf_dynptr_check_size(sample_len);
705 	if (err)
706 		return -E2BIG;
707 
708 	if (flags & BPF_RINGBUF_DISCARD_BIT) {
709 		/* If the discard bit is set, the sample should be skipped.
710 		 *
711 		 * Update the consumer pos, and return -EAGAIN so the caller
712 		 * knows to skip this sample and try to read the next one.
713 		 */
714 		smp_store_release(&rb->consumer_pos, cons_pos + total_len);
715 		return -EAGAIN;
716 	}
717 
718 	if (flags & BPF_RINGBUF_BUSY_BIT)
719 		return -ENODATA;
720 
721 	*sample = (void *)((uintptr_t)rb->data +
722 			   (uintptr_t)((cons_pos + BPF_RINGBUF_HDR_SZ) & rb->mask));
723 	*size = sample_len;
724 	return 0;
725 }
726 
727 static void __bpf_user_ringbuf_sample_release(struct bpf_ringbuf *rb, size_t size, u64 flags)
728 {
729 	u64 consumer_pos;
730 	u32 rounded_size = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
731 
732 	/* Using smp_load_acquire() is unnecessary here, as the busy-bit
733 	 * prevents another task from writing to consumer_pos after it was read
734 	 * by this task with smp_load_acquire() in __bpf_user_ringbuf_peek().
735 	 */
736 	consumer_pos = rb->consumer_pos;
737 	 /* Synchronizes with smp_load_acquire() in user-space producer. */
738 	smp_store_release(&rb->consumer_pos, consumer_pos + rounded_size);
739 }
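/* The user-space producer that this peek/release pair expects follows
 * roughly this protocol (illustrative sketch, similar to what libbpf's
 * user_ring_buffer reserve/submit API implements):
 *
 *	// reserve: publish a busy header, then advance the producer position
 *	hdr->len = sample_len | BPF_RINGBUF_BUSY_BIT;
 *	smp_store_release(&producer_pos,
 *			  prod_pos + round_up(sample_len + BPF_RINGBUF_HDR_SZ, 8));
 *	// ... fill in the sample payload ...
 *	// submit: clear the busy bit so __bpf_user_ringbuf_peek() accepts it
 *	smp_store_release(&hdr->len, sample_len);
 */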
740 
741 BPF_CALL_4(bpf_user_ringbuf_drain, struct bpf_map *, map,
742 	   void *, callback_fn, void *, callback_ctx, u64, flags)
743 {
744 	struct bpf_ringbuf *rb;
745 	long samples, discarded_samples = 0, ret = 0;
746 	bpf_callback_t callback = (bpf_callback_t)callback_fn;
747 	u64 wakeup_flags = BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP;
748 	int busy = 0;
749 
750 	if (unlikely(flags & ~wakeup_flags))
751 		return -EINVAL;
752 
753 	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
754 
755 	/* If another consumer is already consuming a sample, return -EBUSY rather than waiting. */
756 	if (!atomic_try_cmpxchg(&rb->busy, &busy, 1))
757 		return -EBUSY;
758 
759 	for (samples = 0; samples < BPF_MAX_USER_RINGBUF_SAMPLES && ret == 0; samples++) {
760 		int err;
761 		u32 size;
762 		void *sample;
763 		struct bpf_dynptr_kern dynptr;
764 
765 		err = __bpf_user_ringbuf_peek(rb, &sample, &size);
766 		if (err) {
767 			if (err == -ENODATA) {
768 				break;
769 			} else if (err == -EAGAIN) {
770 				discarded_samples++;
771 				continue;
772 			} else {
773 				ret = err;
774 				goto schedule_work_return;
775 			}
776 		}
777 
778 		bpf_dynptr_init(&dynptr, sample, BPF_DYNPTR_TYPE_LOCAL, 0, size);
779 		ret = callback((uintptr_t)&dynptr, (uintptr_t)callback_ctx, 0, 0, 0);
780 		__bpf_user_ringbuf_sample_release(rb, size, flags);
781 	}
782 	ret = samples - discarded_samples;
783 
784 schedule_work_return:
785 	/* Prevent the clearing of the busy-bit from being reordered before the
786 	 * storing of any rb consumer or producer positions.
787 	 */
788 	smp_mb__before_atomic();
789 	atomic_set(&rb->busy, 0);
790 
791 	if (flags & BPF_RB_FORCE_WAKEUP)
792 		irq_work_queue(&rb->work);
793 	else if (!(flags & BPF_RB_NO_WAKEUP) && samples > 0)
794 		irq_work_queue(&rb->work);
795 	return ret;
796 }
797 
798 const struct bpf_func_proto bpf_user_ringbuf_drain_proto = {
799 	.func		= bpf_user_ringbuf_drain,
800 	.ret_type	= RET_INTEGER,
801 	.arg1_type	= ARG_CONST_MAP_PTR,
802 	.arg2_type	= ARG_PTR_TO_FUNC,
803 	.arg3_type	= ARG_PTR_TO_STACK_OR_NULL,
804 	.arg4_type	= ARG_ANYTHING,
805 };
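/* A BPF program drains a BPF_MAP_TYPE_USER_RINGBUF map by passing a callback
 * that receives each sample as a dynptr (illustrative sketch; the map and
 * callback names are hypothetical):
 *
 *	static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
 *	{
 *		// access the sample via bpf_dynptr_read()/bpf_dynptr_data();
 *		// return non-zero to stop draining early
 *		return 0;
 *	}
 *
 *	long n = bpf_user_ringbuf_drain(&user_rb, handle_sample, NULL, 0);
 *	// n is the number of drained samples, or a negative error
 */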
806