// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#define _GNU_SOURCE
#include <linux/compiler.h>
#include <linux/ring_buffer.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <test_progs.h>
#include <uapi/linux/bpf.h>
#include <unistd.h>

#include "user_ringbuf_fail.skel.h"
#include "user_ringbuf_success.skel.h"

#include "../progs/test_user_ringbuf.h"

static size_t log_buf_sz = 1 << 20; /* 1 MiB */
static char obj_log_buf[1048576];
static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ;
static const long c_ringbuf_size = 1 << 12; /* 1 small page */
static const long c_max_entries = c_ringbuf_size / c_sample_size;
static void drain_current_samples(void)
{
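	/* Each "kick" syscall used by this test has a BPF program attached to
	 * it in the _success skeleton; getpgid() triggers the program that
	 * drains the user ring buffer.
	 */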
	syscall(__NR_getpgid);
}

static int write_samples(struct user_ring_buffer *ringbuf, uint32_t num_samples)
{
	int i, err = 0;

	/* Write some number of samples to the ring buffer. */
	for (i = 0; i < num_samples; i++) {
		struct sample *entry;
		int read;

		entry = user_ring_buffer__reserve(ringbuf, sizeof(*entry));
		if (!entry) {
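			/* libbpf returns NULL and sets errno on failure, e.g.
			 * ENOSPC once the ring buffer is full.
			 */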
			err = -errno;
			goto done;
		}

		entry->pid = getpid();
		entry->seq = i;
		entry->value = i * i;

		read = snprintf(entry->comm, sizeof(entry->comm), "%u", i);
		if (read <= 0) {
			/* Assert on the error path to avoid spamming logs with
			 * mostly success messages.
			 */
			ASSERT_GT(read, 0, "snprintf_comm");
			err = read;
			user_ring_buffer__discard(ringbuf, entry);
			goto done;
		}

		user_ring_buffer__submit(ringbuf, entry);
	}

done:
	drain_current_samples();

	return err;
}

static struct user_ringbuf_success *open_load_ringbuf_skel(void)
{
	struct user_ringbuf_success *skel;
	int err;

	skel = user_ringbuf_success__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return NULL;

	err = bpf_map__set_max_entries(skel->maps.user_ringbuf, c_ringbuf_size);
	if (!ASSERT_OK(err, "set_user_max_entries"))
		goto cleanup;

	err = bpf_map__set_max_entries(skel->maps.kernel_ringbuf, c_ringbuf_size);
	if (!ASSERT_OK(err, "set_kernel_max_entries"))
		goto cleanup;

	err = user_ringbuf_success__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	return skel;

cleanup:
	user_ringbuf_success__destroy(skel);
	return NULL;
}

static void test_user_ringbuf_mappings(void)
{
	int err, rb_fd;
	int page_size = getpagesize();
	void *mmap_ptr;
	struct user_ringbuf_success *skel;

	skel = open_load_ringbuf_skel();
	if (!skel)
		return;

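	/* The ring buffer's mmap() layout: page 0 holds the consumer position,
	 * page 1 the producer position, and pages 2+ the data, matching the
	 * offsets used below.
	 */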
	rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
	/* cons_pos can be mapped R/O; it can't be made writable or executable
	 * with mprotect(), nor grown with mremap().
	 */
	mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
	ASSERT_OK_PTR(mmap_ptr, "ro_cons_pos");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_cons_pos_protect");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "mremap_cons_pos");
	err = -errno;
	ASSERT_ERR(err, "mremap_cons_pos_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro_cons");

	/* prod_pos can be mapped RW; it can't be made executable with
	 * mprotect().
	 */
	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
			rb_fd, page_size);
	ASSERT_OK_PTR(mmap_ptr, "rw_prod_pos");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_prod_pos_protect");
	err = -errno;
	ASSERT_ERR(err, "exec_prod_pos_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_prod");

	/* data pages can be mapped RW; they can't be made executable with
	 * mprotect().
	 */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd,
			2 * page_size);
	ASSERT_OK_PTR(mmap_ptr, "rw_data");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_data_protect");
	err = -errno;
	ASSERT_ERR(err, "exec_data_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_data");

	user_ringbuf_success__destroy(skel);
}

static int load_skel_create_ringbufs(struct user_ringbuf_success **skel_out,
				     struct ring_buffer **kern_ringbuf_out,
				     ring_buffer_sample_fn callback,
				     struct user_ring_buffer **user_ringbuf_out)
{
	struct user_ringbuf_success *skel;
	struct ring_buffer *kern_ringbuf = NULL;
	struct user_ring_buffer *user_ringbuf = NULL;
	int err = -ENOMEM, rb_fd;

	skel = open_load_ringbuf_skel();
	if (!skel)
		return err;

	/* only trigger BPF program for current process */
	skel->bss->pid = getpid();

	if (kern_ringbuf_out) {
		rb_fd = bpf_map__fd(skel->maps.kernel_ringbuf);
		kern_ringbuf = ring_buffer__new(rb_fd, callback, skel, NULL);
		if (!ASSERT_OK_PTR(kern_ringbuf, "kern_ringbuf_create"))
			goto cleanup;

		*kern_ringbuf_out = kern_ringbuf;
	}

	if (user_ringbuf_out) {
		rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
		user_ringbuf = user_ring_buffer__new(rb_fd, NULL);
		if (!ASSERT_OK_PTR(user_ringbuf, "user_ringbuf_create"))
			goto cleanup;

		*user_ringbuf_out = user_ringbuf;
		ASSERT_EQ(skel->bss->read, 0, "no_reads_after_load");
	}

	err = user_ringbuf_success__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto cleanup;

	*skel_out = skel;
	return 0;

cleanup:
	if (kern_ringbuf_out)
		*kern_ringbuf_out = NULL;
	if (user_ringbuf_out)
		*user_ringbuf_out = NULL;
	ring_buffer__free(kern_ringbuf);
	user_ring_buffer__free(user_ringbuf);
	user_ringbuf_success__destroy(skel);
	return err;
}

static int load_skel_create_user_ringbuf(struct user_ringbuf_success **skel_out,
					 struct user_ring_buffer **ringbuf_out)
{
	return load_skel_create_ringbufs(skel_out, NULL, NULL, ringbuf_out);
}

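/* Bypass libbpf and write a raw sample header directly into the mapped ring
 * buffer, so that the kernel's validation of invalid sizes and producer
 * positions can be exercised.
 */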
static void manually_write_test_invalid_sample(struct user_ringbuf_success *skel,
					       __u32 size, __u64 producer_pos, int err)
{
	void *data_ptr;
	__u64 *producer_pos_ptr;
	int rb_fd, page_size = getpagesize();

	rb_fd = bpf_map__fd(skel->maps.user_ringbuf);

	ASSERT_EQ(skel->bss->read, 0, "num_samples_before_bad_sample");

	/* Map the producer_pos as RW. */
	producer_pos_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
				MAP_SHARED, rb_fd, page_size);
	ASSERT_OK_PTR(producer_pos_ptr, "producer_pos_ptr");

	/* Map the data pages as RW. */
	data_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	ASSERT_OK_PTR(data_ptr, "rw_data");

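	/* Hand-craft a sample header: zero it out, then store the
	 * (intentionally bogus) size in its length word.
	 */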
	memset(data_ptr, 0, BPF_RINGBUF_HDR_SZ);
	*(__u32 *)data_ptr = size;

	/* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in the kernel. */
	smp_store_release(producer_pos_ptr, producer_pos + BPF_RINGBUF_HDR_SZ);

	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 0, "num_samples_after_bad_sample");
	ASSERT_EQ(skel->bss->err, err, "err_after_bad_sample");

	ASSERT_OK(munmap(producer_pos_ptr, page_size), "unmap_producer_pos");
	ASSERT_OK(munmap(data_ptr, page_size), "unmap_data_ptr");
}

static void test_user_ringbuf_post_misaligned(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = (1 << 5) + 7;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "misaligned_skel"))
		return;

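	/* A sample whose size is not a multiple of 8 must be rejected by the
	 * kernel with -EINVAL.
	 */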
	manually_write_test_invalid_sample(skel, size, size, -EINVAL);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_producer_wrong_offset(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = (1 << 5);

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "wrong_offset_skel"))
		return;

	manually_write_test_invalid_sample(skel, size, size - 8, -EINVAL);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_larger_than_ringbuf_sz(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = c_ringbuf_size;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "huge_sample_skel"))
		return;

	manually_write_test_invalid_sample(skel, size, size, -E2BIG);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_basic(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_basic_skel"))
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	err = write_samples(ringbuf, 2);
	if (!ASSERT_OK(err, "write_samples"))
		goto cleanup;

	ASSERT_EQ(skel->bss->read, 2, "num_samples_read_after");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_sample_full_ring_buffer(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	void *sample;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_full_sample_skel"))
		return;

	sample = user_ring_buffer__reserve(ringbuf, c_ringbuf_size - BPF_RINGBUF_HDR_SZ);
	if (!ASSERT_OK_PTR(sample, "full_sample"))
		goto cleanup;

	user_ring_buffer__submit(ringbuf, sample);
	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_alignment_autoadjust(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	struct sample *sample;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_align_autoadjust_skel"))
		return;

	/* libbpf should automatically round any sample up to an 8-byte alignment. */
	sample = user_ring_buffer__reserve(ringbuf, sizeof(*sample) + 1);
	if (!ASSERT_OK_PTR(sample, "reserve_autoaligned"))
		goto cleanup;
	user_ring_buffer__submit(ringbuf, sample);

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_overfill(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

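	/* Attempt to write many more samples than fit. Reservations fail once
	 * the buffer is full, so the kernel should read exactly c_max_entries
	 * samples when drained.
	 */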
	err = write_samples(ringbuf, c_max_entries * 5);
	ASSERT_ERR(err, "write_samples");
	ASSERT_EQ(skel->bss->read, c_max_entries, "max_entries");

	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_discards_properly_ignored(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err, num_discarded = 0;
	__u64 *token;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	while (1) {
		/* Reserve samples until the buffer is full, discarding each one. */
		token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
		if (!token)
			break;

		user_ring_buffer__discard(ringbuf, token);
		num_discarded++;
	}

	if (!ASSERT_GT(num_discarded, 0, "num_discarded"))
		goto cleanup;

	/* Should not read any samples, as they were all discarded. */
	ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 0, "num_post_kick");

	/* Now that the ring buffer has been drained, we should be able to
	 * reserve another token.
	 */
	token = user_ring_buffer__reserve(ringbuf, sizeof(*token));

	if (!ASSERT_OK_PTR(token, "new_token"))
		goto cleanup;

	user_ring_buffer__discard(ringbuf, token);
cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_loop(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	uint32_t total_samples = 8192;
	uint32_t remaining_samples = total_samples;
	int err;

	BUILD_BUG_ON(total_samples <= c_max_entries);
	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	do {
		uint32_t curr_samples;

		curr_samples = remaining_samples > c_max_entries
			? c_max_entries : remaining_samples;
		err = write_samples(ringbuf, curr_samples);
		if (err != 0) {
			/* Assert inside of if statement to avoid flooding logs
			 * on the success path.
			 */
			ASSERT_OK(err, "write_samples");
			goto cleanup;
		}

		remaining_samples -= curr_samples;
		ASSERT_EQ(skel->bss->read, total_samples - remaining_samples,
			  "current_batched_entries");
	} while (remaining_samples > 0);
	ASSERT_EQ(skel->bss->read, total_samples, "total_batched_entries");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static int send_test_message(struct user_ring_buffer *ringbuf,
			     enum test_msg_op op, s64 operand_64,
			     s32 operand_32)
{
	struct test_msg *msg;

	msg = user_ring_buffer__reserve(ringbuf, sizeof(*msg));
	if (!msg) {
		/* Assert on the error path to avoid spamming logs with mostly
		 * success messages.
		 */
		ASSERT_OK_PTR(msg, "reserve_msg");
		return -ENOMEM;
	}

	msg->msg_op = op;

	switch (op) {
	case TEST_MSG_OP_INC64:
	case TEST_MSG_OP_MUL64:
		msg->operand_64 = operand_64;
		break;
	case TEST_MSG_OP_INC32:
	case TEST_MSG_OP_MUL32:
		msg->operand_32 = operand_32;
		break;
	default:
		PRINT_FAIL("Invalid op %d\n", op);
		user_ring_buffer__discard(ringbuf, msg);
		return -EINVAL;
	}

	user_ring_buffer__submit(ringbuf, msg);

	return 0;
}

static void kick_kernel_read_messages(void)
{
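	/* prctl() triggers the BPF program that reads and applies the messages
	 * posted to the user ring buffer.
	 */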
	syscall(__NR_prctl);
}

static int handle_kernel_msg(void *ctx, void *data, size_t len)
{
	struct user_ringbuf_success *skel = ctx;
	struct test_msg *msg = data;

	switch (msg->msg_op) {
	case TEST_MSG_OP_INC64:
		skel->bss->user_mutated += msg->operand_64;
		return 0;
	case TEST_MSG_OP_INC32:
		skel->bss->user_mutated += msg->operand_32;
		return 0;
	case TEST_MSG_OP_MUL64:
		skel->bss->user_mutated *= msg->operand_64;
		return 0;
	case TEST_MSG_OP_MUL32:
		skel->bss->user_mutated *= msg->operand_32;
		return 0;
	default:
		fprintf(stderr, "Invalid msg_op %d\n", msg->msg_op);
		return -EINVAL;
	}
}

static void drain_kernel_messages_buffer(struct ring_buffer *kern_ringbuf,
					 struct user_ringbuf_success *skel)
{
	int cnt;

	cnt = ring_buffer__consume(kern_ringbuf);
	ASSERT_EQ(cnt, 8, "consume_kern_ringbuf");
	ASSERT_OK(skel->bss->err, "consume_kern_ringbuf_err");
}

static void test_user_ringbuf_msg_protocol(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *user_ringbuf;
	struct ring_buffer *kern_ringbuf;
	int err, i;
	__u64 expected_kern = 0;

	err = load_skel_create_ringbufs(&skel, &kern_ringbuf, handle_kernel_msg, &user_ringbuf);
	if (!ASSERT_OK(err, "create_ringbufs"))
		return;

	for (i = 0; i < 64; i++) {
		enum test_msg_op op = i % TEST_MSG_OP_NUM_OPS;
		__u64 operand_64 = TEST_OP_64;
		__u32 operand_32 = TEST_OP_32;

		err = send_test_message(user_ringbuf, op, operand_64, operand_32);
		if (err) {
			/* Only assert on a failure to avoid spamming success logs. */
			ASSERT_OK(err, "send_test_message");
			goto cleanup;
		}

		switch (op) {
		case TEST_MSG_OP_INC64:
			expected_kern += operand_64;
			break;
		case TEST_MSG_OP_INC32:
			expected_kern += operand_32;
			break;
		case TEST_MSG_OP_MUL64:
			expected_kern *= operand_64;
			break;
		case TEST_MSG_OP_MUL32:
			expected_kern *= operand_32;
			break;
		default:
			PRINT_FAIL("Unexpected op %d\n", op);
			goto cleanup;
		}

		if (i % 8 == 0) {
			kick_kernel_read_messages();
			ASSERT_EQ(skel->bss->kern_mutated, expected_kern, "expected_kern");
			ASSERT_EQ(skel->bss->err, 0, "bpf_prog_err");
			drain_kernel_messages_buffer(kern_ringbuf, skel);
		}
	}

cleanup:
	ring_buffer__free(kern_ringbuf);
	user_ring_buffer__free(user_ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void *kick_kernel_cb(void *arg)
{
	/* Kick the kernel, causing it to drain the ring buffer and then wake
	 * up the test thread waiting on epoll.
	 */
	syscall(__NR_getrlimit);

	return NULL;
}

static int spawn_kick_thread_for_poll(void)
{
	pthread_t thread;

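	/* Fire and forget: the kick thread makes a single syscall and exits,
	 * so it is never joined.
	 */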
	return pthread_create(&thread, NULL, kick_kernel_cb, NULL);
}

static void test_user_ringbuf_blocking_reserve(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err, num_written = 0;
	__u64 *token;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	while (1) {
		/* Write samples until the buffer is full. */
		token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
		if (!token)
			break;

		*token = 0xdeadbeef;

		user_ring_buffer__submit(ringbuf, token);
		num_written++;
	}

	if (!ASSERT_GT(num_written, 0, "num_written"))
		goto cleanup;

	/* Should not have read any samples until the kernel is kicked. */
	ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");

	/* We should correctly time out after 1 second, without a sample. */
	token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 1000);
	if (!ASSERT_EQ(token, NULL, "pre_kick_timeout_token"))
		goto cleanup;

	err = spawn_kick_thread_for_poll();
	if (!ASSERT_EQ(err, 0, "deferred_kick_thread"))
		goto cleanup;

	/* After spawning another thread that asynchronously kicks the kernel
	 * to drain the messages, we're able to block and successfully get a
	 * sample once we receive an event notification.
	 */
	token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 10000);

	if (!ASSERT_OK_PTR(token, "block_token"))
		goto cleanup;

	ASSERT_GT(skel->bss->read, 0, "num_post_kick");
	ASSERT_LE(skel->bss->read, num_written, "num_post_kick_le_written");
	ASSERT_EQ(skel->bss->err, 0, "err_post_poll");
	user_ring_buffer__discard(ringbuf, token);

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

665 
666 static struct {
667 	const char *prog_name;
668 	const char *expected_err_msg;
669 } failure_tests[] = {
670 	/* failure cases */
671 	{"user_ringbuf_callback_bad_access1", "negative offset dynptr_ptr ptr"},
672 	{"user_ringbuf_callback_bad_access2", "dereference of modified dynptr_ptr ptr"},
673 	{"user_ringbuf_callback_write_forbidden", "invalid mem access 'dynptr_ptr'"},
674 	{"user_ringbuf_callback_null_context_write", "invalid mem access 'scalar'"},
675 	{"user_ringbuf_callback_null_context_read", "invalid mem access 'scalar'"},
676 	{"user_ringbuf_callback_discard_dynptr", "cannot release unowned const bpf_dynptr"},
677 	{"user_ringbuf_callback_submit_dynptr", "cannot release unowned const bpf_dynptr"},
678 	{"user_ringbuf_callback_invalid_return", "At callback return the register R0 has value"},
679 	{"user_ringbuf_callback_reinit_dynptr_mem", "Dynptr has to be an uninitialized dynptr"},
680 	{"user_ringbuf_callback_reinit_dynptr_ringbuf", "Dynptr has to be an uninitialized dynptr"},
681 };
682 
683 #define SUCCESS_TEST(_func) { _func, #_func }
684 
685 static struct {
686 	void (*test_callback)(void);
687 	const char *test_name;
688 } success_tests[] = {
689 	SUCCESS_TEST(test_user_ringbuf_mappings),
690 	SUCCESS_TEST(test_user_ringbuf_post_misaligned),
691 	SUCCESS_TEST(test_user_ringbuf_post_producer_wrong_offset),
692 	SUCCESS_TEST(test_user_ringbuf_post_larger_than_ringbuf_sz),
693 	SUCCESS_TEST(test_user_ringbuf_basic),
694 	SUCCESS_TEST(test_user_ringbuf_sample_full_ring_buffer),
695 	SUCCESS_TEST(test_user_ringbuf_post_alignment_autoadjust),
696 	SUCCESS_TEST(test_user_ringbuf_overfill),
697 	SUCCESS_TEST(test_user_ringbuf_discards_properly_ignored),
698 	SUCCESS_TEST(test_user_ringbuf_loop),
699 	SUCCESS_TEST(test_user_ringbuf_msg_protocol),
700 	SUCCESS_TEST(test_user_ringbuf_blocking_reserve),
701 };

static void verify_fail(const char *prog_name, const char *expected_err_msg)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	struct bpf_program *prog;
	struct user_ringbuf_fail *skel;
	int err;

	opts.kernel_log_buf = obj_log_buf;
	opts.kernel_log_size = log_buf_sz;
	opts.kernel_log_level = 1;

	skel = user_ringbuf_fail__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "user_ringbuf_fail__open_opts"))
		goto cleanup;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto cleanup;

	bpf_program__set_autoload(prog, true);

	bpf_map__set_max_entries(skel->maps.user_ringbuf, getpagesize());

	err = user_ringbuf_fail__load(skel);
	if (!ASSERT_ERR(err, "unexpected load success"))
		goto cleanup;

	if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
		fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
		fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
	}

cleanup:
	user_ringbuf_fail__destroy(skel);
}

void test_user_ringbuf(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
		if (!test__start_subtest(success_tests[i].test_name))
			continue;

		success_tests[i].test_callback();
	}

	for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
		if (!test__start_subtest(failure_tests[i].prog_name))
			continue;

		verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg);
	}
}