// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <test_progs.h>
#include <sys/mman.h>
#include <sys/epoll.h>
#include <time.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include "test_ringbuf.skel.h"

/* arbitrary sentinel; the sample callback returns -EDONE to stop polling */
#define EDONE 7777

static int duration = 0; /* used by the CHECK() macro */

/* matches the sample layout filled in by the BPF program */
struct sample {
	int pid;
	int seq;
	long value;
	char comm[16];
};
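/* The position arithmetic below assumes each ring buffer record occupies
 * exactly BPF_RINGBUF_HDR_SZ + sizeof(struct sample) bytes. The kernel
 * rounds every record up to a multiple of 8 bytes, so that assumption only
 * holds while the payload size stays 8-byte aligned; a compile-time check
 * makes the assumption explicit.
 */
_Static_assert(sizeof(struct sample) % 8 == 0, "sample size must be 8-byte aligned");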

static int sample_cnt;

/* sample_cnt is bumped by the ring buffer callback (possibly on the
 * background poll thread) and read/reset by the main thread, so all
 * accesses go through atomics
 */
static void atomic_inc(int *cnt)
{
	__atomic_add_fetch(cnt, 1, __ATOMIC_SEQ_CST);
}

static int atomic_xchg(int *cnt, int val)
{
	return __atomic_exchange_n(cnt, val, __ATOMIC_SEQ_CST);
}

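/* Callback invoked by ring_buffer__poll()/ring_buffer__consume() for each
 * record. A negative return value aborts consumption and is propagated as
 * the return value of the poll/consume call; -EDONE signals that the second
 * expected sample has been seen.
 */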
static int process_sample(void *ctx, void *data, size_t len)
{
	struct sample *s = data;

	atomic_inc(&sample_cnt);

	switch (s->seq) {
	case 0:
		CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n",
		      333L, s->value);
		return 0;
	case 1:
		CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n",
		      777L, s->value);
		return -EDONE;
	default:
		/* we don't care about the rest */
		return 0;
	}
}

static struct test_ringbuf *skel;
static struct ring_buffer *ringbuf;

static void trigger_samples(void)
{
	skel->bss->dropped = 0;
	skel->bss->total = 0;
	skel->bss->discarded = 0;

	/* trigger exactly two samples */
	skel->bss->value = 333;
	syscall(__NR_getpgid);
	skel->bss->value = 777;
	syscall(__NR_getpgid);
}

/* background thread body: block in ring_buffer__poll() with the given
 * timeout (in ms) and return its result
 */
static void *poll_thread(void *input)
{
	long timeout = (long)input;

	return (void *)(long)ring_buffer__poll(ringbuf, timeout);
}

void test_ringbuf(void)
{
	const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
	pthread_t thread;
	long bg_ret = -1;
	int err, cnt, rb_fd;
	int page_size = getpagesize();
	void *mmap_ptr, *tmp_ptr;

	skel = test_ringbuf__open();
	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
		return;

	err = bpf_map__set_max_entries(skel->maps.ringbuf, page_size);
	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
		goto cleanup;

	err = test_ringbuf__load(skel);
	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
		goto cleanup;

	rb_fd = bpf_map__fd(skel->maps.ringbuf);
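	/* A BPF ringbuf fd has a fixed mmap layout: page 0 holds the
	 * consumer position (writable from user space), page 1 the producer
	 * position (read-only), and the data area starts at page 2
	 * (read-only). The data area is mapped twice back-to-back so that
	 * records can wrap around contiguously; with a one-page ring that
	 * means pages 2 and 3 alias the same data and the whole mapping
	 * spans 4 pages. The mmap()/mprotect()/mremap() calls below probe
	 * exactly these permission rules.
	 */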
	/* good read/write cons_pos */
	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
	ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
	tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);
	if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend"))
		goto cleanup;
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");

	/* bad writable prod_pos */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);
	err = -errno;
	ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos");
	ASSERT_EQ(err, -EPERM, "wr_prod_pos_err");

	/* bad writable data pages */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	err = -errno;
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one");
	ASSERT_EQ(err, -EPERM, "wr_data_page_one_err");
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two");
	mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all");

	/* good read-only pages */
	mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
	if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
		goto cleanup;

	ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect");
	ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap");
	ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro");

	/* good read-only pages with initial offset */
	mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);
	if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
		goto cleanup;

	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro");

	/* only trigger BPF program for current process */
	skel->bss->pid = getpid();

	ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf),
				   process_sample, NULL, NULL);
	if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
		goto cleanup;

	err = test_ringbuf__attach(skel);
	if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
		goto cleanup;

	trigger_samples();

	/* 2 submitted + 1 discarded records */
	CHECK(skel->bss->avail_data != 3 * rec_sz,
	      "err_avail_size", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->avail_data);
	CHECK(skel->bss->ring_size != page_size,
	      "err_ring_size", "exp %ld, got %ld\n",
	      (long)page_size, skel->bss->ring_size);
	CHECK(skel->bss->cons_pos != 0,
	      "err_cons_pos", "exp %ld, got %ld\n",
	      0L, skel->bss->cons_pos);
	CHECK(skel->bss->prod_pos != 3 * rec_sz,
	      "err_prod_pos", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->prod_pos);

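	/* Note that the discarded record still occupies ring space until it
	 * is consumed, which is why avail_data and prod_pos above count
	 * three records; cons_pos stays 0 because nothing has been consumed
	 * yet.
	 */
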
	/* poll for samples */
	err = ring_buffer__poll(ringbuf, -1);

	/* -EDONE is used as an indicator that we are done */
	if (CHECK(err != -EDONE, "err_done", "done err: %d\n", err))
		goto cleanup;
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

	/* we expect extra polling to return nothing */
	err = ring_buffer__poll(ringbuf, 0);
	if (CHECK(err != 0, "extra_samples", "poll result: %d\n", err))
		goto cleanup;
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);

	/* now validate consumer position is updated and returned */
	trigger_samples();
	CHECK(skel->bss->cons_pos != 3 * rec_sz,
	      "err_cons_pos", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->cons_pos);
	err = ring_buffer__poll(ringbuf, -1);
	CHECK(err <= 0, "poll_err", "err %d\n", err);
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

	/* start poll in background w/ long timeout */
	err = pthread_create(&thread, NULL, poll_thread, (void *)(long)10000);
	if (CHECK(err, "bg_poll", "pthread_create failed: %d\n", err))
		goto cleanup;

	/* turn off notifications now */
	skel->bss->flags = BPF_RB_NO_WAKEUP;
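	/* The BPF side forwards this flags variable to bpf_ringbuf_submit()
	 * and bpf_ringbuf_discard(), so BPF_RB_NO_WAKEUP suppresses the
	 * epoll notification that would otherwise wake up the poller.
	 */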

	/* give background thread a bit of time */
	usleep(50000);
	trigger_samples();
	/* sleeping arbitrarily is bad, but there is no better way to know
	 * that epoll_wait() in the background thread **did not** unblock
	 */
	usleep(50000);
	/* background poll should still be blocked */
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
		goto cleanup;

	/* BPF side did everything right */
	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	/* clear flags to return to "adaptive" notification mode */
	skel->bss->flags = 0;

	/* produce new samples; in adaptive mode a notification is only sent
	 * once the consumer has caught up, and the consumer is now behind,
	 * so none should be triggered
	 */
	trigger_samples();

	/* background poll should still be blocked */
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
		goto cleanup;

	/* still no samples, because consumer is behind */
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	skel->bss->dropped = 0;
	skel->bss->total = 0;
	skel->bss->discarded = 0;

	skel->bss->value = 333;
	syscall(__NR_getpgid);
	/* now force notifications */
	skel->bss->flags = BPF_RB_FORCE_WAKEUP;
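	/* BPF_RB_FORCE_WAKEUP makes the next submit send a notification even
	 * though the consumer is still behind, which is what finally lets
	 * the background ring_buffer__poll() return.
	 */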
	skel->bss->value = 777;
	syscall(__NR_getpgid);

	/* now we should get a pending notification */
	usleep(50000);
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err, "join_bg", "err %d\n", err))
		goto cleanup;

	if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld\n", bg_ret))
		goto cleanup;

	/* due to timing variations, there could still be non-notified
	 * samples, so consume them here to collect all the samples
	 */
	err = ring_buffer__consume(ringbuf);
	CHECK(err < 0, "rb_consume", "failed: %d\n", err);

	/* 3 rounds, 2 samples each */
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);

	/* BPF side did everything right */
	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);

	test_ringbuf__detach(skel);
cleanup:
	ring_buffer__free(ringbuf);
	test_ringbuf__destroy(skel);
}