// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <test_progs.h>
#include <sys/mman.h>
#include <sys/epoll.h>
#include <time.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include "test_ringbuf.lskel.h"
#include "test_ringbuf_map_key.lskel.h"

#define EDONE 7777

static int duration = 0;

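/* User-space view of one ringbuf record payload; the field layout is
 * expected to match the sample struct emitted by the BPF programs behind
 * test_ringbuf.lskel.h / test_ringbuf_map_key.lskel.h.
 */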
struct sample {
	int pid;
	int seq;
	long value;
	char comm[16];
};

static int sample_cnt;

static void atomic_inc(int *cnt)
{
	__atomic_add_fetch(cnt, 1, __ATOMIC_SEQ_CST);
}

static int atomic_xchg(int *cnt, int val)
{
	return __atomic_exchange_n(cnt, val, __ATOMIC_SEQ_CST);
}

static int process_sample(void *ctx, void *data, size_t len)
{
	struct sample *s = data;

	atomic_inc(&sample_cnt);

	switch (s->seq) {
	case 0:
		CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n",
		      333L, s->value);
		return 0;
	case 1:
		CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n",
		      777L, s->value);
		return -EDONE;
	default:
		/* we don't care about the rest */
		return 0;
	}
}

static struct test_ringbuf_map_key_lskel *skel_map_key;
static struct test_ringbuf_lskel *skel;
static struct ring_buffer *ringbuf;

static void trigger_samples()
{
	skel->bss->dropped = 0;
	skel->bss->total = 0;
	skel->bss->discarded = 0;

	/* trigger exactly two samples */
	skel->bss->value = 333;
	syscall(__NR_getpgid);
	skel->bss->value = 777;
	syscall(__NR_getpgid);
}

static void *poll_thread(void *input)
{
	long timeout = (long)input;

	return (void *)(long)ring_buffer__poll(ringbuf, timeout);
}

static void ringbuf_subtest(void)
{
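	/* Each ringbuf record is prefixed by an 8-byte header
	 * (BPF_RINGBUF_HDR_SZ), so rec_sz is the full on-ring footprint of
	 * one struct sample; the position/size checks below rely on it.
	 */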
	const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
	pthread_t thread;
	long bg_ret = -1;
	int err, cnt, rb_fd;
	int page_size = getpagesize();
	void *mmap_ptr, *tmp_ptr;

	skel = test_ringbuf_lskel__open();
	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
		return;

	skel->maps.ringbuf.max_entries = page_size;

	err = test_ringbuf_lskel__load(skel);
	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
		goto cleanup;

	rb_fd = skel->maps.ringbuf.map_fd;
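	/* BPF ringbuf mmap layout, as exercised below: page 0 is the consumer
	 * position (user read/write), page 1 is the producer position (user
	 * read-only), and the data pages start at offset 2 * page_size and
	 * must likewise stay read-only from user space.
	 */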
	/* good read/write cons_pos */
	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
	ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
	tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);
	if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend"))
		goto cleanup;
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");

	/* bad writeable prod_pos */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);
	err = -errno;
	ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos");
	ASSERT_EQ(err, -EPERM, "wr_prod_pos_err");

	/* bad writeable data pages */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	err = -errno;
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one");
	ASSERT_EQ(err, -EPERM, "wr_data_page_one_err");
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two");
	mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all");

	/* good read-only pages */
	mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
	if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
		goto cleanup;

	ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect");
	ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap");
	ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro");

	/* good read-only pages with initial offset */
	mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);
	if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
		goto cleanup;

	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro");

	/* only trigger BPF program for current process */
	skel->bss->pid = getpid();

	ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,
				   process_sample, NULL, NULL);
	if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
		goto cleanup;

	err = test_ringbuf_lskel__attach(skel);
	if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
		goto cleanup;

	trigger_samples();

	/* 2 submitted + 1 discarded records */
	CHECK(skel->bss->avail_data != 3 * rec_sz,
	      "err_avail_size", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->avail_data);
	CHECK(skel->bss->ring_size != page_size,
	      "err_ring_size", "exp %ld, got %ld\n",
	      (long)page_size, skel->bss->ring_size);
	CHECK(skel->bss->cons_pos != 0,
	      "err_cons_pos", "exp %ld, got %ld\n",
	      0L, skel->bss->cons_pos);
	CHECK(skel->bss->prod_pos != 3 * rec_sz,
	      "err_prod_pos", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->prod_pos);

	/* poll for samples */
	err = ring_buffer__poll(ringbuf, -1);

	/* -EDONE is used as an indicator that we are done */
	if (CHECK(err != -EDONE, "err_done", "done err: %d\n", err))
		goto cleanup;
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

	/* we expect extra polling to return nothing */
	err = ring_buffer__poll(ringbuf, 0);
	if (CHECK(err != 0, "extra_samples", "poll result: %d\n", err))
		goto cleanup;
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);

	/* now validate consumer position is updated and returned */
	trigger_samples();
	CHECK(skel->bss->cons_pos != 3 * rec_sz,
	      "err_cons_pos", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->cons_pos);
	err = ring_buffer__poll(ringbuf, -1);
	CHECK(err <= 0, "poll_err", "err %d\n", err);
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

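	/* The rest of this subtest drives a background poller through the
	 * three notification modes: BPF_RB_NO_WAKEUP (no epoll wakeups),
	 * the default adaptive mode (flags == 0), and BPF_RB_FORCE_WAKEUP.
	 */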
	/* start poll in background w/ long timeout */
	err = pthread_create(&thread, NULL, poll_thread, (void *)(long)10000);
	if (CHECK(err, "bg_poll", "pthread_create failed: %d\n", err))
		goto cleanup;

	/* turn off notifications now */
	skel->bss->flags = BPF_RB_NO_WAKEUP;

	/* give background thread a bit of a time */
	usleep(50000);
	trigger_samples();
	/* sleeping arbitrarily is bad, but no better way to know that
	 * epoll_wait() **DID NOT** unblock in background thread
	 */
	usleep(50000);
	/* background poll should still be blocked */
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
		goto cleanup;

	/* BPF side did everything right */
	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	/* clear flags to return to "adaptive" notification mode */
	skel->bss->flags = 0;

	/* produce new samples, no notification should be triggered, because
	 * consumer is now behind
	 */
	trigger_samples();

	/* background poll should still be blocked */
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
		goto cleanup;

	/* still no samples, because consumer is behind */
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	skel->bss->dropped = 0;
	skel->bss->total = 0;
	skel->bss->discarded = 0;

	skel->bss->value = 333;
	syscall(__NR_getpgid);
	/* now force notifications */
	skel->bss->flags = BPF_RB_FORCE_WAKEUP;
	skel->bss->value = 777;
	syscall(__NR_getpgid);

	/* now we should get a pending notification */
	usleep(50000);
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err, "join_bg", "err %d\n", err))
		goto cleanup;

	if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld", bg_ret))
		goto cleanup;

	/* due to timing variations, there could still be non-notified
	 * samples, so consume them here to collect all the samples
	 */
	err = ring_buffer__consume(ringbuf);
	CHECK(err < 0, "rb_consume", "failed: %d\n", err);

	/* 3 rounds, 2 samples each */
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);

	/* BPF side did everything right */
	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);

	test_ringbuf_lskel__detach(skel);
cleanup:
	ring_buffer__free(ringbuf);
	test_ringbuf_lskel__destroy(skel);
}

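/* Callback for the ringbuf_map_key subtest: the BPF side is expected to use
 * the whole sample as the key into hash_map, so look the received sample up
 * there and verify the stored value.
 */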
static int process_map_key_sample(void *ctx, void *data, size_t len)
{
	struct sample *s;
	int err, val;

	s = data;
	switch (s->seq) {
	case 1:
		ASSERT_EQ(s->value, 42, "sample_value");
		err = bpf_map_lookup_elem(skel_map_key->maps.hash_map.map_fd,
					  s, &val);
		ASSERT_OK(err, "hash_map bpf_map_lookup_elem");
		ASSERT_EQ(val, 1, "hash_map val");
		return -EDONE;
	default:
		return 0;
	}
}

static void ringbuf_map_key_subtest(void)
{
	int err;

	skel_map_key = test_ringbuf_map_key_lskel__open();
	if (!ASSERT_OK_PTR(skel_map_key, "test_ringbuf_map_key_lskel__open"))
		return;

	skel_map_key->maps.ringbuf.max_entries = getpagesize();
	skel_map_key->bss->pid = getpid();

	err = test_ringbuf_map_key_lskel__load(skel_map_key);
	if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__load"))
		goto cleanup;

	ringbuf = ring_buffer__new(skel_map_key->maps.ringbuf.map_fd,
				   process_map_key_sample, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
		goto cleanup;

	err = test_ringbuf_map_key_lskel__attach(skel_map_key);
	if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__attach"))
		goto cleanup_ringbuf;

	syscall(__NR_getpgid);
	ASSERT_EQ(skel_map_key->bss->seq, 1, "skel_map_key->bss->seq");
	err = ring_buffer__poll(ringbuf, -1);
	ASSERT_EQ(err, -EDONE, "ring_buffer__poll");

cleanup_ringbuf:
	ring_buffer__free(ringbuf);
cleanup:
	test_ringbuf_map_key_lskel__destroy(skel_map_key);
}

void test_ringbuf(void)
{
	if (test__start_subtest("ringbuf"))
		ringbuf_subtest();
	if (test__start_subtest("ringbuf_map_key"))
		ringbuf_map_key_subtest();
}