xref: /openbmc/linux/tools/testing/selftests/bpf/xskxceiver.c (revision 17f1034dd76d7465d4c0948c5280c6fc64ee0542)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2020 Intel Corporation. */
3 
4 /*
5  * Some functions in this program are taken from
6  * Linux kernel samples/bpf/xdpsock* and modified
7  * for use.
8  *
9  * See test_xsk.sh for detailed information on test topology
10  * and prerequisite network setup.
11  *
12  * This test program contains two threads, where each thread drives a single
13  * socket with a unique UMEM. It validates in-order packet delivery and packet
14  * content by sending packets from one socket to the other.
15  *
16  * Tests Information:
17  * ------------------
18  * These selftests test AF_XDP SKB and Native/DRV modes using veth
19  * Virtual Ethernet interfaces.
20  *
21  * For each mode, the following tests are run:
22  *    a. nopoll - soft-irq processing in run-to-completion mode
23  *    b. poll - using poll() syscall
24  *    c. Socket Teardown
25  *       Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy
26  *       both sockets, then repeat multiple times. Only nopoll mode is used
27  *    d. Bi-directional sockets
28  *       Configure sockets as bi-directional tx/rx sockets, set up fill and
29  *       completion rings on each socket, and tx/rx in both directions. Only
30  *       nopoll mode is used
31  *    e. Statistics
32  *       Trigger some error conditions and ensure that the appropriate statistics
33  *       are incremented. Within this test, the following statistics are tested:
34  *       i.   rx dropped
35  *            Increase the UMEM frame headroom to a value which results in
36  *            insufficient space in the rx buffer for both the packet and the headroom.
37  *       ii.  tx invalid
38  *            Set the 'len' field of tx descriptors to an invalid value (umem frame
39  *            size + 1).
40  *       iii. rx ring full
41  *            Reduce the size of the RX ring to a fraction of the fill ring size.
42  *       iv.  fill queue empty
43  *            Do not populate the fill queue and then try to receive pkts.
44  *    f. bpf_link resource persistence
45  *       Configure sockets at indexes 0 and 1, run traffic on queue id 0,
46  *       then remove the xsk sockets from queue 0 on both veth interfaces and
47  *       finally run traffic on queue id 1
48  *    g. unaligned mode
49  *    h. tests for invalid and corner-case Tx descriptors, checking that the
50  *       invalid ones are discarded and the corner-case ones let through, respectively.
51  *    i. 2K frame size tests
52  *
53  * Total tests: 12
54  *
55  * Flow:
56  * -----
57  * - Single process spawns two threads: Tx and Rx
58  * - Each of these two threads attach to a veth interface
59  * - Each thread creates one AF_XDP socket connected to a unique umem for each
60  *   veth interface
61  * - Tx thread transmits a number of packets from veth<xxxx> to veth<yyyy>
62  * - Rx thread verifies that all packets are received, delivered in order,
63  *   and have the right content
64  *
65  * Enable/disable packet dump mode:
66  * --------------------------------
67  * To enable a dump of the L2 - L4 headers and payload of each packet on STDOUT,
68  * add the parameter -D to the params array in test_xsk.sh, e.g. params=("-S" "-D")
69  */
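
/*
 * Example stand-alone invocation (interface names are placeholders; the
 * binary is normally launched by test_xsk.sh):
 *   ./xskxceiver -i <tx_veth> -i <rx_veth> -v
 * The first -i names the Tx interface, the second the Rx interface.
 */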
70 
71 #define _GNU_SOURCE
72 #include <assert.h>
73 #include <fcntl.h>
74 #include <errno.h>
75 #include <getopt.h>
76 #include <asm/barrier.h>
77 #include <linux/if_link.h>
78 #include <linux/if_ether.h>
79 #include <linux/mman.h>
80 #include <arpa/inet.h>
81 #include <net/if.h>
82 #include <locale.h>
83 #include <poll.h>
84 #include <pthread.h>
85 #include <signal.h>
86 #include <stdio.h>
87 #include <stdlib.h>
88 #include <string.h>
89 #include <stddef.h>
90 #include <sys/mman.h>
91 #include <sys/socket.h>
92 #include <sys/time.h>
93 #include <sys/types.h>
94 #include <time.h>
95 #include <unistd.h>
96 
97 #include "xsk_xdp_progs.skel.h"
98 #include "xsk.h"
99 #include "xskxceiver.h"
100 #include <bpf/bpf.h>
101 #include <linux/filter.h>
102 #include "../kselftest.h"
103 #include "xsk_xdp_metadata.h"
104 
105 static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
106 static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
107 
108 static void __exit_with_error(int error, const char *file, const char *func, int line)
109 {
110 	ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error,
111 			      strerror(error));
112 	ksft_exit_xfail();
113 }
114 
115 #define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)
116 #define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? "BUSY-POLL " : ""
117 static char *mode_string(struct test_spec *test)
118 {
119 	switch (test->mode) {
120 	case TEST_MODE_SKB:
121 		return "SKB";
122 	case TEST_MODE_DRV:
123 		return "DRV";
124 	case TEST_MODE_ZC:
125 		return "ZC";
126 	default:
127 		return "BOGUS";
128 	}
129 }
130 
131 static void report_failure(struct test_spec *test)
132 {
133 	if (test->fail)
134 		return;
135 
136 	ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test),
137 			      test->name);
138 	test->fail = true;
139 }
140 
141 /* The payload is a word consisting of a packet sequence number in the upper
142  * 16 bits and an intra-packet data sequence number in the lower 16 bits. So the
143  * 3rd packet's 5th word of data will contain (2 << 16) | 4, as both are numbered from 0.
144  */
145 static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size)
146 {
147 	u32 *ptr = (u32 *)dest, i;
148 
149 	start /= sizeof(*ptr);
150 	size /= sizeof(*ptr);
151 	for (i = 0; i < size; i++)
152 		ptr[i] = htonl(pkt_nb << 16 | (i + start));
153 }
154 
155 static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr)
156 {
157 	memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN);
158 	memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN);
159 	eth_hdr->h_proto = htons(ETH_P_LOOPBACK);
160 }
161 
162 static bool is_umem_valid(struct ifobject *ifobj)
163 {
164 	return !!ifobj->umem->umem;
165 }
166 
167 static u32 mode_to_xdp_flags(enum test_mode mode)
168 {
169 	return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
170 }
171 
172 static u64 umem_size(struct xsk_umem_info *umem)
173 {
174 	return umem->num_frames * umem->frame_size;
175 }
176 
177 static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer,
178 			      u64 size)
179 {
180 	struct xsk_umem_config cfg = {
181 		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
182 		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
183 		.frame_size = umem->frame_size,
184 		.frame_headroom = umem->frame_headroom,
185 		.flags = XSK_UMEM__DEFAULT_FLAGS
186 	};
187 	int ret;
188 
189 	if (umem->unaligned_mode)
190 		cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
191 
192 	ret = xsk_umem__create(&umem->umem, buffer, size,
193 			       &umem->fq, &umem->cq, &cfg);
194 	if (ret)
195 		return ret;
196 
197 	umem->buffer = buffer;
198 	if (ifobj->shared_umem && ifobj->rx_on) {
199 		umem->base_addr = umem_size(umem);
200 		umem->next_buffer = umem_size(umem);
201 	}
202 
203 	return 0;
204 }
205 
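/* Hand out the next frame-sized buffer in the umem, wrapping back to
 * base_addr once the end of the umem area has been reached.
 */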
206 static u64 umem_alloc_buffer(struct xsk_umem_info *umem)
207 {
208 	u64 addr;
209 
210 	addr = umem->next_buffer;
211 	umem->next_buffer += umem->frame_size;
212 	if (umem->next_buffer >= umem->base_addr + umem_size(umem))
213 		umem->next_buffer = umem->base_addr;
214 
215 	return addr;
216 }
217 
218 static void umem_reset_alloc(struct xsk_umem_info *umem)
219 {
220 	umem->next_buffer = 0;
221 }
222 
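/* Opt the socket into preferred busy polling: SO_BUSY_POLL sets the busy-poll
 * timeout in microseconds, and SO_BUSY_POLL_BUDGET caps how many packets the
 * driver may process per busy-poll call.
 */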
223 static void enable_busy_poll(struct xsk_socket_info *xsk)
224 {
225 	int sock_opt;
226 
227 	sock_opt = 1;
228 	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
229 		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
230 		exit_with_error(errno);
231 
232 	sock_opt = 20;
233 	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
234 		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
235 		exit_with_error(errno);
236 
237 	sock_opt = BATCH_SIZE;
238 	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
239 		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
240 		exit_with_error(errno);
241 }
242 
243 static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
244 				  struct ifobject *ifobject, bool shared)
245 {
246 	struct xsk_socket_config cfg = {};
247 	struct xsk_ring_cons *rxr;
248 	struct xsk_ring_prod *txr;
249 
250 	xsk->umem = umem;
251 	cfg.rx_size = xsk->rxqsize;
252 	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
253 	cfg.bind_flags = ifobject->bind_flags;
254 	if (shared)
255 		cfg.bind_flags |= XDP_SHARED_UMEM;
256 
257 	txr = ifobject->tx_on ? &xsk->tx : NULL;
258 	rxr = ifobject->rx_on ? &xsk->rx : NULL;
259 	return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);
260 }
261 
262 static bool ifobj_zc_avail(struct ifobject *ifobject)
263 {
264 	size_t umem_sz = DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
265 	int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
266 	struct xsk_socket_info *xsk;
267 	struct xsk_umem_info *umem;
268 	bool zc_avail = false;
269 	void *bufs;
270 	int ret;
271 
272 	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
273 	if (bufs == MAP_FAILED)
274 		exit_with_error(errno);
275 
276 	umem = calloc(1, sizeof(struct xsk_umem_info));
277 	if (!umem) {
278 		munmap(bufs, umem_sz);
279 		exit_with_error(ENOMEM);
280 	}
281 	umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
282 	ret = xsk_configure_umem(ifobject, umem, bufs, umem_sz);
283 	if (ret)
284 		exit_with_error(-ret);
285 
286 	xsk = calloc(1, sizeof(struct xsk_socket_info));
287 	if (!xsk)
288 		goto out;
289 	ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY;
290 	ifobject->rx_on = true;
291 	xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
292 	ret = __xsk_configure_socket(xsk, umem, ifobject, false);
293 	if (!ret)
294 		zc_avail = true;
295 
296 	xsk_socket__delete(xsk->xsk);
297 	free(xsk);
298 out:
299 	munmap(umem->buffer, umem_sz);
300 	xsk_umem__delete(umem->umem);
301 	free(umem);
302 	return zc_avail;
303 }
304 
305 static struct option long_options[] = {
306 	{"interface", required_argument, 0, 'i'},
307 	{"busy-poll", no_argument, 0, 'b'},
308 	{"verbose", no_argument, 0, 'v'},
309 	{0, 0, 0, 0}
310 };
311 
312 static void usage(const char *prog)
313 {
314 	const char *str =
315 		"  Usage: %s [OPTIONS]\n"
316 		"  Options:\n"
317 		"  -i, --interface      Use interface\n"
318 		"  -v, --verbose        Verbose output\n"
319 		"  -b, --busy-poll      Enable busy poll\n";
320 
321 	ksft_print_msg(str, prog);
322 }
323 
324 static bool validate_interface(struct ifobject *ifobj)
325 {
326 	if (!strcmp(ifobj->ifname, ""))
327 		return false;
328 	return true;
329 }
330 
331 static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, int argc,
332 			       char **argv)
333 {
334 	struct ifobject *ifobj;
335 	u32 interface_nb = 0;
336 	int option_index, c;
337 
338 	opterr = 0;
339 
340 	for (;;) {
341 		c = getopt_long(argc, argv, "i:vb", long_options, &option_index);
342 		if (c == -1)
343 			break;
344 
345 		switch (c) {
346 		case 'i':
347 			if (interface_nb == 0)
348 				ifobj = ifobj_tx;
349 			else if (interface_nb == 1)
350 				ifobj = ifobj_rx;
351 			else
352 				break;
353 
354 			memcpy(ifobj->ifname, optarg,
355 			       min_t(size_t, MAX_INTERFACE_NAME_CHARS, strlen(optarg)));
356 
357 			ifobj->ifindex = if_nametoindex(ifobj->ifname);
358 			if (!ifobj->ifindex)
359 				exit_with_error(errno);
360 
361 			interface_nb++;
362 			break;
363 		case 'v':
364 			opt_verbose = true;
365 			break;
366 		case 'b':
367 			ifobj_tx->busy_poll = true;
368 			ifobj_rx->busy_poll = true;
369 			break;
370 		default:
371 			usage(basename(argv[0]));
372 			ksft_exit_xfail();
373 		}
374 	}
375 }
376 
377 static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
378 			     struct ifobject *ifobj_rx)
379 {
380 	u32 i, j;
381 
382 	for (i = 0; i < MAX_INTERFACES; i++) {
383 		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
384 
385 		ifobj->xsk = &ifobj->xsk_arr[0];
386 		ifobj->use_poll = false;
387 		ifobj->use_fill_ring = true;
388 		ifobj->release_rx = true;
389 		ifobj->validation_func = NULL;
390 		ifobj->use_metadata = false;
391 
392 		if (i == 0) {
393 			ifobj->rx_on = false;
394 			ifobj->tx_on = true;
395 			ifobj->pkt_stream = test->tx_pkt_stream_default;
396 		} else {
397 			ifobj->rx_on = true;
398 			ifobj->tx_on = false;
399 			ifobj->pkt_stream = test->rx_pkt_stream_default;
400 		}
401 
402 		memset(ifobj->umem, 0, sizeof(*ifobj->umem));
403 		ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
404 		ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
405 
406 		for (j = 0; j < MAX_SOCKETS; j++) {
407 			memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
408 			ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
409 		}
410 	}
411 
412 	test->ifobj_tx = ifobj_tx;
413 	test->ifobj_rx = ifobj_rx;
414 	test->current_step = 0;
415 	test->total_steps = 1;
416 	test->nb_sockets = 1;
417 	test->fail = false;
418 	test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog;
419 	test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk;
420 	test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog;
421 	test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk;
422 }
423 
424 static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
425 			   struct ifobject *ifobj_rx, enum test_mode mode)
426 {
427 	struct pkt_stream *tx_pkt_stream;
428 	struct pkt_stream *rx_pkt_stream;
429 	u32 i;
430 
431 	tx_pkt_stream = test->tx_pkt_stream_default;
432 	rx_pkt_stream = test->rx_pkt_stream_default;
433 	memset(test, 0, sizeof(*test));
434 	test->tx_pkt_stream_default = tx_pkt_stream;
435 	test->rx_pkt_stream_default = rx_pkt_stream;
436 
437 	for (i = 0; i < MAX_INTERFACES; i++) {
438 		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
439 
440 		ifobj->bind_flags = XDP_USE_NEED_WAKEUP;
441 		if (mode == TEST_MODE_ZC)
442 			ifobj->bind_flags |= XDP_ZEROCOPY;
443 		else
444 			ifobj->bind_flags |= XDP_COPY;
445 	}
446 
447 	test->mode = mode;
448 	__test_spec_init(test, ifobj_tx, ifobj_rx);
449 }
450 
451 static void test_spec_reset(struct test_spec *test)
452 {
453 	__test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
454 }
455 
456 static void test_spec_set_name(struct test_spec *test, const char *name)
457 {
458 	strncpy(test->name, name, MAX_TEST_NAME_SIZE);
459 }
460 
461 static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
462 				   struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx,
463 				   struct bpf_map *xskmap_tx)
464 {
465 	test->xdp_prog_rx = xdp_prog_rx;
466 	test->xdp_prog_tx = xdp_prog_tx;
467 	test->xskmap_rx = xskmap_rx;
468 	test->xskmap_tx = xskmap_tx;
469 }
470 
471 static void pkt_stream_reset(struct pkt_stream *pkt_stream)
472 {
473 	if (pkt_stream)
474 		pkt_stream->current_pkt_nb = 0;
475 }
476 
477 static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream)
478 {
479 	if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts)
480 		return NULL;
481 
482 	return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
483 }
484 
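/* Return the next packet expected on the Rx side, skipping entries marked
 * invalid (those should never reach the socket). *pkts_sent is bumped for
 * every entry advanced past, valid or not, so pacing can account for all
 * transmitted packets.
 */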
485 static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
486 {
487 	while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
488 		(*pkts_sent)++;
489 		if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid)
490 			return &pkt_stream->pkts[pkt_stream->current_pkt_nb++];
491 		pkt_stream->current_pkt_nb++;
492 	}
493 	return NULL;
494 }
495 
496 static void pkt_stream_delete(struct pkt_stream *pkt_stream)
497 {
498 	free(pkt_stream->pkts);
499 	free(pkt_stream);
500 }
501 
502 static void pkt_stream_restore_default(struct test_spec *test)
503 {
504 	struct pkt_stream *tx_pkt_stream = test->ifobj_tx->pkt_stream;
505 	struct pkt_stream *rx_pkt_stream = test->ifobj_rx->pkt_stream;
506 
507 	if (tx_pkt_stream != test->tx_pkt_stream_default) {
508 		pkt_stream_delete(test->ifobj_tx->pkt_stream);
509 		test->ifobj_tx->pkt_stream = test->tx_pkt_stream_default;
510 	}
511 
512 	if (rx_pkt_stream != test->rx_pkt_stream_default) {
513 		pkt_stream_delete(test->ifobj_rx->pkt_stream);
514 		test->ifobj_rx->pkt_stream = test->rx_pkt_stream_default;
515 	}
516 }
517 
518 static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
519 {
520 	struct pkt_stream *pkt_stream;
521 
522 	pkt_stream = calloc(1, sizeof(*pkt_stream));
523 	if (!pkt_stream)
524 		return NULL;
525 
526 	pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts));
527 	if (!pkt_stream->pkts) {
528 		free(pkt_stream);
529 		return NULL;
530 	}
531 
532 	pkt_stream->nb_pkts = nb_pkts;
533 	return pkt_stream;
534 }
535 
536 static bool pkt_continues(const struct xdp_desc *desc)
537 {
538 	return desc->options & XDP_PKT_CONTD;
539 }
540 
541 static u32 ceil_u32(u32 a, u32 b)
542 {
543 	return (a + b - 1) / b;
544 }
545 
546 static u32 pkt_nb_frags(u32 frame_size, struct pkt *pkt)
547 {
548 	if (!pkt || !pkt->valid)
549 		return 1;
550 	return ceil_u32(pkt->len, frame_size);
551 }
552 
553 static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, int offset, u32 len)
554 {
555 	pkt->offset = offset;
556 	pkt->len = len;
557 	if (len > MAX_ETH_JUMBO_SIZE)
558 		pkt->valid = false;
559 	else
560 		pkt->valid = true;
561 }
562 
563 static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len)
564 {
565 	return ceil_u32(len, umem->frame_size) * umem->frame_size;
566 }
567 
568 static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len)
569 {
570 	struct pkt_stream *pkt_stream;
571 	u32 i;
572 
573 	pkt_stream = __pkt_stream_alloc(nb_pkts);
574 	if (!pkt_stream)
575 		exit_with_error(ENOMEM);
576 
577 	pkt_stream->nb_pkts = nb_pkts;
578 	pkt_stream->max_pkt_len = pkt_len;
579 	for (i = 0; i < nb_pkts; i++) {
580 		struct pkt *pkt = &pkt_stream->pkts[i];
581 
582 		pkt_set(umem, pkt, 0, pkt_len);
583 		pkt->pkt_nb = i;
584 	}
585 
586 	return pkt_stream;
587 }
588 
589 static struct pkt_stream *pkt_stream_clone(struct xsk_umem_info *umem,
590 					   struct pkt_stream *pkt_stream)
591 {
592 	return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
593 }
594 
595 static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
596 {
597 	struct pkt_stream *pkt_stream;
598 
599 	pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len);
600 	test->ifobj_tx->pkt_stream = pkt_stream;
601 	pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len);
602 	test->ifobj_rx->pkt_stream = pkt_stream;
603 }
604 
605 static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
606 				      int offset)
607 {
608 	struct xsk_umem_info *umem = ifobj->umem;
609 	struct pkt_stream *pkt_stream;
610 	u32 i;
611 
612 	pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream);
613 	for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2)
614 		pkt_set(umem, &pkt_stream->pkts[i], offset, pkt_len);
615 
616 	ifobj->pkt_stream = pkt_stream;
617 }
618 
619 static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
620 {
621 	__pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);
622 	__pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
623 }
624 
625 static void pkt_stream_receive_half(struct test_spec *test)
626 {
627 	struct xsk_umem_info *umem = test->ifobj_rx->umem;
628 	struct pkt_stream *pkt_stream = test->ifobj_tx->pkt_stream;
629 	u32 i;
630 
631 	test->ifobj_rx->pkt_stream = pkt_stream_generate(umem, pkt_stream->nb_pkts,
632 							 pkt_stream->pkts[0].len);
633 	pkt_stream = test->ifobj_rx->pkt_stream;
634 	for (i = 1; i < pkt_stream->nb_pkts; i += 2)
635 		pkt_stream->pkts[i].valid = false;
636 }
637 
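/* For a valid packet, allocate a fresh umem buffer and add the packet's
 * offset to it. Invalid packets return just the raw offset so that illegal
 * addresses can be generated on purpose.
 */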
638 static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem)
639 {
640 	if (!pkt->valid)
641 		return pkt->offset;
642 	return pkt->offset + umem_alloc_buffer(umem);
643 }
644 
645 static void pkt_stream_cancel(struct pkt_stream *pkt_stream)
646 {
647 	pkt_stream->current_pkt_nb--;
648 }
649 
650 static void pkt_generate(struct ifobject *ifobject, u64 addr, u32 len, u32 pkt_nb,
651 			 u32 bytes_written)
652 {
653 	void *data = xsk_umem__get_data(ifobject->umem->buffer, addr);
654 
655 	if (len < MIN_PKT_SIZE)
656 		return;
657 
658 	if (!bytes_written) {
659 		gen_eth_hdr(ifobject, data);
660 
661 		len -= PKT_HDR_SIZE;
662 		data += PKT_HDR_SIZE;
663 	} else {
664 		bytes_written -= PKT_HDR_SIZE;
665 	}
666 
667 	write_payload(data, pkt_nb, bytes_written, len);
668 }
669 
670 static void __pkt_stream_generate_custom(struct ifobject *ifobj,
671 					 struct pkt *pkts, u32 nb_pkts)
672 {
673 	struct pkt_stream *pkt_stream;
674 	u32 i;
675 
676 	pkt_stream = __pkt_stream_alloc(nb_pkts);
677 	if (!pkt_stream)
678 		exit_with_error(ENOMEM);
679 
680 	for (i = 0; i < nb_pkts; i++) {
681 		struct pkt *pkt = &pkt_stream->pkts[i];
682 
683 		pkt->offset = pkts[i].offset;
684 		pkt->len = pkts[i].len;
685 		pkt->pkt_nb = i;
686 		pkt->valid = pkts[i].valid;
687 		if (pkt->len > pkt_stream->max_pkt_len)
688 			pkt_stream->max_pkt_len = pkt->len;
689 	}
690 
691 	ifobj->pkt_stream = pkt_stream;
692 }
693 
694 static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
695 {
696 	__pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts);
697 	__pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts);
698 }
699 
700 static void pkt_print_data(u32 *data, u32 cnt)
701 {
702 	u32 i;
703 
704 	for (i = 0; i < cnt; i++) {
705 		u32 seqnum, pkt_nb;
706 
707 		seqnum = ntohl(*data) & 0xffff;
708 		pkt_nb = ntohl(*data) >> 16;
709 		fprintf(stdout, "%u:%u ", pkt_nb, seqnum);
710 		data++;
711 	}
712 }
713 
714 static void pkt_dump(void *pkt, u32 len, bool eth_header)
715 {
716 	struct ethhdr *ethhdr = pkt;
717 	u32 i, *data;
718 
719 	if (eth_header) {
720 		/* Extract L2 frame */
721 		fprintf(stdout, "DEBUG>> L2: dst mac: ");
722 		for (i = 0; i < ETH_ALEN; i++)
723 			fprintf(stdout, "%02X", ethhdr->h_dest[i]);
724 
725 		fprintf(stdout, "\nDEBUG>> L2: src mac: ");
726 		for (i = 0; i < ETH_ALEN; i++)
727 			fprintf(stdout, "%02X", ethhdr->h_source[i]);
728 
729 		data = pkt + PKT_HDR_SIZE;
730 	} else {
731 		data = pkt;
732 	}
733 
734 	/* Extract L5 frame */
735 	fprintf(stdout, "\nDEBUG>> L5: seqnum: ");
736 	pkt_print_data(data, PKT_DUMP_NB_TO_PRINT);
737 	fprintf(stdout, "....");
738 	if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) {
739 		fprintf(stdout, "\n.... ");
740 		pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT,
741 			       PKT_DUMP_NB_TO_PRINT);
742 	}
743 	fprintf(stdout, "\n---------------------------------------\n");
744 }
745 
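/* Check that the buffer address lands at the in-frame offset implied by the
 * packet offset, the configured frame headroom and XDP_PACKET_HEADROOM.
 */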
746 static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr)
747 {
748 	u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
749 	u32 offset = addr % umem->frame_size, expected_offset;
750 	int pkt_offset = pkt->valid ? pkt->offset : 0;
751 
752 	if (!umem->unaligned_mode)
753 		pkt_offset = 0;
754 
755 	expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;
756 
757 	if (offset == expected_offset)
758 		return true;
759 
760 	ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
761 	return false;
762 }
763 
764 static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr)
765 {
766 	void *data = xsk_umem__get_data(buffer, addr);
767 	struct xdp_info *meta = data - sizeof(struct xdp_info);
768 
769 	if (meta->count != pkt->pkt_nb) {
770 		ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%d]\n",
771 			       __func__, pkt->pkt_nb, meta->count);
772 		return false;
773 	}
774 
775 	return true;
776 }
777 
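/* Validate a single Rx fragment: it must lie entirely inside the umem, must
 * not cross a frame boundary in aligned mode, and its first and last payload
 * words must carry the expected packet number and sequence numbers.
 */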
778 static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 expected_pkt_nb,
779 			  u32 bytes_processed)
780 {
781 	u32 seqnum, pkt_nb, *pkt_data, words_to_end, expected_seqnum;
782 	void *data = xsk_umem__get_data(umem->buffer, addr);
783 
784 	addr -= umem->base_addr;
785 
786 	if (addr >= umem->num_frames * umem->frame_size ||
787 	    addr + len > umem->num_frames * umem->frame_size) {
788 		ksft_print_msg("Frag invalid addr: %llx len: %u\n", addr, len);
789 		return false;
790 	}
791 	if (!umem->unaligned_mode && addr % umem->frame_size + len > umem->frame_size) {
792 		ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n", addr, len);
793 		return false;
794 	}
795 
796 	pkt_data = data;
797 	if (!bytes_processed) {
798 		pkt_data += PKT_HDR_SIZE / sizeof(*pkt_data);
799 		len -= PKT_HDR_SIZE;
800 	} else {
801 		bytes_processed -= PKT_HDR_SIZE;
802 	}
803 
804 	expected_seqnum = bytes_processed / sizeof(*pkt_data);
805 	seqnum = ntohl(*pkt_data) & 0xffff;
806 	pkt_nb = ntohl(*pkt_data) >> 16;
807 
808 	if (expected_pkt_nb != pkt_nb) {
809 		ksft_print_msg("[%s] expected pkt_nb [%u], got pkt_nb [%u]\n",
810 			       __func__, expected_pkt_nb, pkt_nb);
811 		goto error;
812 	}
813 	if (expected_seqnum != seqnum) {
814 		ksft_print_msg("[%s] expected seqnum at start [%u], got seqnum [%u]\n",
815 			       __func__, expected_seqnum, seqnum);
816 		goto error;
817 	}
818 
819 	words_to_end = len / sizeof(*pkt_data) - 1;
820 	pkt_data += words_to_end;
821 	seqnum = ntohl(*pkt_data) & 0xffff;
822 	expected_seqnum += words_to_end;
823 	if (expected_seqnum != seqnum) {
824 		ksft_print_msg("[%s] expected seqnum at end [%u], got seqnum [%u]\n",
825 			       __func__, expected_seqnum, seqnum);
826 		goto error;
827 	}
828 
829 	return true;
830 
831 error:
832 	pkt_dump(data, len, !bytes_processed);
833 	return false;
834 }
835 
836 static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
837 {
838 	if (!pkt) {
839 		ksft_print_msg("[%s] too many packets received\n", __func__);
840 		return false;
841 	}
842 
843 	if (pkt->len != len) {
844 		ksft_print_msg("[%s] expected packet length [%d], got length [%d]\n",
845 			       __func__, pkt->len, len);
846 		pkt_dump(xsk_umem__get_data(buffer, addr), len, true);
847 		return false;
848 	}
849 
850 	return true;
851 }
852 
853 static void kick_tx(struct xsk_socket_info *xsk)
854 {
855 	int ret;
856 
857 	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
858 	if (ret >= 0)
859 		return;
860 	if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
861 		usleep(100);
862 		return;
863 	}
864 	exit_with_error(errno);
865 }
866 
867 static void kick_rx(struct xsk_socket_info *xsk)
868 {
869 	int ret;
870 
871 	ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
872 	if (ret < 0)
873 		exit_with_error(errno);
874 }
875 
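/* Reap up to batch_size entries from the completion ring, waking up the
 * kernel first if needed, and decrease the outstanding Tx count accordingly.
 */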
876 static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
877 {
878 	unsigned int rcvd;
879 	u32 idx;
880 
881 	if (xsk_ring_prod__needs_wakeup(&xsk->tx))
882 		kick_tx(xsk);
883 
884 	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
885 	if (rcvd) {
886 		if (rcvd > xsk->outstanding_tx) {
887 			u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);
888 
889 			ksft_print_msg("[%s] Too many packets completed\n", __func__);
890 			ksft_print_msg("Last completion address: %llx\n", addr);
891 			return TEST_FAILURE;
892 		}
893 
894 		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
895 		xsk->outstanding_tx -= rcvd;
896 	}
897 
898 	return TEST_PASS;
899 }
900 
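/* Rx worker loop: receive descriptors in batches, validate every fragment and
 * each completed packet against the expected packet stream, refill the fill
 * ring, and give up if nothing arrives before the timeout expires.
 */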
901 static int receive_pkts(struct test_spec *test, struct pollfd *fds)
902 {
903 	struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
904 	struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream;
905 	struct xsk_socket_info *xsk = test->ifobj_rx->xsk;
906 	u32 idx_rx = 0, idx_fq = 0, rcvd, pkts_sent = 0;
907 	struct ifobject *ifobj = test->ifobj_rx;
908 	struct xsk_umem_info *umem = xsk->umem;
909 	struct pkt *pkt;
910 	int ret;
911 
912 	ret = gettimeofday(&tv_now, NULL);
913 	if (ret)
914 		exit_with_error(errno);
915 	timeradd(&tv_now, &tv_timeout, &tv_end);
916 
917 	pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
918 	while (pkt) {
919 		u32 frags_processed = 0, nb_frags = 0, pkt_len = 0;
920 		u64 first_addr;
921 
922 		ret = gettimeofday(&tv_now, NULL);
923 		if (ret)
924 			exit_with_error(errno);
925 		if (timercmp(&tv_now, &tv_end, >)) {
926 			ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
927 			return TEST_FAILURE;
928 		}
929 
930 		kick_rx(xsk);
931 		if (ifobj->use_poll) {
932 			ret = poll(fds, 1, POLL_TMOUT);
933 			if (ret < 0)
934 				exit_with_error(errno);
935 
936 			if (!ret) {
937 				if (!is_umem_valid(test->ifobj_tx))
938 					return TEST_PASS;
939 
940 				ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
941 				return TEST_FAILURE;
942 
943 			}
944 
945 			if (!(fds->revents & POLLIN))
946 				continue;
947 		}
948 
949 		rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
950 		if (!rcvd)
951 			continue;
952 
953 		if (ifobj->use_fill_ring) {
954 			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
955 			while (ret != rcvd) {
956 				if (ret < 0)
957 					exit_with_error(-ret);
958 				if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
959 					ret = poll(fds, 1, POLL_TMOUT);
960 					if (ret < 0)
961 						exit_with_error(errno);
962 				}
963 				ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
964 			}
965 		}
966 
967 		while (frags_processed < rcvd) {
968 			const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
969 			u64 addr = desc->addr, orig;
970 
971 			orig = xsk_umem__extract_addr(addr);
972 			addr = xsk_umem__add_offset_to_addr(addr);
973 
974 			if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
975 			    !is_offset_correct(umem, pkt, addr) ||
976 			    (ifobj->use_metadata && !is_metadata_correct(pkt, umem->buffer, addr)))
977 				return TEST_FAILURE;
978 
979 			if (!nb_frags++)
980 				first_addr = addr;
981 			frags_processed++;
982 			pkt_len += desc->len;
983 			if (ifobj->use_fill_ring)
984 				*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
985 
986 			if (pkt_continues(desc))
987 				continue;
988 
989 			/* The complete packet has been received */
990 			if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) ||
991 			    !is_offset_correct(umem, pkt, addr))
992 				return TEST_FAILURE;
993 
994 			pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
995 			nb_frags = 0;
996 			pkt_len = 0;
997 		}
998 
999 		if (nb_frags) {
1000 			/* In the middle of a packet. Start over from beginning of packet. */
1001 			idx_rx -= nb_frags;
1002 			xsk_ring_cons__cancel(&xsk->rx, nb_frags);
1003 			if (ifobj->use_fill_ring) {
1004 				idx_fq -= nb_frags;
1005 				xsk_ring_prod__cancel(&umem->fq, nb_frags);
1006 			}
1007 			frags_processed -= nb_frags;
1008 		}
1009 
1010 		if (ifobj->use_fill_ring)
1011 			xsk_ring_prod__submit(&umem->fq, frags_processed);
1012 		if (ifobj->release_rx)
1013 			xsk_ring_cons__release(&xsk->rx, frags_processed);
1014 
1015 		pthread_mutex_lock(&pacing_mutex);
1016 		pkts_in_flight -= pkts_sent;
1017 		pthread_mutex_unlock(&pacing_mutex);
1018 		pkts_sent = 0;
1019 	}
1020 
1021 	return TEST_PASS;
1022 }
1023 
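/* Queue up to BATCH_SIZE frames worth of Tx descriptors, splitting packets
 * larger than one frame into fragments chained with XDP_PKT_CONTD, and
 * throttle transmission when too many packets are already in flight.
 */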
1024 static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeout)
1025 {
1026 	u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len;
1027 	struct pkt_stream *pkt_stream = ifobject->pkt_stream;
1028 	struct xsk_socket_info *xsk = ifobject->xsk;
1029 	struct xsk_umem_info *umem = ifobject->umem;
1030 	bool use_poll = ifobject->use_poll;
1031 	int ret;
1032 
1033 	buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
1034 	/* pkts_in_flight might be negative if many invalid packets are sent */
1035 	if (pkts_in_flight >= (int)((umem_size(umem) - BATCH_SIZE * buffer_len) / buffer_len)) {
1036 		kick_tx(xsk);
1037 		return TEST_CONTINUE;
1038 	}
1039 
1040 	while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
1041 		if (use_poll) {
1042 			ret = poll(fds, 1, POLL_TMOUT);
1043 			if (timeout) {
1044 				if (ret < 0) {
1045 					ksft_print_msg("ERROR: [%s] Poll error %d\n",
1046 						       __func__, errno);
1047 					return TEST_FAILURE;
1048 				}
1049 				if (ret == 0)
1050 					return TEST_PASS;
1051 				break;
1052 			}
1053 			if (ret <= 0) {
1054 				ksft_print_msg("ERROR: [%s] Poll error %d\n",
1055 					       __func__, errno);
1056 				return TEST_FAILURE;
1057 			}
1058 		}
1059 
1060 		complete_pkts(xsk, BATCH_SIZE);
1061 	}
1062 
1063 	for (i = 0; i < BATCH_SIZE; i++) {
1064 		struct pkt *pkt = pkt_stream_get_next_tx_pkt(pkt_stream);
1065 		u32 nb_frags, bytes_written = 0;
1066 
1067 		if (!pkt)
1068 			break;
1069 
1070 		nb_frags = pkt_nb_frags(umem->frame_size, pkt);
1071 		if (nb_frags > BATCH_SIZE - i) {
1072 			pkt_stream_cancel(pkt_stream);
1073 			xsk_ring_prod__cancel(&xsk->tx, BATCH_SIZE - i);
1074 			break;
1075 		}
1076 
1077 		if (pkt->valid) {
1078 			valid_pkts++;
1079 			valid_frags += nb_frags;
1080 		}
1081 
1082 		while (nb_frags--) {
1083 			struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
1084 
1085 			tx_desc->addr = pkt_get_addr(pkt, ifobject->umem);
1086 			if (nb_frags) {
1087 				tx_desc->len = umem->frame_size;
1088 				tx_desc->options = XDP_PKT_CONTD;
1089 				i++;
1090 			} else {
1091 				tx_desc->len = pkt->len - bytes_written;
1092 				tx_desc->options = 0;
1093 			}
1094 			if (pkt->valid)
1095 				pkt_generate(ifobject, tx_desc->addr, tx_desc->len, pkt->pkt_nb,
1096 					     bytes_written);
1097 			bytes_written += tx_desc->len;
1098 		}
1099 	}
1100 
1101 	pthread_mutex_lock(&pacing_mutex);
1102 	pkts_in_flight += valid_pkts;
1103 	pthread_mutex_unlock(&pacing_mutex);
1104 
1105 	xsk_ring_prod__submit(&xsk->tx, i);
1106 	xsk->outstanding_tx += valid_frags;
1107 
1108 	if (use_poll) {
1109 		ret = poll(fds, 1, POLL_TMOUT);
1110 		if (ret <= 0) {
1111 			if (ret == 0 && timeout)
1112 				return TEST_PASS;
1113 
1114 			ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
1115 			return TEST_FAILURE;
1116 		}
1117 	}
1118 
1119 	if (!timeout) {
1120 		if (complete_pkts(xsk, i))
1121 			return TEST_FAILURE;
1122 
1123 		usleep(10);
1124 		return TEST_PASS;
1125 	}
1126 
1127 	return TEST_CONTINUE;
1128 }
1129 
1130 static void wait_for_tx_completion(struct xsk_socket_info *xsk)
1131 {
1132 	while (xsk->outstanding_tx)
1133 		complete_pkts(xsk, BATCH_SIZE);
1134 }
1135 
1136 static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
1137 {
1138 	struct pkt_stream *pkt_stream = ifobject->pkt_stream;
1139 	bool timeout = !is_umem_valid(test->ifobj_rx);
1140 	struct pollfd fds = { };
1141 	u32 ret;
1142 
1143 	fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
1144 	fds.events = POLLOUT;
1145 
1146 	while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) {
1147 		ret = __send_pkts(ifobject, &fds, timeout);
1148 		if (ret == TEST_CONTINUE && !test->fail)
1149 			continue;
1150 		if ((ret || test->fail) && !timeout)
1151 			return TEST_FAILURE;
1152 		if (ret == TEST_PASS && timeout)
1153 			return ret;
1154 	}
1155 
1156 	wait_for_tx_completion(ifobject->xsk);
1157 	return TEST_PASS;
1158 }
1159 
1160 static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
1161 {
1162 	int fd = xsk_socket__fd(xsk), err;
1163 	socklen_t optlen, expected_len;
1164 
1165 	optlen = sizeof(*stats);
1166 	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
1167 	if (err) {
1168 		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %d %s\n",
1169 			       __func__, errno, strerror(errno));
1170 		return TEST_FAILURE;
1171 	}
1172 
1173 	expected_len = sizeof(struct xdp_statistics);
1174 	if (optlen != expected_len) {
1175 		ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n",
1176 			       __func__, expected_len, optlen);
1177 		return TEST_FAILURE;
1178 	}
1179 
1180 	return TEST_PASS;
1181 }
1182 
1183 static int validate_rx_dropped(struct ifobject *ifobject)
1184 {
1185 	struct xsk_socket *xsk = ifobject->xsk->xsk;
1186 	struct xdp_statistics stats;
1187 	int err;
1188 
1189 	kick_rx(ifobject->xsk);
1190 
1191 	err = get_xsk_stats(xsk, &stats);
1192 	if (err)
1193 		return TEST_FAILURE;
1194 
1195 	/* The receiver calls getsockopt after receiving the last (valid)
1196 	 * packet which is not the final packet sent in this test (valid and
1197 	 * invalid packets are sent in alternating fashion with the final
1198 	 * packet being invalid). Since the last packet may or may not have
1199 	 * been dropped already, both outcomes must be allowed.
1200 	 */
1201 	if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 ||
1202 	    stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 - 1)
1203 		return TEST_PASS;
1204 
1205 	return TEST_FAILURE;
1206 }
1207 
1208 static int validate_rx_full(struct ifobject *ifobject)
1209 {
1210 	struct xsk_socket *xsk = ifobject->xsk->xsk;
1211 	struct xdp_statistics stats;
1212 	int err;
1213 
1214 	usleep(1000);
1215 	kick_rx(ifobject->xsk);
1216 
1217 	err = get_xsk_stats(xsk, &stats);
1218 	if (err)
1219 		return TEST_FAILURE;
1220 
1221 	if (stats.rx_ring_full)
1222 		return TEST_PASS;
1223 
1224 	return TEST_FAILURE;
1225 }
1226 
1227 static int validate_fill_empty(struct ifobject *ifobject)
1228 {
1229 	struct xsk_socket *xsk = ifobject->xsk->xsk;
1230 	struct xdp_statistics stats;
1231 	int err;
1232 
1233 	usleep(1000);
1234 	kick_rx(ifobject->xsk);
1235 
1236 	err = get_xsk_stats(xsk, &stats);
1237 	if (err)
1238 		return TEST_FAILURE;
1239 
1240 	if (stats.rx_fill_ring_empty_descs)
1241 		return TEST_PASS;
1242 
1243 	return TEST_FAILURE;
1244 }
1245 
1246 static int validate_tx_invalid_descs(struct ifobject *ifobject)
1247 {
1248 	struct xsk_socket *xsk = ifobject->xsk->xsk;
1249 	int fd = xsk_socket__fd(xsk);
1250 	struct xdp_statistics stats;
1251 	socklen_t optlen;
1252 	int err;
1253 
1254 	optlen = sizeof(stats);
1255 	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
1256 	if (err) {
1257 		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %d %s\n",
1258 			       __func__, errno, strerror(errno));
1259 		return TEST_FAILURE;
1260 	}
1261 
1262 	if (stats.tx_invalid_descs != ifobject->pkt_stream->nb_pkts / 2) {
1263 		ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%llu] expected [%u]\n",
1264 			       __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts / 2);
1265 		return TEST_FAILURE;
1266 	}
1267 
1268 	return TEST_PASS;
1269 }
1270 
1271 static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject,
1272 				 struct xsk_umem_info *umem, bool tx)
1273 {
1274 	int i, ret;
1275 
1276 	for (i = 0; i < test->nb_sockets; i++) {
1277 		bool shared = (ifobject->shared_umem && tx) ? true : !!i;
1278 		u32 ctr = 0;
1279 
1280 		while (ctr++ < SOCK_RECONF_CTR) {
1281 			ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem,
1282 						     ifobject, shared);
1283 			if (!ret)
1284 				break;
1285 
1286 			/* Retry if it fails as xsk_socket__create() is asynchronous */
1287 			if (ctr >= SOCK_RECONF_CTR)
1288 				exit_with_error(-ret);
1289 			usleep(USLEEP_MAX);
1290 		}
1291 		if (ifobject->busy_poll)
1292 			enable_busy_poll(&ifobject->xsk_arr[i]);
1293 	}
1294 }
1295 
1296 static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
1297 {
1298 	xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true);
1299 	ifobject->xsk = &ifobject->xsk_arr[0];
1300 	ifobject->xskmap = test->ifobj_rx->xskmap;
1301 	memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
1302 	ifobject->umem->base_addr = 0;
1303 }
1304 
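/* Pre-populate the fill ring with one buffer per expected Rx fragment. When
 * fill_up is set, keep filling with sequential addresses after the packet
 * stream is exhausted, until buffers_to_fill entries are in place.
 */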
1305 static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream,
1306 				   bool fill_up)
1307 {
1308 	u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM;
1309 	u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts;
1310 	int ret;
1311 
1312 	if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
1313 		buffers_to_fill = umem->num_frames;
1314 	else
1315 		buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS;
1316 
1317 	ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
1318 	if (ret != buffers_to_fill)
1319 		exit_with_error(ENOSPC);
1320 
1321 	while (filled < buffers_to_fill) {
1322 		struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts);
1323 		u64 addr;
1324 		u32 i;
1325 
1326 		for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt); i++) {
1327 			if (!pkt) {
1328 				if (!fill_up)
1329 					break;
1330 				addr = filled * umem->frame_size + umem->base_addr;
1331 			} else if (pkt->offset >= 0) {
1332 				addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem);
1333 			} else {
1334 				addr = pkt->offset + umem_alloc_buffer(umem);
1335 			}
1336 
1337 			*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
1338 			if (++filled >= buffers_to_fill)
1339 				break;
1340 		}
1341 	}
1342 	xsk_ring_prod__submit(&umem->fq, filled);
1343 	xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled);
1344 
1345 	pkt_stream_reset(pkt_stream);
1346 	umem_reset_alloc(umem);
1347 }
1348 
1349 static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
1350 {
1351 	u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
1352 	int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
1353 	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
1354 	void *bufs;
1355 	int ret;
1356 
1357 	if (ifobject->umem->unaligned_mode)
1358 		mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;
1359 
1360 	if (ifobject->shared_umem)
1361 		umem_sz *= 2;
1362 
1363 	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
1364 	if (bufs == MAP_FAILED)
1365 		exit_with_error(errno);
1366 
1367 	ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz);
1368 	if (ret)
1369 		exit_with_error(-ret);
1370 
1371 	xsk_configure_socket(test, ifobject, ifobject->umem, false);
1372 
1373 	ifobject->xsk = &ifobject->xsk_arr[0];
1374 
1375 	if (!ifobject->rx_on)
1376 		return;
1377 
1378 	xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream, ifobject->use_fill_ring);
1379 
1380 	ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
1381 	if (ret)
1382 		exit_with_error(errno);
1383 }
1384 
1385 static void *worker_testapp_validate_tx(void *arg)
1386 {
1387 	struct test_spec *test = (struct test_spec *)arg;
1388 	struct ifobject *ifobject = test->ifobj_tx;
1389 	int err;
1390 
1391 	if (test->current_step == 1) {
1392 		if (!ifobject->shared_umem)
1393 			thread_common_ops(test, ifobject);
1394 		else
1395 			thread_common_ops_tx(test, ifobject);
1396 	}
1397 
1398 	print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts,
1399 		      ifobject->ifname);
1400 	err = send_pkts(test, ifobject);
1401 
1402 	if (!err && ifobject->validation_func)
1403 		err = ifobject->validation_func(ifobject);
1404 	if (err)
1405 		report_failure(test);
1406 
1407 	pthread_exit(NULL);
1408 }
1409 
1410 static void *worker_testapp_validate_rx(void *arg)
1411 {
1412 	struct test_spec *test = (struct test_spec *)arg;
1413 	struct ifobject *ifobject = test->ifobj_rx;
1414 	struct pollfd fds = { };
1415 	int err;
1416 
1417 	if (test->current_step == 1) {
1418 		thread_common_ops(test, ifobject);
1419 	} else {
1420 		xsk_clear_xskmap(ifobject->xskmap);
1421 		err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
1422 		if (err) {
1423 			printf("Error: Failed to update xskmap, error %s\n", strerror(-err));
1424 			exit_with_error(-err);
1425 		}
1426 	}
1427 
1428 	fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
1429 	fds.events = POLLIN;
1430 
1431 	pthread_barrier_wait(&barr);
1432 
1433 	err = receive_pkts(test, &fds);
1434 
1435 	if (!err && ifobject->validation_func)
1436 		err = ifobject->validation_func(ifobject);
1437 	if (err)
1438 		report_failure(test);
1439 
1440 	pthread_exit(NULL);
1441 }
1442 
1443 static u64 ceil_u64(u64 a, u64 b)
1444 {
1445 	return (a + b - 1) / b;
1446 }
1447 
1448 static void testapp_clean_xsk_umem(struct ifobject *ifobj)
1449 {
1450 	u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;
1451 
1452 	if (ifobj->shared_umem)
1453 		umem_sz *= 2;
1454 
1455 	umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
1456 	xsk_umem__delete(ifobj->umem->umem);
1457 	munmap(ifobj->umem->buffer, umem_sz);
1458 }
1459 
1460 static void handler(int signum)
1461 {
1462 	pthread_exit(NULL);
1463 }
1464 
1465 static bool xdp_prog_changed_rx(struct test_spec *test)
1466 {
1467 	struct ifobject *ifobj = test->ifobj_rx;
1468 
1469 	return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode;
1470 }
1471 
1472 static bool xdp_prog_changed_tx(struct test_spec *test)
1473 {
1474 	struct ifobject *ifobj = test->ifobj_tx;
1475 
1476 	return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode;
1477 }
1478 
1479 static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog,
1480 			     struct bpf_map *xskmap, enum test_mode mode)
1481 {
1482 	int err;
1483 
1484 	xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode));
1485 	err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode));
1486 	if (err) {
1487 		printf("Error attaching XDP program\n");
1488 		exit_with_error(-err);
1489 	}
1490 
1491 	if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC))
1492 		if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) {
1493 			ksft_print_msg("ERROR: XDP prog not in DRV mode\n");
1494 			exit_with_error(EINVAL);
1495 		}
1496 
1497 	ifobj->xdp_prog = xdp_prog;
1498 	ifobj->xskmap = xskmap;
1499 	ifobj->mode = mode;
1500 }
1501 
1502 static void xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx,
1503 				 struct ifobject *ifobj_tx)
1504 {
1505 	if (xdp_prog_changed_rx(test))
1506 		xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode);
1507 
1508 	if (!ifobj_tx || ifobj_tx->shared_umem)
1509 		return;
1510 
1511 	if (xdp_prog_changed_tx(test))
1512 		xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode);
1513 }
1514 
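/* Run one test step: spawn the Rx thread (ifobj1) and, if present, the Tx
 * thread (ifobj2), synchronized through a barrier, then tear down the sockets
 * and umems once the final step has run or the test has failed.
 */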
1515 static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1,
1516 				      struct ifobject *ifobj2)
1517 {
1518 	pthread_t t0, t1;
1519 
1520 	if (ifobj2) {
1521 		if (pthread_barrier_init(&barr, NULL, 2))
1522 			exit_with_error(errno);
1523 		pkt_stream_reset(ifobj2->pkt_stream);
1524 	}
1525 
1526 	test->current_step++;
1527 	pkt_stream_reset(ifobj1->pkt_stream);
1528 	pkts_in_flight = 0;
1529 
1530 	signal(SIGUSR1, handler);
1531 	/* Spawn RX thread */
1532 	pthread_create(&t0, NULL, ifobj1->func_ptr, test);
1533 
1534 	if (ifobj2) {
1535 		pthread_barrier_wait(&barr);
1536 		if (pthread_barrier_destroy(&barr))
1537 			exit_with_error(errno);
1538 
1539 		/*Spawn TX thread */
1540 		pthread_create(&t1, NULL, ifobj2->func_ptr, test);
1541 
1542 		pthread_join(t1, NULL);
1543 	}
1544 
1545 	if (!ifobj2)
1546 		pthread_kill(t0, SIGUSR1);
1547 	else
1548 		pthread_join(t0, NULL);
1549 
1550 	if (test->total_steps == test->current_step || test->fail) {
1551 		if (ifobj2)
1552 			xsk_socket__delete(ifobj2->xsk->xsk);
1553 		xsk_socket__delete(ifobj1->xsk->xsk);
1554 		testapp_clean_xsk_umem(ifobj1);
1555 		if (ifobj2 && !ifobj2->shared_umem)
1556 			testapp_clean_xsk_umem(ifobj2);
1557 	}
1558 
1559 	return !!test->fail;
1560 }
1561 
1562 static int testapp_validate_traffic(struct test_spec *test)
1563 {
1564 	struct ifobject *ifobj_rx = test->ifobj_rx;
1565 	struct ifobject *ifobj_tx = test->ifobj_tx;
1566 
1567 	if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) ||
1568 	    (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) {
1569 		ksft_test_result_skip("No huge pages present.\n");
1570 		return TEST_SKIP;
1571 	}
1572 
1573 	xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx);
1574 	return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx);
1575 }
1576 
1577 static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj)
1578 {
1579 	return __testapp_validate_traffic(test, ifobj, NULL);
1580 }
1581 
1582 static int testapp_teardown(struct test_spec *test)
1583 {
1584 	int i;
1585 
1586 	test_spec_set_name(test, "TEARDOWN");
1587 	for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
1588 		if (testapp_validate_traffic(test))
1589 			return TEST_FAILURE;
1590 		test_spec_reset(test);
1591 	}
1592 
1593 	return TEST_PASS;
1594 }
1595 
1596 static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
1597 {
1598 	thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr;
1599 	struct ifobject *tmp_ifobj = (*ifobj1);
1600 
1601 	(*ifobj1)->func_ptr = (*ifobj2)->func_ptr;
1602 	(*ifobj2)->func_ptr = tmp_func_ptr;
1603 
1604 	*ifobj1 = *ifobj2;
1605 	*ifobj2 = tmp_ifobj;
1606 }
1607 
1608 static int testapp_bidi(struct test_spec *test)
1609 {
1610 	int res;
1611 
1612 	test_spec_set_name(test, "BIDIRECTIONAL");
1613 	test->ifobj_tx->rx_on = true;
1614 	test->ifobj_rx->tx_on = true;
1615 	test->total_steps = 2;
1616 	if (testapp_validate_traffic(test))
1617 		return TEST_FAILURE;
1618 
1619 	print_verbose("Switching Tx/Rx vectors\n");
1620 	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
1621 	res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);
1622 
1623 	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
1624 	return res;
1625 }
1626 
1627 static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
1628 {
1629 	int ret;
1630 
1631 	xsk_socket__delete(ifobj_tx->xsk->xsk);
1632 	xsk_socket__delete(ifobj_rx->xsk->xsk);
1633 	ifobj_tx->xsk = &ifobj_tx->xsk_arr[1];
1634 	ifobj_rx->xsk = &ifobj_rx->xsk_arr[1];
1635 
1636 	ret = xsk_update_xskmap(ifobj_rx->xskmap, ifobj_rx->xsk->xsk);
1637 	if (ret)
1638 		exit_with_error(errno);
1639 }
1640 
1641 static int testapp_bpf_res(struct test_spec *test)
1642 {
1643 	test_spec_set_name(test, "BPF_RES");
1644 	test->total_steps = 2;
1645 	test->nb_sockets = 2;
1646 	if (testapp_validate_traffic(test))
1647 		return TEST_FAILURE;
1648 
1649 	swap_xsk_resources(test->ifobj_tx, test->ifobj_rx);
1650 	return testapp_validate_traffic(test);
1651 }
1652 
1653 static int testapp_headroom(struct test_spec *test)
1654 {
1655 	test_spec_set_name(test, "UMEM_HEADROOM");
1656 	test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
1657 	return testapp_validate_traffic(test);
1658 }
1659 
1660 static int testapp_stats_rx_dropped(struct test_spec *test)
1661 {
1662 	test_spec_set_name(test, "STAT_RX_DROPPED");
1663 	if (test->mode == TEST_MODE_ZC) {
1664 		ksft_test_result_skip("Cannot run RX_DROPPED test for ZC mode\n");
1665 		return TEST_SKIP;
1666 	}
1667 
1668 	pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
1669 	test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
1670 		XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
1671 	pkt_stream_receive_half(test);
1672 	test->ifobj_rx->validation_func = validate_rx_dropped;
1673 	return testapp_validate_traffic(test);
1674 }
1675 
1676 static int testapp_stats_tx_invalid_descs(struct test_spec *test)
1677 {
1678 	test_spec_set_name(test, "STAT_TX_INVALID");
1679 	pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
1680 	test->ifobj_tx->validation_func = validate_tx_invalid_descs;
1681 	return testapp_validate_traffic(test);
1682 }
1683 
1684 static int testapp_stats_rx_full(struct test_spec *test)
1685 {
1686 	test_spec_set_name(test, "STAT_RX_FULL");
1687 	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
1688 	test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
1689 							 DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
1690 
1691 	test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
1692 	test->ifobj_rx->release_rx = false;
1693 	test->ifobj_rx->validation_func = validate_rx_full;
1694 	return testapp_validate_traffic(test);
1695 }
1696 
1697 static int testapp_stats_fill_empty(struct test_spec *test)
1698 {
1699 	test_spec_set_name(test, "STAT_RX_FILL_EMPTY");
1700 	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
1701 	test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
1702 							 DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
1703 
1704 	test->ifobj_rx->use_fill_ring = false;
1705 	test->ifobj_rx->validation_func = validate_fill_empty;
1706 	return testapp_validate_traffic(test);
1707 }
1708 
1709 static int testapp_unaligned(struct test_spec *test)
1710 {
1711 	test_spec_set_name(test, "UNALIGNED_MODE");
1712 	test->ifobj_tx->umem->unaligned_mode = true;
1713 	test->ifobj_rx->umem->unaligned_mode = true;
1714 	/* Let half of the packets straddle a 4K buffer boundary */
1715 	pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2);
1716 
1717 	return testapp_validate_traffic(test);
1718 }
1719 
1720 static int testapp_single_pkt(struct test_spec *test)
1721 {
1722 	struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}};
1723 
1724 	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
1725 	return testapp_validate_traffic(test);
1726 }
1727 
1728 static int testapp_invalid_desc(struct test_spec *test)
1729 {
1730 	struct xsk_umem_info *umem = test->ifobj_tx->umem;
1731 	u64 umem_size = umem->num_frames * umem->frame_size;
1732 	struct pkt pkts[] = {
1733 		/* Zero packet address allowed */
1734 		{0, MIN_PKT_SIZE, 0, true},
1735 		/* Allowed packet */
1736 		{0, MIN_PKT_SIZE, 0, true},
1737 		/* Straddling the start of umem */
1738 		{-2, MIN_PKT_SIZE, 0, false},
1739 		/* Packet too large */
1740 		{0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
1741 		/* Up to end of umem allowed */
1742 		{umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true},
1743 		/* After umem ends */
1744 		{umem_size, MIN_PKT_SIZE, 0, false},
1745 		/* Straddle the end of umem */
1746 		{umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
1747 		/* Straddle a 4K boundary */
1748 		{0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
1749 		/* Straddle a 2K boundary */
1750 		{0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true},
1751 		/* Valid packet for sync so that something is received */
1752 		{0, MIN_PKT_SIZE, 0, true}};
1753 
1754 	if (umem->unaligned_mode) {
1755 		/* Crossing a page boundary allowed */
1756 		pkts[7].valid = true;
1757 	}
1758 	if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
1759 		/* Crossing a 2K frame size boundary not allowed */
1760 		pkts[8].valid = false;
1761 	}
1762 
1763 	if (test->ifobj_tx->shared_umem) {
1764 		pkts[4].offset += umem_size;
1765 		pkts[5].offset += umem_size;
1766 		pkts[6].offset += umem_size;
1767 	}
1768 
1769 	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
1770 	return testapp_validate_traffic(test);
1771 }
1772 
1773 static int testapp_xdp_drop(struct test_spec *test)
1774 {
1775 	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
1776 	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
1777 
1778 	test_spec_set_name(test, "XDP_DROP_HALF");
1779 	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
1780 			       skel_rx->maps.xsk, skel_tx->maps.xsk);
1781 
1782 	pkt_stream_receive_half(test);
1783 	return testapp_validate_traffic(test);
1784 }
1785 
1786 static int testapp_xdp_metadata_count(struct test_spec *test)
1787 {
1788 	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
1789 	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
1790 	struct bpf_map *data_map;
1791 	int count = 0;
1792 	int key = 0;
1793 
1794 	test_spec_set_name(test, "XDP_METADATA_COUNT");
1795 	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata,
1796 			       skel_tx->progs.xsk_xdp_populate_metadata,
1797 			       skel_rx->maps.xsk, skel_tx->maps.xsk);
1798 	test->ifobj_rx->use_metadata = true;
1799 
1800 	data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
1801 	if (!data_map || !bpf_map__is_internal(data_map))
1802 		exit_with_error(ENOMEM);
1803 
1804 	if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY))
1805 		exit_with_error(errno);
1806 
1807 	return testapp_validate_traffic(test);
1808 }
1809 
1810 static int testapp_poll_txq_tmout(struct test_spec *test)
1811 {
1812 	test_spec_set_name(test, "POLL_TXQ_FULL");
1813 
1814 	test->ifobj_tx->use_poll = true;
1815 	/* Create invalid frames by setting both the umem frame_size and the pkt length to 2048 */
1816 	test->ifobj_tx->umem->frame_size = 2048;
1817 	pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);
1818 	return testapp_validate_traffic_single_thread(test, test->ifobj_tx);
1819 }
1820 
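/* Exercise poll() on an Rx queue that never receives anything: only the Rx
 * thread is started, so poll() must time out on an empty queue.
 */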
1821 static int testapp_poll_rxq_tmout(struct test_spec *test)
1822 {
1823 	test_spec_set_name(test, "POLL_RXQ_EMPTY");
1824 	test->ifobj_rx->use_poll = true;
1825 	return testapp_validate_traffic_single_thread(test, test->ifobj_rx);
1826 }
1827 
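/* Load the suite of test XDP programs through their generated skeleton and
 * stash them on the interface object.
 */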
1828 static int xsk_load_xdp_programs(struct ifobject *ifobj)
1829 {
1830 	ifobj->xdp_progs = xsk_xdp_progs__open_and_load();
1831 	if (libbpf_get_error(ifobj->xdp_progs))
1832 		return libbpf_get_error(ifobj->xdp_progs);
1833 
1834 	return 0;
1835 }
1836 
1837 static void xsk_unload_xdp_programs(struct ifobject *ifobj)
1838 {
1839 	xsk_xdp_progs__destroy(ifobj->xdp_progs);
1840 }
1841 
1842 /* Probe whether 2MB hugepages are available by attempting a hugetlb mapping */
1843 static bool hugepages_present(void)
1844 {
1845 	size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
1846 	void *bufs;
1847 
1848 	bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
1849 		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
1850 	if (bufs == MAP_FAILED)
1851 		return false;
1852 
1853 	mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
1854 	munmap(bufs, mmap_sz);
1855 	return true;
1856 }
1857 
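/* One-time interface setup: record the MAC addresses, the worker-thread
 * entry point and the XDP programs. Unaligned-mode tests additionally
 * require hugepages, so their support flag is probed here.
 */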
1858 static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac,
1859 		       thread_func_t func_ptr)
1860 {
1861 	int err;
1862 
1863 	memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN);
1864 	memcpy(ifobj->src_mac, src_mac, ETH_ALEN);
1865 
1866 	ifobj->func_ptr = func_ptr;
1867 
1868 	err = xsk_load_xdp_programs(ifobj);
1869 	if (err) {
1870 		printf("Error loading XDP program\n");
1871 		exit_with_error(err);
1872 	}
1873 
1874 	if (hugepages_present())
1875 		ifobj->unaligned_supp = true;
1876 }
1877 
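/* Dispatch a single (mode, type) combination to its test function. ret
 * starts out as TEST_SKIP so unhandled types are skipped rather than
 * failed; only passes are reported here, while failures are tracked in
 * test->fail and tallied by main(). The default packet streams are restored
 * afterwards regardless of what the test replaced them with.
 */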
1878 static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type)
1879 {
1880 	int ret = TEST_SKIP;
1881 
1882 	switch (type) {
1883 	case TEST_TYPE_STATS_RX_DROPPED:
1884 		ret = testapp_stats_rx_dropped(test);
1885 		break;
1886 	case TEST_TYPE_STATS_TX_INVALID_DESCS:
1887 		ret = testapp_stats_tx_invalid_descs(test);
1888 		break;
1889 	case TEST_TYPE_STATS_RX_FULL:
1890 		ret = testapp_stats_rx_full(test);
1891 		break;
1892 	case TEST_TYPE_STATS_FILL_EMPTY:
1893 		ret = testapp_stats_fill_empty(test);
1894 		break;
1895 	case TEST_TYPE_TEARDOWN:
1896 		ret = testapp_teardown(test);
1897 		break;
1898 	case TEST_TYPE_BIDI:
1899 		ret = testapp_bidi(test);
1900 		break;
1901 	case TEST_TYPE_BPF_RES:
1902 		ret = testapp_bpf_res(test);
1903 		break;
1904 	case TEST_TYPE_RUN_TO_COMPLETION:
1905 		test_spec_set_name(test, "RUN_TO_COMPLETION");
1906 		ret = testapp_validate_traffic(test);
1907 		break;
1908 	case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT:
1909 		test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT");
1910 		ret = testapp_single_pkt(test);
1911 		break;
1912 	case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME:
1913 		test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
1914 		test->ifobj_tx->umem->frame_size = 2048;
1915 		test->ifobj_rx->umem->frame_size = 2048;
1916 		pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
1917 		ret = testapp_validate_traffic(test);
1918 		break;
1919 	case TEST_TYPE_RX_POLL:
1920 		test->ifobj_rx->use_poll = true;
1921 		test_spec_set_name(test, "POLL_RX");
1922 		ret = testapp_validate_traffic(test);
1923 		break;
1924 	case TEST_TYPE_TX_POLL:
1925 		test->ifobj_tx->use_poll = true;
1926 		test_spec_set_name(test, "POLL_TX");
1927 		ret = testapp_validate_traffic(test);
1928 		break;
1929 	case TEST_TYPE_POLL_TXQ_TMOUT:
1930 		ret = testapp_poll_txq_tmout(test);
1931 		break;
1932 	case TEST_TYPE_POLL_RXQ_TMOUT:
1933 		ret = testapp_poll_rxq_tmout(test);
1934 		break;
1935 	case TEST_TYPE_ALIGNED_INV_DESC:
1936 		test_spec_set_name(test, "ALIGNED_INV_DESC");
1937 		ret = testapp_invalid_desc(test);
1938 		break;
1939 	case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME:
1940 		test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE");
1941 		test->ifobj_tx->umem->frame_size = 2048;
1942 		test->ifobj_rx->umem->frame_size = 2048;
1943 		ret = testapp_invalid_desc(test);
1944 		break;
1945 	case TEST_TYPE_UNALIGNED_INV_DESC:
1946 		test_spec_set_name(test, "UNALIGNED_INV_DESC");
1947 		test->ifobj_tx->umem->unaligned_mode = true;
1948 		test->ifobj_rx->umem->unaligned_mode = true;
1949 		ret = testapp_invalid_desc(test);
1950 		break;
1951 	case TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME: {
1952 		u64 page_size, umem_size;
1953 
1954 		test_spec_set_name(test, "UNALIGNED_INV_DESC_4K1_FRAME_SIZE");
1955 		/* Odd frame size so the UMEM doesn't end near a page boundary. */
1956 		test->ifobj_tx->umem->frame_size = 4001;
1957 		test->ifobj_rx->umem->frame_size = 4001;
1958 		test->ifobj_tx->umem->unaligned_mode = true;
1959 		test->ifobj_rx->umem->unaligned_mode = true;
1960 		/* This test exercises descriptors that straddle the end of
1961 		 * the UMEM but not a page boundary.
1962 		 */
1963 		page_size = sysconf(_SC_PAGESIZE);
1964 		umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
1965 		assert(umem_size % page_size > MIN_PKT_SIZE);
1966 		assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
1967 		ret = testapp_invalid_desc(test);
1968 		break;
1969 	}
1970 	case TEST_TYPE_UNALIGNED:
1971 		ret = testapp_unaligned(test);
1972 		break;
1973 	case TEST_TYPE_HEADROOM:
1974 		ret = testapp_headroom(test);
1975 		break;
1976 	case TEST_TYPE_XDP_DROP_HALF:
1977 		ret = testapp_xdp_drop(test);
1978 		break;
1979 	case TEST_TYPE_XDP_METADATA_COUNT:
1980 		ret = testapp_xdp_metadata_count(test);
1981 		break;
1982 	default:
1983 		break;
1984 	}
1985 
1986 	if (ret == TEST_PASS)
1987 		ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
1988 				      test->name);
1989 	pkt_stream_restore_default(test);
1990 }
1991 
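/* Allocate an interface object together with its per-socket array and umem,
 * unwinding all partial allocations on failure.
 */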
1992 static struct ifobject *ifobject_create(void)
1993 {
1994 	struct ifobject *ifobj;
1995 
1996 	ifobj = calloc(1, sizeof(struct ifobject));
1997 	if (!ifobj)
1998 		return NULL;
1999 
2000 	ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
2001 	if (!ifobj->xsk_arr)
2002 		goto out_xsk_arr;
2003 
2004 	ifobj->umem = calloc(1, sizeof(*ifobj->umem));
2005 	if (!ifobj->umem)
2006 		goto out_umem;
2007 
2008 	return ifobj;
2009 
2010 out_umem:
2011 	free(ifobj->xsk_arr);
2012 out_xsk_arr:
2013 	free(ifobj);
2014 	return NULL;
2015 }
2016 
2017 static void ifobject_delete(struct ifobject *ifobj)
2018 {
2019 	free(ifobj->umem);
2020 	free(ifobj->xsk_arr);
2021 	free(ifobj);
2022 }
2023 
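/* Probe for native (driver-mode) XDP support by loading a minimal XDP_PASS
 * program and trying to attach it to the interface with XDP_FLAGS_DRV_MODE.
 */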
2024 static bool is_xdp_supported(int ifindex)
2025 {
2026 	int flags = XDP_FLAGS_DRV_MODE;
2027 
2029 	struct bpf_insn insns[2] = {
2030 		BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
2031 		BPF_EXIT_INSN()
2032 	};
2033 	int prog_fd, insn_cnt = ARRAY_SIZE(insns);
2034 	int err;
2035 
2036 	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
2037 	if (prog_fd < 0)
2038 		return false;
2039 
2040 	err = bpf_xdp_attach(ifindex, prog_fd, flags, NULL);
2041 	if (err) {
2042 		close(prog_fd);
2043 		return false;
2044 	}
2045 
2046 	bpf_xdp_detach(ifindex, flags, NULL);
2047 	close(prog_fd);
2048 
2049 	return true;
2050 }
2051 
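/* Entry point: build the Tx and Rx interface objects, detect which modes
 * the interfaces support (SKB always, plus DRV and zero-copy when
 * available), generate the default packet streams and run every mode/type
 * combination, counting failures for the final kselftest verdict.
 */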
2052 int main(int argc, char **argv)
2053 {
2054 	struct pkt_stream *rx_pkt_stream_default;
2055 	struct pkt_stream *tx_pkt_stream_default;
2056 	struct ifobject *ifobj_tx, *ifobj_rx;
2057 	int modes = TEST_MODE_SKB + 1;
2058 	u32 i, j, failed_tests = 0;
2059 	struct test_spec test;
2060 	bool shared_netdev;
2061 
2062 	/* Use libbpf 1.0 API mode */
2063 	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
2064 
2065 	ifobj_tx = ifobject_create();
2066 	if (!ifobj_tx)
2067 		exit_with_error(ENOMEM);
2068 	ifobj_rx = ifobject_create();
2069 	if (!ifobj_rx)
2070 		exit_with_error(ENOMEM);
2071 
2072 	setlocale(LC_ALL, "");
2073 
2074 	parse_command_line(ifobj_tx, ifobj_rx, argc, argv);
2075 
2076 	shared_netdev = (ifobj_tx->ifindex == ifobj_rx->ifindex);
2077 	ifobj_tx->shared_umem = shared_netdev;
2078 	ifobj_rx->shared_umem = shared_netdev;
2079 
2080 	if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) {
2081 		usage(basename(argv[0]));
2082 		ksft_exit_xfail();
2083 	}
2084 
2085 	if (is_xdp_supported(ifobj_tx->ifindex)) {
2086 		modes++;
2087 		if (ifobj_zc_avail(ifobj_tx))
2088 			modes++;
2089 	}
2090 
2091 	init_iface(ifobj_rx, MAC1, MAC2, worker_testapp_validate_rx);
2092 	init_iface(ifobj_tx, MAC2, MAC1, worker_testapp_validate_tx);
2093 
2094 	test_spec_init(&test, ifobj_tx, ifobj_rx, 0);
2095 	tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
2096 	rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
2097 	if (!tx_pkt_stream_default || !rx_pkt_stream_default)
2098 		exit_with_error(ENOMEM);
2099 	test.tx_pkt_stream_default = tx_pkt_stream_default;
2100 	test.rx_pkt_stream_default = rx_pkt_stream_default;
2101 
2102 	ksft_set_plan(modes * TEST_TYPE_MAX);
2103 
2104 	for (i = 0; i < modes; i++) {
2105 		for (j = 0; j < TEST_TYPE_MAX; j++) {
2106 			test_spec_init(&test, ifobj_tx, ifobj_rx, i);
2107 			run_pkt_test(&test, i, j);
2108 			usleep(USLEEP_MAX);
2109 
2110 			if (test.fail)
2111 				failed_tests++;
2112 		}
2113 	}
2114 
2115 	pkt_stream_delete(tx_pkt_stream_default);
2116 	pkt_stream_delete(rx_pkt_stream_default);
2117 	xsk_unload_xdp_programs(ifobj_tx);
2118 	xsk_unload_xdp_programs(ifobj_rx);
2119 	ifobject_delete(ifobj_tx);
2120 	ifobject_delete(ifobj_rx);
2121 
2122 	if (failed_tests)
2123 		ksft_exit_fail();
2124 	else
2125 		ksft_exit_pass();
2126 }
2127