xref: /openbmc/linux/tools/testing/selftests/bpf/xskxceiver.c (revision 6b3c0821caa49538c49262b041bae59bad523c7c)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2020 Intel Corporation. */
3 
4 /*
5  * Some functions in this program are taken from
6  * Linux kernel samples/bpf/xdpsock* and modified
7  * for use.
8  *
9  * See test_xsk.sh for detailed information on test topology
10  * and prerequisite network setup.
11  *
12  * This test program contains two threads, each thread is single socket with
13  * a unique UMEM. It validates in-order packet delivery and packet content
14  * by sending packets to each other.
15  *
16  * Tests Information:
17  * ------------------
18  * These selftests test AF_XDP SKB and Native/DRV modes using veth
19  * Virtual Ethernet interfaces.
20  *
21  * For each mode, the following tests are run:
22  *    a. nopoll - soft-irq processing in run-to-completion mode
23  *    b. poll - using poll() syscall
24  *    c. Socket Teardown
25  *       Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy
26  *       both sockets, then repeat multiple times. Only nopoll mode is used
27  *    d. Bi-directional sockets
28  *       Configure sockets as bi-directional tx/rx sockets, sets up fill and
29  *       completion rings on each socket, tx/rx in both directions. Only nopoll
30  *       mode is used
31  *    e. Statistics
32  *       Trigger some error conditions and ensure that the appropriate statistics
33  *       are incremented. Within this test, the following statistics are tested:
34  *       i.   rx dropped
35  *            Increase the UMEM frame headroom to a value which results in
36  *            insufficient space in the rx buffer for both the packet and the headroom.
37  *       ii.  tx invalid
38  *            Set the 'len' field of tx descriptors to an invalid value (umem frame
39  *            size + 1).
40  *       iii. rx ring full
41  *            Reduce the size of the RX ring to a fraction of the fill ring size.
42  *       iv.  fill queue empty
43  *            Do not populate the fill queue and then try to receive pkts.
44  *    f. bpf_link resource persistence
 *       Configure sockets at indexes 0 and 1, run traffic on queue id 0,
 *       then remove the xsk sockets from queue 0 on both veth interfaces and
 *       finally run traffic on queue id 1
48  *    g. unaligned mode
49  *    h. tests for invalid and corner case Tx descriptors so that the correct ones
50  *       are discarded and let through, respectively.
51  *    i. 2K frame size tests
52  *
53  * Total tests: 12
54  *
55  * Flow:
56  * -----
57  * - Single process spawns two threads: Tx and Rx
58  * - Each of these two threads attach to a veth interface
59  * - Each thread creates one AF_XDP socket connected to a unique umem for each
60  *   veth interface
61  * - Tx thread Transmits a number of packets from veth<xxxx> to veth<yyyy>
62  * - Rx thread verifies if all packets were received and delivered in-order,
63  *   and have the right content
64  *
65  * Enable/disable packet dump mode:
66  * --------------------------
67  * To enable L2 - L4 headers and payload dump of each packet on STDOUT, add
68  * parameter -D to params array in test_xsk.sh, i.e. params=("-S" "-D")
69  */
70 
71 #define _GNU_SOURCE
72 #include <fcntl.h>
73 #include <errno.h>
74 #include <getopt.h>
75 #include <asm/barrier.h>
76 #include <linux/if_link.h>
77 #include <linux/if_ether.h>
78 #include <linux/ip.h>
79 #include <linux/udp.h>
80 #include <arpa/inet.h>
81 #include <net/if.h>
82 #include <locale.h>
83 #include <poll.h>
84 #include <pthread.h>
85 #include <signal.h>
86 #include <stdbool.h>
87 #include <stdio.h>
88 #include <stdlib.h>
89 #include <string.h>
90 #include <stddef.h>
91 #include <sys/mman.h>
92 #include <sys/socket.h>
93 #include <sys/time.h>
94 #include <sys/types.h>
95 #include <sys/queue.h>
96 #include <time.h>
97 #include <unistd.h>
98 #include <stdatomic.h>
99 #include "xsk.h"
100 #include "xskxceiver.h"
101 #include <bpf/bpf.h>
102 #include <linux/filter.h>
103 #include "../kselftest.h"
104 
105 static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
106 static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
107 static const char *IP1 = "192.168.100.162";
108 static const char *IP2 = "192.168.100.161";
109 static const u16 UDP_PORT1 = 2020;
110 static const u16 UDP_PORT2 = 2121;
111 
112 static void __exit_with_error(int error, const char *file, const char *func, int line)
113 {
114 	ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error,
115 			      strerror(error));
116 	ksft_exit_xfail();
117 }
118 
119 #define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)
120 #define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? "BUSY-POLL " : ""
121 static char *mode_string(struct test_spec *test)
122 {
123 	switch (test->mode) {
124 	case TEST_MODE_SKB:
125 		return "SKB";
126 	case TEST_MODE_DRV:
127 		return "DRV";
128 	case TEST_MODE_ZC:
129 		return "ZC";
130 	default:
131 		return "BOGUS";
132 	}
133 }
134 
135 static void report_failure(struct test_spec *test)
136 {
137 	if (test->fail)
138 		return;
139 
140 	ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test),
141 			      test->name);
142 	test->fail = true;
143 }
144 
145 static void memset32_htonl(void *dest, u32 val, u32 size)
146 {
147 	u32 *ptr = (u32 *)dest;
148 	int i;
149 
150 	val = htonl(val);
151 
152 	for (i = 0; i < (size & (~0x3)); i += 4)
153 		ptr[i >> 2] = val;
154 }
155 
156 /*
157  * Fold a partial checksum
158  * This function code has been taken from
159  * Linux kernel include/asm-generic/checksum.h
160  */
161 static __u16 csum_fold(__u32 csum)
162 {
163 	u32 sum = (__force u32)csum;
164 
165 	sum = (sum & 0xffff) + (sum >> 16);
166 	sum = (sum & 0xffff) + (sum >> 16);
167 	return (__force __u16)~sum;
168 }
169 
170 /*
171  * This function code has been taken from
172  * Linux kernel lib/checksum.c
173  */
174 static u32 from64to32(u64 x)
175 {
176 	/* add up 32-bit and 32-bit for 32+c bit */
177 	x = (x & 0xffffffff) + (x >> 32);
178 	/* add up carry.. */
179 	x = (x & 0xffffffff) + (x >> 32);
180 	return (u32)x;
181 }
182 
183 /*
184  * This function code has been taken from
185  * Linux kernel lib/checksum.c
186  */
187 static __u32 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum)
188 {
189 	unsigned long long s = (__force u32)sum;
190 
191 	s += (__force u32)saddr;
192 	s += (__force u32)daddr;
193 #ifdef __BIG_ENDIAN__
194 	s += proto + len;
195 #else
196 	s += (proto + len) << 8;
197 #endif
198 	return (__force __u32)from64to32(s);
199 }
200 
201 /*
202  * This function has been taken from
203  * Linux kernel include/asm-generic/checksum.h
204  */
205 static __u16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum)
206 {
207 	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
208 }
209 
210 static u16 udp_csum(u32 saddr, u32 daddr, u32 len, u8 proto, u16 *udp_pkt)
211 {
212 	u32 csum = 0;
213 	u32 cnt = 0;
214 
215 	/* udp hdr and data */
216 	for (; cnt < len; cnt += 2)
217 		csum += udp_pkt[cnt >> 1];
218 
219 	return csum_tcpudp_magic(saddr, daddr, len, proto, csum);
220 }
221 
222 static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr)
223 {
224 	memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN);
225 	memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN);
226 	eth_hdr->h_proto = htons(ETH_P_IP);
227 }
228 
229 static void gen_ip_hdr(struct ifobject *ifobject, struct iphdr *ip_hdr)
230 {
231 	ip_hdr->version = IP_PKT_VER;
232 	ip_hdr->ihl = 0x5;
233 	ip_hdr->tos = IP_PKT_TOS;
234 	ip_hdr->tot_len = htons(IP_PKT_SIZE);
235 	ip_hdr->id = 0;
236 	ip_hdr->frag_off = 0;
237 	ip_hdr->ttl = IPDEFTTL;
238 	ip_hdr->protocol = IPPROTO_UDP;
239 	ip_hdr->saddr = ifobject->src_ip;
240 	ip_hdr->daddr = ifobject->dst_ip;
241 	ip_hdr->check = 0;
242 }
243 
244 static void gen_udp_hdr(u32 payload, void *pkt, struct ifobject *ifobject,
245 			struct udphdr *udp_hdr)
246 {
247 	udp_hdr->source = htons(ifobject->src_port);
248 	udp_hdr->dest = htons(ifobject->dst_port);
249 	udp_hdr->len = htons(UDP_PKT_SIZE);
250 	memset32_htonl(pkt + PKT_HDR_SIZE, payload, UDP_PKT_DATA_SIZE);
251 }
252 
253 static bool is_umem_valid(struct ifobject *ifobj)
254 {
255 	return !!ifobj->umem->umem;
256 }
257 
258 static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr)
259 {
260 	udp_hdr->check = 0;
261 	udp_hdr->check =
262 	    udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE, IPPROTO_UDP, (u16 *)udp_hdr);
263 }
264 
265 static u32 mode_to_xdp_flags(enum test_mode mode)
266 {
267 	return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
268 }
269 
270 static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size)
271 {
272 	struct xsk_umem_config cfg = {
273 		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
274 		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
275 		.frame_size = umem->frame_size,
276 		.frame_headroom = umem->frame_headroom,
277 		.flags = XSK_UMEM__DEFAULT_FLAGS
278 	};
279 	int ret;
280 
281 	if (umem->unaligned_mode)
282 		cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
283 
284 	ret = xsk_umem__create(&umem->umem, buffer, size,
285 			       &umem->fq, &umem->cq, &cfg);
286 	if (ret)
287 		return ret;
288 
289 	umem->buffer = buffer;
290 	return 0;
291 }
292 
293 static void enable_busy_poll(struct xsk_socket_info *xsk)
294 {
295 	int sock_opt;
296 
297 	sock_opt = 1;
298 	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
299 		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
300 		exit_with_error(errno);
301 
302 	sock_opt = 20;
303 	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
304 		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
305 		exit_with_error(errno);
306 
307 	sock_opt = BATCH_SIZE;
308 	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
309 		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
310 		exit_with_error(errno);
311 }
312 
313 static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
314 				  struct ifobject *ifobject, bool shared)
315 {
316 	struct xsk_socket_config cfg = {};
317 	struct xsk_ring_cons *rxr;
318 	struct xsk_ring_prod *txr;
319 
320 	xsk->umem = umem;
321 	cfg.rx_size = xsk->rxqsize;
322 	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
323 	cfg.bind_flags = ifobject->bind_flags;
324 	if (shared)
325 		cfg.bind_flags |= XDP_SHARED_UMEM;
326 
327 	txr = ifobject->tx_on ? &xsk->tx : NULL;
328 	rxr = ifobject->rx_on ? &xsk->rx : NULL;
329 	return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);
330 }
331 
332 static bool ifobj_zc_avail(struct ifobject *ifobject)
333 {
334 	size_t umem_sz = DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
335 	int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
336 	struct xsk_socket_info *xsk;
337 	struct xsk_umem_info *umem;
338 	bool zc_avail = false;
339 	void *bufs;
340 	int ret;
341 
342 	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
343 	if (bufs == MAP_FAILED)
344 		exit_with_error(errno);
345 
346 	umem = calloc(1, sizeof(struct xsk_umem_info));
347 	if (!umem) {
348 		munmap(bufs, umem_sz);
349 		exit_with_error(ENOMEM);
350 	}
351 	umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
352 	ret = xsk_configure_umem(umem, bufs, umem_sz);
353 	if (ret)
354 		exit_with_error(-ret);
355 
356 	xsk = calloc(1, sizeof(struct xsk_socket_info));
357 	if (!xsk)
358 		goto out;
359 	ifobject->xdp_flags = XDP_FLAGS_DRV_MODE;
360 	ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY;
361 	ifobject->rx_on = true;
362 	xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
363 	ret = __xsk_configure_socket(xsk, umem, ifobject, false);
364 	if (!ret)
365 		zc_avail = true;
366 
367 	xsk_socket__delete(xsk->xsk);
368 	free(xsk);
369 out:
370 	munmap(umem->buffer, umem_sz);
371 	xsk_umem__delete(umem->umem);
372 	free(umem);
373 	return zc_avail;
374 }
375 
376 static struct option long_options[] = {
377 	{"interface", required_argument, 0, 'i'},
378 	{"busy-poll", no_argument, 0, 'b'},
379 	{"dump-pkts", no_argument, 0, 'D'},
380 	{"verbose", no_argument, 0, 'v'},
381 	{0, 0, 0, 0}
382 };
383 
384 static void usage(const char *prog)
385 {
386 	const char *str =
387 		"  Usage: %s [OPTIONS]\n"
388 		"  Options:\n"
389 		"  -i, --interface      Use interface\n"
390 		"  -D, --dump-pkts      Dump packets L2 - L5\n"
391 		"  -v, --verbose        Verbose output\n"
392 		"  -b, --busy-poll      Enable busy poll\n";
393 
394 	ksft_print_msg(str, prog);
395 }
396 
397 static bool validate_interface(struct ifobject *ifobj)
398 {
399 	if (!strcmp(ifobj->ifname, ""))
400 		return false;
401 	return true;
402 }
403 
404 static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, int argc,
405 			       char **argv)
406 {
407 	struct ifobject *ifobj;
408 	u32 interface_nb = 0;
409 	int option_index, c;
410 
411 	opterr = 0;
412 
413 	for (;;) {
414 		c = getopt_long(argc, argv, "i:Dvb", long_options, &option_index);
415 		if (c == -1)
416 			break;
417 
418 		switch (c) {
419 		case 'i':
420 			if (interface_nb == 0)
421 				ifobj = ifobj_tx;
422 			else if (interface_nb == 1)
423 				ifobj = ifobj_rx;
424 			else
425 				break;
426 
427 			memcpy(ifobj->ifname, optarg,
428 			       min_t(size_t, MAX_INTERFACE_NAME_CHARS, strlen(optarg)));
429 
430 			ifobj->ifindex = if_nametoindex(ifobj->ifname);
431 			if (!ifobj->ifindex)
432 				exit_with_error(errno);
433 
434 			interface_nb++;
435 			break;
436 		case 'D':
437 			opt_pkt_dump = true;
438 			break;
439 		case 'v':
440 			opt_verbose = true;
441 			break;
442 		case 'b':
443 			ifobj_tx->busy_poll = true;
444 			ifobj_rx->busy_poll = true;
445 			break;
446 		default:
447 			usage(basename(argv[0]));
448 			ksft_exit_xfail();
449 		}
450 	}
451 }
452 
453 static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
454 			     struct ifobject *ifobj_rx)
455 {
456 	u32 i, j;
457 
458 	for (i = 0; i < MAX_INTERFACES; i++) {
459 		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
460 
461 		ifobj->xsk = &ifobj->xsk_arr[0];
462 		ifobj->use_poll = false;
463 		ifobj->use_fill_ring = true;
464 		ifobj->release_rx = true;
465 		ifobj->validation_func = NULL;
466 
467 		if (i == 0) {
468 			ifobj->rx_on = false;
469 			ifobj->tx_on = true;
470 			ifobj->pkt_stream = test->tx_pkt_stream_default;
471 		} else {
472 			ifobj->rx_on = true;
473 			ifobj->tx_on = false;
474 			ifobj->pkt_stream = test->rx_pkt_stream_default;
475 		}
476 
477 		memset(ifobj->umem, 0, sizeof(*ifobj->umem));
478 		ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
479 		ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
480 		if (ifobj->shared_umem && ifobj->rx_on)
481 			ifobj->umem->base_addr = DEFAULT_UMEM_BUFFERS *
482 				XSK_UMEM__DEFAULT_FRAME_SIZE;
483 
484 		for (j = 0; j < MAX_SOCKETS; j++) {
485 			memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
486 			ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
487 		}
488 	}
489 
490 	test->ifobj_tx = ifobj_tx;
491 	test->ifobj_rx = ifobj_rx;
492 	test->current_step = 0;
493 	test->total_steps = 1;
494 	test->nb_sockets = 1;
495 	test->fail = false;
496 }
497 
498 static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
499 			   struct ifobject *ifobj_rx, enum test_mode mode)
500 {
501 	struct pkt_stream *tx_pkt_stream;
502 	struct pkt_stream *rx_pkt_stream;
503 	u32 i;
504 
505 	tx_pkt_stream = test->tx_pkt_stream_default;
506 	rx_pkt_stream = test->rx_pkt_stream_default;
507 	memset(test, 0, sizeof(*test));
508 	test->tx_pkt_stream_default = tx_pkt_stream;
509 	test->rx_pkt_stream_default = rx_pkt_stream;
510 
511 	for (i = 0; i < MAX_INTERFACES; i++) {
512 		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;
513 
514 		ifobj->bind_flags = XDP_USE_NEED_WAKEUP;
515 		if (mode == TEST_MODE_ZC)
516 			ifobj->bind_flags |= XDP_ZEROCOPY;
517 		else
518 			ifobj->bind_flags |= XDP_COPY;
519 	}
520 
521 	test->mode = mode;
522 	__test_spec_init(test, ifobj_tx, ifobj_rx);
523 }
524 
525 static void test_spec_reset(struct test_spec *test)
526 {
527 	__test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
528 }
529 
530 static void test_spec_set_name(struct test_spec *test, const char *name)
531 {
532 	strncpy(test->name, name, MAX_TEST_NAME_SIZE);
533 }
534 
535 static void pkt_stream_reset(struct pkt_stream *pkt_stream)
536 {
537 	if (pkt_stream)
538 		pkt_stream->rx_pkt_nb = 0;
539 }
540 
541 static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb)
542 {
543 	if (pkt_nb >= pkt_stream->nb_pkts)
544 		return NULL;
545 
546 	return &pkt_stream->pkts[pkt_nb];
547 }
548 
549 static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
550 {
551 	while (pkt_stream->rx_pkt_nb < pkt_stream->nb_pkts) {
552 		(*pkts_sent)++;
553 		if (pkt_stream->pkts[pkt_stream->rx_pkt_nb].valid)
554 			return &pkt_stream->pkts[pkt_stream->rx_pkt_nb++];
555 		pkt_stream->rx_pkt_nb++;
556 	}
557 	return NULL;
558 }
559 
560 static void pkt_stream_delete(struct pkt_stream *pkt_stream)
561 {
562 	free(pkt_stream->pkts);
563 	free(pkt_stream);
564 }
565 
566 static void pkt_stream_restore_default(struct test_spec *test)
567 {
568 	struct pkt_stream *tx_pkt_stream = test->ifobj_tx->pkt_stream;
569 	struct pkt_stream *rx_pkt_stream = test->ifobj_rx->pkt_stream;
570 
571 	if (tx_pkt_stream != test->tx_pkt_stream_default) {
572 		pkt_stream_delete(test->ifobj_tx->pkt_stream);
573 		test->ifobj_tx->pkt_stream = test->tx_pkt_stream_default;
574 	}
575 
576 	if (rx_pkt_stream != test->rx_pkt_stream_default) {
577 		pkt_stream_delete(test->ifobj_rx->pkt_stream);
578 		test->ifobj_rx->pkt_stream = test->rx_pkt_stream_default;
579 	}
580 }
581 
582 static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
583 {
584 	struct pkt_stream *pkt_stream;
585 
586 	pkt_stream = calloc(1, sizeof(*pkt_stream));
587 	if (!pkt_stream)
588 		return NULL;
589 
590 	pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts));
591 	if (!pkt_stream->pkts) {
592 		free(pkt_stream);
593 		return NULL;
594 	}
595 
596 	pkt_stream->nb_pkts = nb_pkts;
597 	return pkt_stream;
598 }
599 
600 static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 len)
601 {
602 	pkt->addr = addr + umem->base_addr;
603 	pkt->len = len;
604 	if (len > umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 2 - umem->frame_headroom)
605 		pkt->valid = false;
606 	else
607 		pkt->valid = true;
608 }
609 
610 static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len)
611 {
612 	struct pkt_stream *pkt_stream;
613 	u32 i;
614 
615 	pkt_stream = __pkt_stream_alloc(nb_pkts);
616 	if (!pkt_stream)
617 		exit_with_error(ENOMEM);
618 
619 	pkt_stream->nb_pkts = nb_pkts;
620 	for (i = 0; i < nb_pkts; i++) {
621 		pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size,
622 			pkt_len);
623 		pkt_stream->pkts[i].payload = i;
624 	}
625 
626 	return pkt_stream;
627 }
628 
629 static struct pkt_stream *pkt_stream_clone(struct xsk_umem_info *umem,
630 					   struct pkt_stream *pkt_stream)
631 {
632 	return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
633 }
634 
635 static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
636 {
637 	struct pkt_stream *pkt_stream;
638 
639 	pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len);
640 	test->ifobj_tx->pkt_stream = pkt_stream;
641 	pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len);
642 	test->ifobj_rx->pkt_stream = pkt_stream;
643 }
644 
645 static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
646 				      int offset)
647 {
648 	struct xsk_umem_info *umem = ifobj->umem;
649 	struct pkt_stream *pkt_stream;
650 	u32 i;
651 
652 	pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream);
653 	for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2)
654 		pkt_set(umem, &pkt_stream->pkts[i],
655 			(i % umem->num_frames) * umem->frame_size + offset, pkt_len);
656 
657 	ifobj->pkt_stream = pkt_stream;
658 }
659 
660 static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
661 {
662 	__pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);
663 	__pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
664 }
665 
666 static void pkt_stream_receive_half(struct test_spec *test)
667 {
668 	struct xsk_umem_info *umem = test->ifobj_rx->umem;
669 	struct pkt_stream *pkt_stream = test->ifobj_tx->pkt_stream;
670 	u32 i;
671 
672 	test->ifobj_rx->pkt_stream = pkt_stream_generate(umem, pkt_stream->nb_pkts,
673 							 pkt_stream->pkts[0].len);
674 	pkt_stream = test->ifobj_rx->pkt_stream;
675 	for (i = 1; i < pkt_stream->nb_pkts; i += 2)
676 		pkt_stream->pkts[i].valid = false;
677 }
678 
679 static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb)
680 {
681 	struct pkt *pkt = pkt_stream_get_pkt(ifobject->pkt_stream, pkt_nb);
682 	struct udphdr *udp_hdr;
683 	struct ethhdr *eth_hdr;
684 	struct iphdr *ip_hdr;
685 	void *data;
686 
687 	if (!pkt)
688 		return NULL;
689 	if (!pkt->valid || pkt->len < MIN_PKT_SIZE)
690 		return pkt;
691 
692 	data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr);
693 	udp_hdr = (struct udphdr *)(data + sizeof(struct ethhdr) + sizeof(struct iphdr));
694 	ip_hdr = (struct iphdr *)(data + sizeof(struct ethhdr));
695 	eth_hdr = (struct ethhdr *)data;
696 
697 	gen_udp_hdr(pkt_nb, data, ifobject, udp_hdr);
698 	gen_ip_hdr(ifobject, ip_hdr);
699 	gen_udp_csum(udp_hdr, ip_hdr);
700 	gen_eth_hdr(ifobject, eth_hdr);
701 
702 	return pkt;
703 }
704 
705 static void __pkt_stream_generate_custom(struct ifobject *ifobj,
706 					 struct pkt *pkts, u32 nb_pkts)
707 {
708 	struct pkt_stream *pkt_stream;
709 	u32 i;
710 
711 	pkt_stream = __pkt_stream_alloc(nb_pkts);
712 	if (!pkt_stream)
713 		exit_with_error(ENOMEM);
714 
715 	for (i = 0; i < nb_pkts; i++) {
716 		pkt_stream->pkts[i].addr = pkts[i].addr + ifobj->umem->base_addr;
717 		pkt_stream->pkts[i].len = pkts[i].len;
718 		pkt_stream->pkts[i].payload = i;
719 		pkt_stream->pkts[i].valid = pkts[i].valid;
720 	}
721 
722 	ifobj->pkt_stream = pkt_stream;
723 }
724 
725 static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
726 {
727 	__pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts);
728 	__pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts);
729 }
730 
731 static void pkt_dump(void *pkt, u32 len)
732 {
733 	char s[INET_ADDRSTRLEN];
734 	struct ethhdr *ethhdr;
735 	struct udphdr *udphdr;
736 	struct iphdr *iphdr;
737 	u32 payload, i;
738 
739 	ethhdr = pkt;
740 	iphdr = pkt + sizeof(*ethhdr);
741 	udphdr = pkt + sizeof(*ethhdr) + sizeof(*iphdr);
742 
743 	/*extract L2 frame */
744 	fprintf(stdout, "DEBUG>> L2: dst mac: ");
745 	for (i = 0; i < ETH_ALEN; i++)
746 		fprintf(stdout, "%02X", ethhdr->h_dest[i]);
747 
748 	fprintf(stdout, "\nDEBUG>> L2: src mac: ");
749 	for (i = 0; i < ETH_ALEN; i++)
750 		fprintf(stdout, "%02X", ethhdr->h_source[i]);
751 
752 	/*extract L3 frame */
753 	fprintf(stdout, "\nDEBUG>> L3: ip_hdr->ihl: %02X\n", iphdr->ihl);
754 	fprintf(stdout, "DEBUG>> L3: ip_hdr->saddr: %s\n",
755 		inet_ntop(AF_INET, &iphdr->saddr, s, sizeof(s)));
756 	fprintf(stdout, "DEBUG>> L3: ip_hdr->daddr: %s\n",
757 		inet_ntop(AF_INET, &iphdr->daddr, s, sizeof(s)));
758 	/*extract L4 frame */
759 	fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source));
760 	fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest));
761 	/*extract L5 frame */
762 	payload = ntohl(*((u32 *)(pkt + PKT_HDR_SIZE)));
763 
764 	fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload);
765 	fprintf(stdout, "---------------------------------------\n");
766 }
767 
768 static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr,
769 			      u64 pkt_stream_addr)
770 {
771 	u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
772 	u32 offset = addr % umem->frame_size, expected_offset = 0;
773 
774 	if (!pkt_stream->use_addr_for_fill)
775 		pkt_stream_addr = 0;
776 
777 	expected_offset += (pkt_stream_addr + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;
778 
779 	if (offset == expected_offset)
780 		return true;
781 
782 	ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
783 	return false;
784 }
785 
786 static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
787 {
788 	void *data = xsk_umem__get_data(buffer, addr);
789 	struct iphdr *iphdr = (struct iphdr *)(data + sizeof(struct ethhdr));
790 
791 	if (!pkt) {
792 		ksft_print_msg("[%s] too many packets received\n", __func__);
793 		return false;
794 	}
795 
796 	if (len < MIN_PKT_SIZE || pkt->len < MIN_PKT_SIZE) {
797 		/* Do not try to verify packets that are smaller than minimum size. */
798 		return true;
799 	}
800 
801 	if (pkt->len != len) {
802 		ksft_print_msg("[%s] expected length [%d], got length [%d]\n",
803 			       __func__, pkt->len, len);
804 		return false;
805 	}
806 
807 	if (iphdr->version == IP_PKT_VER && iphdr->tos == IP_PKT_TOS) {
808 		u32 seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE)));
809 
810 		if (opt_pkt_dump)
811 			pkt_dump(data, PKT_SIZE);
812 
813 		if (pkt->payload != seqnum) {
814 			ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n",
815 				       __func__, pkt->payload, seqnum);
816 			return false;
817 		}
818 	} else {
819 		ksft_print_msg("Invalid frame received: ");
820 		ksft_print_msg("[IP_PKT_VER: %02X], [IP_PKT_TOS: %02X]\n", iphdr->version,
821 			       iphdr->tos);
822 		return false;
823 	}
824 
825 	return true;
826 }
827 
828 static void kick_tx(struct xsk_socket_info *xsk)
829 {
830 	int ret;
831 
832 	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
833 	if (ret >= 0)
834 		return;
835 	if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
836 		usleep(100);
837 		return;
838 	}
839 	exit_with_error(errno);
840 }
841 
842 static void kick_rx(struct xsk_socket_info *xsk)
843 {
844 	int ret;
845 
846 	ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
847 	if (ret < 0)
848 		exit_with_error(errno);
849 }
850 
851 static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
852 {
853 	unsigned int rcvd;
854 	u32 idx;
855 
856 	if (xsk_ring_prod__needs_wakeup(&xsk->tx))
857 		kick_tx(xsk);
858 
859 	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
860 	if (rcvd) {
861 		if (rcvd > xsk->outstanding_tx) {
862 			u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);
863 
864 			ksft_print_msg("[%s] Too many packets completed\n", __func__);
865 			ksft_print_msg("Last completion address: %llx\n", addr);
866 			return TEST_FAILURE;
867 		}
868 
869 		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
870 		xsk->outstanding_tx -= rcvd;
871 	}
872 
873 	return TEST_PASS;
874 }
875 
876 static int receive_pkts(struct test_spec *test, struct pollfd *fds)
877 {
878 	struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
879 	struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream;
880 	u32 idx_rx = 0, idx_fq = 0, rcvd, i, pkts_sent = 0;
881 	struct xsk_socket_info *xsk = test->ifobj_rx->xsk;
882 	struct ifobject *ifobj = test->ifobj_rx;
883 	struct xsk_umem_info *umem = xsk->umem;
884 	struct pkt *pkt;
885 	int ret;
886 
887 	ret = gettimeofday(&tv_now, NULL);
888 	if (ret)
889 		exit_with_error(errno);
890 	timeradd(&tv_now, &tv_timeout, &tv_end);
891 
892 	pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
893 	while (pkt) {
894 		ret = gettimeofday(&tv_now, NULL);
895 		if (ret)
896 			exit_with_error(errno);
897 		if (timercmp(&tv_now, &tv_end, >)) {
898 			ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
899 			return TEST_FAILURE;
900 		}
901 
902 		kick_rx(xsk);
903 		if (ifobj->use_poll) {
904 			ret = poll(fds, 1, POLL_TMOUT);
905 			if (ret < 0)
906 				exit_with_error(errno);
907 
908 			if (!ret) {
909 				if (!is_umem_valid(test->ifobj_tx))
910 					return TEST_PASS;
911 
912 				ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
913 				return TEST_FAILURE;
914 
915 			}
916 
917 			if (!(fds->revents & POLLIN))
918 				continue;
919 		}
920 
921 		rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
922 		if (!rcvd)
923 			continue;
924 
925 		if (ifobj->use_fill_ring) {
926 			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
927 			while (ret != rcvd) {
928 				if (ret < 0)
929 					exit_with_error(-ret);
930 				if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
931 					ret = poll(fds, 1, POLL_TMOUT);
932 					if (ret < 0)
933 						exit_with_error(errno);
934 				}
935 				ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
936 			}
937 		}
938 
939 		for (i = 0; i < rcvd; i++) {
940 			const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
941 			u64 addr = desc->addr, orig;
942 
943 			orig = xsk_umem__extract_addr(addr);
944 			addr = xsk_umem__add_offset_to_addr(addr);
945 
946 			if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len) ||
947 			    !is_offset_correct(umem, pkt_stream, addr, pkt->addr))
948 				return TEST_FAILURE;
949 
950 			if (ifobj->use_fill_ring)
951 				*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
952 			pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
953 		}
954 
955 		if (ifobj->use_fill_ring)
956 			xsk_ring_prod__submit(&umem->fq, rcvd);
957 		if (ifobj->release_rx)
958 			xsk_ring_cons__release(&xsk->rx, rcvd);
959 
960 		pthread_mutex_lock(&pacing_mutex);
961 		pkts_in_flight -= pkts_sent;
962 		if (pkts_in_flight < umem->num_frames)
963 			pthread_cond_signal(&pacing_cond);
964 		pthread_mutex_unlock(&pacing_mutex);
965 		pkts_sent = 0;
966 	}
967 
968 	return TEST_PASS;
969 }
970 
/* Produce up to BATCH_SIZE tx descriptors from the packet stream and submit
 * them to the kernel.
 *
 * @pkt_nb: in/out index into the packet stream; advanced by the number of
 *	    descriptors actually produced.
 * @timeout: true when the rx side's umem is invalid, i.e. a poll() timeout
 *	     is the expected (passing) outcome of this step.
 *
 * Returns TEST_PASS, TEST_FAILURE, or TEST_CONTINUE (timeout mode only) to
 * tell the caller to keep sending.
 */
static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fds,
		       bool timeout)
{
	struct xsk_socket_info *xsk = ifobject->xsk;
	bool use_poll = ifobject->use_poll;
	u32 i, idx = 0, valid_pkts = 0;
	int ret;

	/* Wait until a full batch of tx descriptors can be reserved, reaping
	 * completions (and optionally polling) in the meantime. */
	while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
		if (use_poll) {
			ret = poll(fds, 1, POLL_TMOUT);
			if (timeout) {
				if (ret < 0) {
					ksft_print_msg("ERROR: [%s] Poll error %d\n",
						       __func__, errno);
					return TEST_FAILURE;
				}
				/* In timeout mode, poll() timing out is the
				 * expected result. */
				if (ret == 0)
					return TEST_PASS;
				break;
			}
			if (ret <= 0) {
				ksft_print_msg("ERROR: [%s] Poll error %d\n",
					       __func__, errno);
				return TEST_FAILURE;
			}
		}

		complete_pkts(xsk, BATCH_SIZE);
	}

	/* Fill the reserved descriptors from the packet stream; stop early
	 * when the stream runs out (i then holds the produced count). */
	for (i = 0; i < BATCH_SIZE; i++) {
		struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
		struct pkt *pkt = pkt_generate(ifobject, *pkt_nb);

		if (!pkt)
			break;

		tx_desc->addr = pkt->addr;
		tx_desc->len = pkt->len;
		(*pkt_nb)++;
		if (pkt->valid)
			valid_pkts++;
	}

	/* Pacing: if we are close to exhausting the umem frames, kick tx and
	 * block until the rx side signals pacing_cond (it does so once
	 * pkts_in_flight drops below the number of umem frames). */
	pthread_mutex_lock(&pacing_mutex);
	pkts_in_flight += valid_pkts;
	/* pkts_in_flight might be negative if many invalid packets are sent */
	if (pkts_in_flight >= (int)(ifobject->umem->num_frames - BATCH_SIZE)) {
		kick_tx(xsk);
		pthread_cond_wait(&pacing_cond, &pacing_mutex);
	}
	pthread_mutex_unlock(&pacing_mutex);

	/* Submit only the i descriptors that were actually filled. */
	xsk_ring_prod__submit(&xsk->tx, i);
	xsk->outstanding_tx += valid_pkts;

	if (use_poll) {
		ret = poll(fds, 1, POLL_TMOUT);
		if (ret <= 0) {
			if (ret == 0 && timeout)
				return TEST_PASS;

			ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
			return TEST_FAILURE;
		}
	}

	if (!timeout) {
		if (complete_pkts(xsk, i))
			return TEST_FAILURE;

		usleep(10);
		return TEST_PASS;
	}

	return TEST_CONTINUE;
}
1049 
1050 static void wait_for_tx_completion(struct xsk_socket_info *xsk)
1051 {
1052 	while (xsk->outstanding_tx)
1053 		complete_pkts(xsk, BATCH_SIZE);
1054 }
1055 
1056 static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
1057 {
1058 	bool timeout = !is_umem_valid(test->ifobj_rx);
1059 	struct pollfd fds = { };
1060 	u32 pkt_cnt = 0, ret;
1061 
1062 	fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
1063 	fds.events = POLLOUT;
1064 
1065 	while (pkt_cnt < ifobject->pkt_stream->nb_pkts) {
1066 		ret = __send_pkts(ifobject, &pkt_cnt, &fds, timeout);
1067 		if ((ret || test->fail) && !timeout)
1068 			return TEST_FAILURE;
1069 		else if (ret == TEST_PASS && timeout)
1070 			return ret;
1071 	}
1072 
1073 	wait_for_tx_completion(ifobject->xsk);
1074 	return TEST_PASS;
1075 }
1076 
1077 static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
1078 {
1079 	int fd = xsk_socket__fd(xsk), err;
1080 	socklen_t optlen, expected_len;
1081 
1082 	optlen = sizeof(*stats);
1083 	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
1084 	if (err) {
1085 		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
1086 			       __func__, -err, strerror(-err));
1087 		return TEST_FAILURE;
1088 	}
1089 
1090 	expected_len = sizeof(struct xdp_statistics);
1091 	if (optlen != expected_len) {
1092 		ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n",
1093 			       __func__, expected_len, optlen);
1094 		return TEST_FAILURE;
1095 	}
1096 
1097 	return TEST_PASS;
1098 }
1099 
1100 static int validate_rx_dropped(struct ifobject *ifobject)
1101 {
1102 	struct xsk_socket *xsk = ifobject->xsk->xsk;
1103 	struct xdp_statistics stats;
1104 	int err;
1105 
1106 	kick_rx(ifobject->xsk);
1107 
1108 	err = get_xsk_stats(xsk, &stats);
1109 	if (err)
1110 		return TEST_FAILURE;
1111 
1112 	if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2)
1113 		return TEST_PASS;
1114 
1115 	return TEST_FAILURE;
1116 }
1117 
1118 static int validate_rx_full(struct ifobject *ifobject)
1119 {
1120 	struct xsk_socket *xsk = ifobject->xsk->xsk;
1121 	struct xdp_statistics stats;
1122 	int err;
1123 
1124 	usleep(1000);
1125 	kick_rx(ifobject->xsk);
1126 
1127 	err = get_xsk_stats(xsk, &stats);
1128 	if (err)
1129 		return TEST_FAILURE;
1130 
1131 	if (stats.rx_ring_full)
1132 		return TEST_PASS;
1133 
1134 	return TEST_FAILURE;
1135 }
1136 
1137 static int validate_fill_empty(struct ifobject *ifobject)
1138 {
1139 	struct xsk_socket *xsk = ifobject->xsk->xsk;
1140 	struct xdp_statistics stats;
1141 	int err;
1142 
1143 	usleep(1000);
1144 	kick_rx(ifobject->xsk);
1145 
1146 	err = get_xsk_stats(xsk, &stats);
1147 	if (err)
1148 		return TEST_FAILURE;
1149 
1150 	if (stats.rx_fill_ring_empty_descs)
1151 		return TEST_PASS;
1152 
1153 	return TEST_FAILURE;
1154 }
1155 
1156 static int validate_tx_invalid_descs(struct ifobject *ifobject)
1157 {
1158 	struct xsk_socket *xsk = ifobject->xsk->xsk;
1159 	int fd = xsk_socket__fd(xsk);
1160 	struct xdp_statistics stats;
1161 	socklen_t optlen;
1162 	int err;
1163 
1164 	optlen = sizeof(stats);
1165 	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
1166 	if (err) {
1167 		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
1168 			       __func__, -err, strerror(-err));
1169 		return TEST_FAILURE;
1170 	}
1171 
1172 	if (stats.tx_invalid_descs != ifobject->pkt_stream->nb_pkts / 2) {
1173 		ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%u] expected [%u]\n",
1174 			       __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts);
1175 		return TEST_FAILURE;
1176 	}
1177 
1178 	return TEST_PASS;
1179 }
1180 
1181 static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject,
1182 				 struct xsk_umem_info *umem, bool tx)
1183 {
1184 	int i, ret;
1185 
1186 	for (i = 0; i < test->nb_sockets; i++) {
1187 		bool shared = (ifobject->shared_umem && tx) ? true : !!i;
1188 		u32 ctr = 0;
1189 
1190 		while (ctr++ < SOCK_RECONF_CTR) {
1191 			ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem,
1192 						     ifobject, shared);
1193 			if (!ret)
1194 				break;
1195 
1196 			/* Retry if it fails as xsk_socket__create() is asynchronous */
1197 			if (ctr >= SOCK_RECONF_CTR)
1198 				exit_with_error(-ret);
1199 			usleep(USLEEP_MAX);
1200 		}
1201 		if (ifobject->busy_poll)
1202 			enable_busy_poll(&ifobject->xsk_arr[i]);
1203 	}
1204 }
1205 
/* Tx-side setup for the shared-umem case: bind sockets to the rx thread's
 * umem and reuse its xskmap fd instead of creating separate resources. */
static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
{
	xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true);
	ifobject->xsk = &ifobject->xsk_arr[0];
	ifobject->xsk_map_fd = test->ifobj_rx->xsk_map_fd;
	/* Mirror the rx umem state so both directions see the same buffers. */
	memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
}
1213 
1214 static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream)
1215 {
1216 	u32 idx = 0, i, buffers_to_fill;
1217 	int ret;
1218 
1219 	if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
1220 		buffers_to_fill = umem->num_frames;
1221 	else
1222 		buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS;
1223 
1224 	ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
1225 	if (ret != buffers_to_fill)
1226 		exit_with_error(ENOSPC);
1227 	for (i = 0; i < buffers_to_fill; i++) {
1228 		u64 addr;
1229 
1230 		if (pkt_stream->use_addr_for_fill) {
1231 			struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i);
1232 
1233 			if (!pkt)
1234 				break;
1235 			addr = pkt->addr;
1236 		} else {
1237 			addr = i * umem->frame_size;
1238 		}
1239 
1240 		*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
1241 	}
1242 	xsk_ring_prod__submit(&umem->fq, i);
1243 }
1244 
1245 static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
1246 {
1247 	u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
1248 	int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
1249 	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
1250 	u32 queue_id = 0;
1251 	int ret, fd;
1252 	void *bufs;
1253 
1254 	if (ifobject->umem->unaligned_mode)
1255 		mmap_flags |= MAP_HUGETLB;
1256 
1257 	if (ifobject->shared_umem)
1258 		umem_sz *= 2;
1259 
1260 	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
1261 	if (bufs == MAP_FAILED)
1262 		exit_with_error(errno);
1263 
1264 	ret = xsk_configure_umem(ifobject->umem, bufs, umem_sz);
1265 	if (ret)
1266 		exit_with_error(-ret);
1267 
1268 	xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream);
1269 
1270 	xsk_configure_socket(test, ifobject, ifobject->umem, false);
1271 
1272 	ifobject->xsk = &ifobject->xsk_arr[0];
1273 
1274 	if (!ifobject->rx_on)
1275 		return;
1276 
1277 	fd = xsk_socket__fd(ifobject->xsk->xsk);
1278 	ret = bpf_map_update_elem(ifobject->xsk_map_fd, &queue_id, &fd, 0);
1279 	if (ret)
1280 		exit_with_error(errno);
1281 }
1282 
/* Tx worker thread entry point: perform first-step setup (shared or
 * private umem), send the whole packet stream, then run the optional
 * tx-side validation function and report any failure. */
static void *worker_testapp_validate_tx(void *arg)
{
	struct test_spec *test = (struct test_spec *)arg;
	struct ifobject *ifobject = test->ifobj_tx;
	int err;

	/* Later steps reuse the resources created in step 1. */
	if (test->current_step == 1) {
		if (!ifobject->shared_umem)
			thread_common_ops(test, ifobject);
		else
			thread_common_ops_tx(test, ifobject);
	}

	print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts,
		      ifobject->ifname);
	err = send_pkts(test, ifobject);

	if (!err && ifobject->validation_func)
		err = ifobject->validation_func(ifobject);
	if (err)
		report_failure(test);

	pthread_exit(NULL);
}
1307 
1308 static void *worker_testapp_validate_rx(void *arg)
1309 {
1310 	struct test_spec *test = (struct test_spec *)arg;
1311 	struct ifobject *ifobject = test->ifobj_rx;
1312 	int id = 0, err, fd = xsk_socket__fd(ifobject->xsk->xsk);
1313 	struct pollfd fds = { };
1314 	u32 queue_id = 0;
1315 
1316 	if (test->current_step == 1) {
1317 		thread_common_ops(test, ifobject);
1318 	} else {
1319 		bpf_map_delete_elem(ifobject->xsk_map_fd, &id);
1320 		err = bpf_map_update_elem(ifobject->xsk_map_fd, &queue_id, &fd, 0);
1321 		if (err) {
1322 			printf("Error: Failed to update xskmap, error %s\n", strerror(err));
1323 			exit_with_error(err);
1324 		}
1325 	}
1326 
1327 	fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
1328 	fds.events = POLLIN;
1329 
1330 	pthread_barrier_wait(&barr);
1331 
1332 	err = receive_pkts(test, &fds);
1333 
1334 	if (!err && ifobject->validation_func)
1335 		err = ifobject->validation_func(ifobject);
1336 	if (err) {
1337 		report_failure(test);
1338 		pthread_mutex_lock(&pacing_mutex);
1339 		pthread_cond_signal(&pacing_cond);
1340 		pthread_mutex_unlock(&pacing_mutex);
1341 	}
1342 
1343 	pthread_exit(NULL);
1344 }
1345 
1346 static void testapp_clean_xsk_umem(struct ifobject *ifobj)
1347 {
1348 	u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;
1349 
1350 	if (ifobj->shared_umem)
1351 		umem_sz *= 2;
1352 
1353 	xsk_umem__delete(ifobj->umem->umem);
1354 	munmap(ifobj->umem->buffer, umem_sz);
1355 }
1356 
/* SIGUSR1 handler: terminate the worker thread stuck in a blocking call
 * (used by the single-thread poll-timeout tests). */
static void handler(int signum)
{
	pthread_exit(NULL);
}
1361 
/* Run a test using only one worker thread (the tx or rx side in @ifobj).
 * The worker is expected to end up blocking, so it is terminated with
 * SIGUSR1 (see handler()). Returns non-zero on test failure. */
static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj,
						  enum test_type type)
{
	/* Shared-umem handling is disabled for this single-thread run and
	 * restored before returning. */
	bool old_shared_umem = ifobj->shared_umem;
	pthread_t t0;

	if (pthread_barrier_init(&barr, NULL, 2))
		exit_with_error(errno);

	test->current_step++;
	if (type == TEST_TYPE_POLL_RXQ_TMOUT)
		pkt_stream_reset(ifobj->pkt_stream);
	pkts_in_flight = 0;

	test->ifobj_rx->shared_umem = false;
	test->ifobj_tx->shared_umem = false;

	signal(SIGUSR1, handler);
	/* Spawn thread */
	pthread_create(&t0, NULL, ifobj->func_ptr, test);

	/* Only the rx worker waits on the barrier itself, so skip it for the
	 * tx-timeout case to avoid deadlocking here. */
	if (type != TEST_TYPE_POLL_TXQ_TMOUT)
		pthread_barrier_wait(&barr);

	if (pthread_barrier_destroy(&barr))
		exit_with_error(errno);

	/* Kick the worker out of its blocking call and reap it. */
	pthread_kill(t0, SIGUSR1);
	pthread_join(t0, NULL);

	/* On the final step (or on failure) tear everything down. */
	if (test->total_steps == test->current_step || test->fail) {
		u32 queue_id = 0;

		xsk_socket__delete(ifobj->xsk->xsk);
		bpf_map_delete_elem(ifobj->xsk_map_fd, &queue_id);
		testapp_clean_xsk_umem(ifobj);
	}

	test->ifobj_rx->shared_umem = old_shared_umem;
	test->ifobj_tx->shared_umem = old_shared_umem;

	return !!test->fail;
}
1405 
/* Run one traffic step with a separate rx and tx worker thread. The rx
 * thread is started first and the barrier guarantees it is ready before
 * any packet is sent. Returns non-zero on test failure. */
static int testapp_validate_traffic(struct test_spec *test)
{
	struct ifobject *ifobj_tx = test->ifobj_tx;
	struct ifobject *ifobj_rx = test->ifobj_rx;
	pthread_t t0, t1;

	if (pthread_barrier_init(&barr, NULL, 2))
		exit_with_error(errno);

	test->current_step++;
	pkt_stream_reset(ifobj_rx->pkt_stream);
	pkts_in_flight = 0;

	/* Spawn RX thread */
	pthread_create(&t0, NULL, ifobj_rx->func_ptr, test);

	/* Wait until the rx worker signals it is ready to receive. */
	pthread_barrier_wait(&barr);
	if (pthread_barrier_destroy(&barr))
		exit_with_error(errno);

	/* Spawn TX thread */
	pthread_create(&t1, NULL, ifobj_tx->func_ptr, test);

	pthread_join(t1, NULL);
	pthread_join(t0, NULL);

	/* On the final step (or on failure) tear down sockets and umems;
	 * a shared umem is owned by the rx side and cleaned only once. */
	if (test->total_steps == test->current_step || test->fail) {
		xsk_socket__delete(ifobj_tx->xsk->xsk);
		xsk_socket__delete(ifobj_rx->xsk->xsk);
		testapp_clean_xsk_umem(ifobj_rx);
		if (!ifobj_tx->shared_umem)
			testapp_clean_xsk_umem(ifobj_tx);
	}

	return !!test->fail;
}
1442 
1443 static void testapp_teardown(struct test_spec *test)
1444 {
1445 	int i;
1446 
1447 	test_spec_set_name(test, "TEARDOWN");
1448 	for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
1449 		if (testapp_validate_traffic(test))
1450 			return;
1451 		test_spec_reset(test);
1452 	}
1453 }
1454 
1455 static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
1456 {
1457 	thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr;
1458 	struct ifobject *tmp_ifobj = (*ifobj1);
1459 
1460 	(*ifobj1)->func_ptr = (*ifobj2)->func_ptr;
1461 	(*ifobj2)->func_ptr = tmp_func_ptr;
1462 
1463 	*ifobj1 = *ifobj2;
1464 	*ifobj2 = tmp_ifobj;
1465 }
1466 
/* BIDIRECTIONAL test: enable rx on the tx object and tx on the rx object,
 * run traffic, then swap the directions and run again. The second swap
 * restores the original orientation for subsequent tests. */
static void testapp_bidi(struct test_spec *test)
{
	test_spec_set_name(test, "BIDIRECTIONAL");
	test->ifobj_tx->rx_on = true;
	test->ifobj_rx->tx_on = true;
	test->total_steps = 2;
	if (testapp_validate_traffic(test))
		return;

	print_verbose("Switching Tx/Rx vectors\n");
	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
	testapp_validate_traffic(test);

	/* Undo the swap so later tests see the original orientation. */
	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
}
1482 
/* Tear down the first socket pair and switch both sides over to the
 * second pair (xsk_arr[1]), refreshing the xskmap entry.
 * NOTE(review): fd is captured from the OLD rx socket at declaration time,
 * before that socket is deleted and before the xsk pointers are moved to
 * xsk_arr[1] — presumably intentional (the fd may double as the umem fd
 * and stay open), but confirm the map should not instead hold the fd of
 * the new xsk_arr[1] socket. */
static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
{
	int ret, queue_id = 0, fd = xsk_socket__fd(ifobj_rx->xsk->xsk);

	xsk_socket__delete(ifobj_tx->xsk->xsk);
	xsk_socket__delete(ifobj_rx->xsk->xsk);
	ifobj_tx->xsk = &ifobj_tx->xsk_arr[1];
	ifobj_rx->xsk = &ifobj_rx->xsk_arr[1];

	ret = bpf_map_update_elem(ifobj_rx->xsk_map_fd, &queue_id, &fd, 0);
	if (ret)
		exit_with_error(errno);
}
1496 
/* BPF_RES test: run one step, then replace the sockets with the second
 * pre-created pair and run again to verify the BPF resources (program and
 * xskmap) survive a socket replacement. */
static void testapp_bpf_res(struct test_spec *test)
{
	test_spec_set_name(test, "BPF_RES");
	test->total_steps = 2;
	test->nb_sockets = 2;
	if (testapp_validate_traffic(test))
		return;

	swap_xsk_resources(test->ifobj_tx, test->ifobj_rx);
	testapp_validate_traffic(test);
}
1508 
/* UMEM_HEADROOM test: verify traffic still flows with a non-default rx
 * umem frame headroom configured. */
static void testapp_headroom(struct test_spec *test)
{
	test_spec_set_name(test, "UMEM_HEADROOM");
	test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
	testapp_validate_traffic(test);
}
1515 
/* STAT_RX_DROPPED test: enlarge half of the packets and shrink the usable
 * rx buffer space via the frame headroom so that exactly those packets no
 * longer fit and are dropped; validate_rx_dropped() checks the counter. */
static void testapp_stats_rx_dropped(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_RX_DROPPED");
	pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
	/* Leave room for a MIN_PKT_SIZE*3 payload but not MIN_PKT_SIZE*4. */
	test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
		XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
	pkt_stream_receive_half(test);
	test->ifobj_rx->validation_func = validate_rx_dropped;
	testapp_validate_traffic(test);
}
1526 
/* STAT_TX_INVALID test: give half of the packets an invalid (oversized)
 * length; validate_tx_invalid_descs() checks the tx_invalid_descs count. */
static void testapp_stats_tx_invalid_descs(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_TX_INVALID");
	pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
	test->ifobj_tx->validation_func = validate_tx_invalid_descs;
	testapp_validate_traffic(test);

	pkt_stream_restore_default(test);
}
1536 
/* STAT_RX_FULL test: send more packets than the rx ring can hold while
 * disabling rx buffer release, forcing the rx ring to overflow;
 * validate_rx_full() checks the rx_ring_full counter. */
static void testapp_stats_rx_full(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_RX_FULL");
	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE);
	/* NOTE(review): the rx pkt_stream pointer is overwritten here;
	 * presumably pkt_stream_restore_default() reclaims it — confirm no
	 * leak. */
	test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
							 DEFAULT_UMEM_BUFFERS, PKT_SIZE);
	if (!test->ifobj_rx->pkt_stream)
		exit_with_error(ENOMEM);

	test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
	test->ifobj_rx->release_rx = false;
	test->ifobj_rx->validation_func = validate_rx_full;
	testapp_validate_traffic(test);

	pkt_stream_restore_default(test);
}
1553 
/* STAT_RX_FILL_EMPTY test: disable use of the fill ring so the kernel
 * starves for rx buffers; validate_fill_empty() checks the
 * rx_fill_ring_empty_descs counter. */
static void testapp_stats_fill_empty(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_RX_FILL_EMPTY");
	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE);
	test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
							 DEFAULT_UMEM_BUFFERS, PKT_SIZE);
	if (!test->ifobj_rx->pkt_stream)
		exit_with_error(ENOMEM);

	test->ifobj_rx->use_fill_ring = false;
	test->ifobj_rx->validation_func = validate_fill_empty;
	testapp_validate_traffic(test);

	pkt_stream_restore_default(test);
}
1569 
1570 /* Simple test */
1571 static bool hugepages_present(struct ifobject *ifobject)
1572 {
1573 	const size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size;
1574 	void *bufs;
1575 
1576 	bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
1577 		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
1578 	if (bufs == MAP_FAILED)
1579 		return false;
1580 
1581 	munmap(bufs, mmap_sz);
1582 	return true;
1583 }
1584 
/* UNALIGNED_MODE test: run traffic with the umem in unaligned-chunk mode;
 * half of the packets are offset so they straddle a buffer boundary.
 * Returns false when the test had to be skipped (no huge pages). */
static bool testapp_unaligned(struct test_spec *test)
{
	if (!hugepages_present(test->ifobj_tx)) {
		ksft_test_result_skip("No 2M huge pages present.\n");
		return false;
	}

	test_spec_set_name(test, "UNALIGNED_MODE");
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	/* Let half of the packets straddle a buffer boundrary */
	pkt_stream_replace_half(test, PKT_SIZE, -PKT_SIZE / 2);
	/* The fill ring must mirror the exact packet addresses since the
	 * buffers are no longer uniformly laid out. */
	test->ifobj_rx->pkt_stream->use_addr_for_fill = true;
	testapp_validate_traffic(test);

	pkt_stream_restore_default(test);
	return true;
}
1603 
/* RUN_TO_COMPLETION_SINGLE_PKT test: send exactly one valid packet. */
static void testapp_single_pkt(struct test_spec *test)
{
	struct pkt pkts[] = {{0x1000, PKT_SIZE, 0, true}};

	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	testapp_validate_traffic(test);
	pkt_stream_restore_default(test);
}
1612 
/* Send a hand-crafted mix of valid and invalid tx descriptors and verify
 * that the kernel accepts or rejects each one as annotated below. The
 * validity flags are adjusted for unaligned mode, 2K frames and shared
 * umems before the stream is installed. */
static void testapp_invalid_desc(struct test_spec *test)
{
	struct pkt pkts[] = {
		/* Zero packet address allowed */
		{0, PKT_SIZE, 0, true},
		/* Allowed packet */
		{0x1000, PKT_SIZE, 0, true},
		/* Straddling the start of umem */
		{-2, PKT_SIZE, 0, false},
		/* Packet too large */
		{0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
		/* After umem ends */
		{UMEM_SIZE, PKT_SIZE, 0, false},
		/* Straddle the end of umem */
		{UMEM_SIZE - PKT_SIZE / 2, PKT_SIZE, 0, false},
		/* Straddle a page boundrary */
		{0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false},
		/* Straddle a 2K boundrary */
		{0x3800 - PKT_SIZE / 2, PKT_SIZE, 0, true},
		/* Valid packet for synch so that something is received */
		{0x4000, PKT_SIZE, 0, true}};

	if (test->ifobj_tx->umem->unaligned_mode) {
		/* Crossing a page boundrary allowed */
		pkts[6].valid = true;
	}
	if (test->ifobj_tx->umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
		/* Crossing a 2K frame size boundrary not allowed */
		pkts[7].valid = false;
	}

	if (test->ifobj_tx->shared_umem) {
		/* A shared umem is mapped at double size, so shift the
		 * end-of-umem cases up to keep them out of range. */
		pkts[4].addr += UMEM_SIZE;
		pkts[5].addr += UMEM_SIZE;
	}

	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	testapp_validate_traffic(test);
	pkt_stream_restore_default(test);
}
1653 
1654 static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac,
1655 		       const char *dst_ip, const char *src_ip, const u16 dst_port,
1656 		       const u16 src_port, thread_func_t func_ptr, bool load_xdp)
1657 {
1658 	int xsk_map_fd, prog_fd, err;
1659 	struct in_addr ip;
1660 
1661 	memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN);
1662 	memcpy(ifobj->src_mac, src_mac, ETH_ALEN);
1663 
1664 	inet_aton(dst_ip, &ip);
1665 	ifobj->dst_ip = ip.s_addr;
1666 
1667 	inet_aton(src_ip, &ip);
1668 	ifobj->src_ip = ip.s_addr;
1669 
1670 	ifobj->dst_port = dst_port;
1671 	ifobj->src_port = src_port;
1672 
1673 	ifobj->func_ptr = func_ptr;
1674 
1675 	if (!load_xdp)
1676 		return;
1677 
1678 	err = xsk_load_xdp_program(&xsk_map_fd, &prog_fd);
1679 	if (err) {
1680 		printf("Error loading XDP program\n");
1681 		exit_with_error(err);
1682 	}
1683 
1684 	ifobj->xsk_map_fd = xsk_map_fd;
1685 	ifobj->prog_fd = prog_fd;
1686 	ifobj->xdp_flags = mode_to_xdp_flags(TEST_MODE_SKB);
1687 	ifobj->link_fd = xsk_attach_xdp_program(ifobj->ifindex, prog_fd, ifobj->xdp_flags);
1688 	if (ifobj->link_fd < 0) {
1689 		printf("Error attaching XDP program\n");
1690 		exit_with_error(ifobj->link_fd);
1691 	}
1692 }
1693 
/* Dispatch one (mode, type) combination to its test implementation and
 * report the ksft result. Tests the mode cannot support are skipped;
 * failures are reported by the test implementations themselves. */
static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type)
{
	switch (type) {
	case TEST_TYPE_STATS_RX_DROPPED:
		if (mode == TEST_MODE_ZC) {
			ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n");
			return;
		}
		testapp_stats_rx_dropped(test);
		break;
	case TEST_TYPE_STATS_TX_INVALID_DESCS:
		testapp_stats_tx_invalid_descs(test);
		break;
	case TEST_TYPE_STATS_RX_FULL:
		testapp_stats_rx_full(test);
		break;
	case TEST_TYPE_STATS_FILL_EMPTY:
		testapp_stats_fill_empty(test);
		break;
	case TEST_TYPE_TEARDOWN:
		testapp_teardown(test);
		break;
	case TEST_TYPE_BIDI:
		testapp_bidi(test);
		break;
	case TEST_TYPE_BPF_RES:
		testapp_bpf_res(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION:
		test_spec_set_name(test, "RUN_TO_COMPLETION");
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT:
		test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT");
		testapp_single_pkt(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME:
		test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
		test->ifobj_tx->umem->frame_size = 2048;
		test->ifobj_rx->umem->frame_size = 2048;
		pkt_stream_replace(test, DEFAULT_PKT_CNT, PKT_SIZE);
		testapp_validate_traffic(test);

		pkt_stream_restore_default(test);
		break;
	case TEST_TYPE_RX_POLL:
		test->ifobj_rx->use_poll = true;
		test_spec_set_name(test, "POLL_RX");
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_TX_POLL:
		test->ifobj_tx->use_poll = true;
		test_spec_set_name(test, "POLL_TX");
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_POLL_TXQ_TMOUT:
		test_spec_set_name(test, "POLL_TXQ_FULL");
		test->ifobj_tx->use_poll = true;
		/* create invalid frame by set umem frame_size and pkt length equal to 2048 */
		test->ifobj_tx->umem->frame_size = 2048;
		pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);
		/* No rx thread: the tx side is expected to time out. */
		testapp_validate_traffic_single_thread(test, test->ifobj_tx, type);
		pkt_stream_restore_default(test);
		break;
	case TEST_TYPE_POLL_RXQ_TMOUT:
		test_spec_set_name(test, "POLL_RXQ_EMPTY");
		test->ifobj_rx->use_poll = true;
		/* No tx thread: the rx side is expected to time out. */
		testapp_validate_traffic_single_thread(test, test->ifobj_rx, type);
		break;
	case TEST_TYPE_ALIGNED_INV_DESC:
		test_spec_set_name(test, "ALIGNED_INV_DESC");
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME:
		test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE");
		test->ifobj_tx->umem->frame_size = 2048;
		test->ifobj_rx->umem->frame_size = 2048;
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_UNALIGNED_INV_DESC:
		if (!hugepages_present(test->ifobj_tx)) {
			ksft_test_result_skip("No 2M huge pages present.\n");
			return;
		}
		test_spec_set_name(test, "UNALIGNED_INV_DESC");
		test->ifobj_tx->umem->unaligned_mode = true;
		test->ifobj_rx->umem->unaligned_mode = true;
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_UNALIGNED:
		if (!testapp_unaligned(test))
			return;
		break;
	case TEST_TYPE_HEADROOM:
		testapp_headroom(test);
		break;
	default:
		break;
	}

	/* Skipped tests returned above; anything that ran and did not set
	 * test->fail counts as a pass. */
	if (!test->fail)
		ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
				      test->name);
}
1798 
1799 static struct ifobject *ifobject_create(void)
1800 {
1801 	struct ifobject *ifobj;
1802 
1803 	ifobj = calloc(1, sizeof(struct ifobject));
1804 	if (!ifobj)
1805 		return NULL;
1806 
1807 	ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
1808 	if (!ifobj->xsk_arr)
1809 		goto out_xsk_arr;
1810 
1811 	ifobj->umem = calloc(1, sizeof(*ifobj->umem));
1812 	if (!ifobj->umem)
1813 		goto out_umem;
1814 
1815 	return ifobj;
1816 
1817 out_umem:
1818 	free(ifobj->xsk_arr);
1819 out_xsk_arr:
1820 	free(ifobj);
1821 	return NULL;
1822 }
1823 
/* Release an interface object created by ifobject_create(), closing the
 * BPF program and xskmap fds it owns. */
static void ifobject_delete(struct ifobject *ifobj)
{
	close(ifobj->prog_fd);
	close(ifobj->xsk_map_fd);

	free(ifobj->umem);
	free(ifobj->xsk_arr);
	free(ifobj);
}
1833 
1834 static bool is_xdp_supported(int ifindex)
1835 {
1836 	int flags = XDP_FLAGS_DRV_MODE;
1837 
1838 	LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = flags);
1839 	struct bpf_insn insns[2] = {
1840 		BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
1841 		BPF_EXIT_INSN()
1842 	};
1843 	int prog_fd, insn_cnt = ARRAY_SIZE(insns);
1844 	int err;
1845 
1846 	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
1847 	if (prog_fd < 0)
1848 		return false;
1849 
1850 	err = bpf_xdp_attach(ifindex, prog_fd, flags, NULL);
1851 	if (err) {
1852 		close(prog_fd);
1853 		return false;
1854 	}
1855 
1856 	bpf_xdp_detach(ifindex, flags, NULL);
1857 	close(prog_fd);
1858 
1859 	return true;
1860 }
1861 
/* Re-attach the already-loaded XDP program of @ifobj in driver (native)
 * mode and verify via bpf_xdp_query() that it really ended up attached in
 * DRV mode. Exits the process on any failure. */
static void change_to_drv_mode(struct ifobject *ifobj)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	int ret;

	/* Drop the old (SKB-mode) link before re-attaching. */
	close(ifobj->link_fd);
	ifobj->link_fd = xsk_attach_xdp_program(ifobj->ifindex, ifobj->prog_fd,
						XDP_FLAGS_DRV_MODE);
	if (ifobj->link_fd < 0) {
		ksft_print_msg("Error attaching XDP program\n");
		exit_with_error(-ifobj->link_fd);
	}

	ret = bpf_xdp_query(ifobj->ifindex, XDP_FLAGS_DRV_MODE, &opts);
	if (ret)
		exit_with_error(errno);

	if (opts.attach_mode != XDP_ATTACHED_DRV) {
		ksft_print_msg("ERROR: XDP prog not in DRV mode\n");
		exit_with_error(EINVAL);
	}
}
1884 
/* Entry point: parse the interfaces, decide which modes (SKB/DRV/ZC) can
 * run, then execute every test type in every supported mode and report
 * the overall kselftest result. */
int main(int argc, char **argv)
{
	struct pkt_stream *rx_pkt_stream_default;
	struct pkt_stream *tx_pkt_stream_default;
	struct ifobject *ifobj_tx, *ifobj_rx;
	/* SKB mode is always runnable; DRV and ZC are added after probing. */
	int modes = TEST_MODE_SKB + 1;
	u32 i, j, failed_tests = 0;
	struct test_spec test;
	bool shared_netdev;

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	ifobj_tx = ifobject_create();
	if (!ifobj_tx)
		exit_with_error(ENOMEM);
	ifobj_rx = ifobject_create();
	if (!ifobj_rx)
		exit_with_error(ENOMEM);

	setlocale(LC_ALL, "");

	parse_command_line(ifobj_tx, ifobj_rx, argc, argv);

	/* When both directions use the same netdev, the umem (and the XDP
	 * program) must be shared between the tx and rx objects. */
	shared_netdev = (ifobj_tx->ifindex == ifobj_rx->ifindex);
	ifobj_tx->shared_umem = shared_netdev;
	ifobj_rx->shared_umem = shared_netdev;

	if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) {
		usage(basename(argv[0]));
		ksft_exit_xfail();
	}

	/* Probe for native XDP and zero-copy support to extend the modes. */
	if (is_xdp_supported(ifobj_tx->ifindex)) {
		modes++;
		if (ifobj_zc_avail(ifobj_tx))
			modes++;
	}

	init_iface(ifobj_rx, MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2,
		   worker_testapp_validate_rx, true);
	init_iface(ifobj_tx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1,
		   worker_testapp_validate_tx, !shared_netdev);

	test_spec_init(&test, ifobj_tx, ifobj_rx, 0);
	tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
	rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
	if (!tx_pkt_stream_default || !rx_pkt_stream_default)
		exit_with_error(ENOMEM);
	test.tx_pkt_stream_default = tx_pkt_stream_default;
	test.rx_pkt_stream_default = rx_pkt_stream_default;

	ksft_set_plan(modes * TEST_TYPE_MAX);

	for (i = 0; i < modes; i++) {
		/* Modes are ordered SKB, DRV, ZC; switch the attachment once
		 * when entering DRV (ZC reuses the DRV attachment). */
		if (i == TEST_MODE_DRV) {
			change_to_drv_mode(ifobj_rx);
			if (!shared_netdev)
				change_to_drv_mode(ifobj_tx);
		}

		for (j = 0; j < TEST_TYPE_MAX; j++) {
			test_spec_init(&test, ifobj_tx, ifobj_rx, i);
			run_pkt_test(&test, i, j);
			usleep(USLEEP_MAX);

			if (test.fail)
				failed_tests++;
		}
	}

	pkt_stream_delete(tx_pkt_stream_default);
	pkt_stream_delete(rx_pkt_stream_default);
	ifobject_delete(ifobj_tx);
	ifobject_delete(ifobj_rx);

	if (failed_tests)
		ksft_exit_fail();
	else
		ksft_exit_pass();
}
1966