xref: /openbmc/linux/tools/testing/selftests/bpf/xskxceiver.c (revision f0a249df1b071d6f7177cc615d688a3a5d48423a)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2020 Intel Corporation. */
3 
4 /*
5  * Some functions in this program are taken from
6  * Linux kernel samples/bpf/xdpsock* and modified
7  * for use.
8  *
9  * See test_xsk.sh for detailed information on test topology
10  * and prerequisite network setup.
11  *
12  * This test program contains two threads, each thread is single socket with
13  * a unique UMEM. It validates in-order packet delivery and packet content
14  * by sending packets to each other.
15  *
16  * Tests Information:
17  * ------------------
18  * These selftests test AF_XDP SKB and Native/DRV modes using veth
19  * Virtual Ethernet interfaces.
20  *
21  * For each mode, the following tests are run:
22  *    a. nopoll - soft-irq processing in run-to-completion mode
23  *    b. poll - using poll() syscall
24  *    c. Socket Teardown
25  *       Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy
26  *       both sockets, then repeat multiple times. Only nopoll mode is used
27  *    d. Bi-directional sockets
28  *       Configure sockets as bi-directional tx/rx sockets, sets up fill and
29  *       completion rings on each socket, tx/rx in both directions. Only nopoll
30  *       mode is used
31  *    e. Statistics
32  *       Trigger some error conditions and ensure that the appropriate statistics
33  *       are incremented. Within this test, the following statistics are tested:
34  *       i.   rx dropped
35  *            Increase the UMEM frame headroom to a value which results in
36  *            insufficient space in the rx buffer for both the packet and the headroom.
37  *       ii.  tx invalid
38  *            Set the 'len' field of tx descriptors to an invalid value (umem frame
39  *            size + 1).
40  *       iii. rx ring full
41  *            Reduce the size of the RX ring to a fraction of the fill ring size.
42  *       iv.  fill queue empty
43  *            Do not populate the fill queue and then try to receive pkts.
 *    f. bpf_link resource persistence
 *       Configure sockets at indexes 0 and 1, run traffic on queue id 0,
 *       then remove the xsk sockets from queue 0 on both veth interfaces and
 *       finally run traffic on queue id 1
48  *    g. unaligned mode
49  *    h. tests for invalid and corner case Tx descriptors so that the correct ones
50  *       are discarded and let through, respectively.
51  *    i. 2K frame size tests
52  *
53  * Total tests: 12
54  *
55  * Flow:
56  * -----
57  * - Single process spawns two threads: Tx and Rx
58  * - Each of these two threads attach to a veth interface
59  * - Each thread creates one AF_XDP socket connected to a unique umem for each
60  *   veth interface
61  * - Tx thread Transmits a number of packets from veth<xxxx> to veth<yyyy>
62  * - Rx thread verifies if all packets were received and delivered in-order,
63  *   and have the right content
64  *
65  * Enable/disable packet dump mode:
66  * --------------------------
67  * To enable L2 - L4 headers and payload dump of each packet on STDOUT, add
68  * parameter -D to params array in test_xsk.sh, i.e. params=("-S" "-D")
69  */
70 
71 #define _GNU_SOURCE
72 #include <fcntl.h>
73 #include <errno.h>
74 #include <getopt.h>
75 #include <asm/barrier.h>
76 #include <linux/if_link.h>
77 #include <linux/if_ether.h>
78 #include <linux/ip.h>
79 #include <linux/udp.h>
80 #include <arpa/inet.h>
81 #include <net/if.h>
82 #include <locale.h>
83 #include <poll.h>
84 #include <pthread.h>
85 #include <signal.h>
86 #include <stdbool.h>
87 #include <stdio.h>
88 #include <stdlib.h>
89 #include <string.h>
90 #include <stddef.h>
91 #include <sys/mman.h>
92 #include <sys/socket.h>
93 #include <sys/time.h>
94 #include <sys/types.h>
95 #include <sys/queue.h>
96 #include <time.h>
97 #include <unistd.h>
98 #include <stdatomic.h>
99 #include "xsk.h"
100 #include "xskxceiver.h"
101 #include <bpf/bpf.h>
102 #include <linux/filter.h>
103 #include "../kselftest.h"
104 
/* Fixed L2/L3/L4 addresses used to build and validate the test packets.
 * MAC1/IP1/UDP_PORT1 belong to one veth endpoint, MAC2/IP2/UDP_PORT2 to
 * its peer.
 */
static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
static const char *IP1 = "192.168.100.162";
static const char *IP2 = "192.168.100.161";
static const u16 UDP_PORT1 = 2020;
static const u16 UDP_PORT2 = 2121;
111 
/* Report a fatal error with its exact call site and terminate the run. */
static void __exit_with_error(int error, const char *file, const char *func, int line)
{
	const char *reason = strerror(error);

	ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error, reason);
	ksft_exit_xfail();
}
118 
/* Convenience wrapper that records the call site of a fatal error. */
#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)
/* Test-name prefix shown when busy polling is enabled on the Tx object. */
#define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? "BUSY-POLL " : ""
121 static char *mode_string(struct test_spec *test)
122 {
123 	switch (test->mode) {
124 	case TEST_MODE_SKB:
125 		return "SKB";
126 	case TEST_MODE_DRV:
127 		return "DRV";
128 	case TEST_MODE_ZC:
129 		return "ZC";
130 	default:
131 		return "BOGUS";
132 	}
133 }
134 
135 static void report_failure(struct test_spec *test)
136 {
137 	if (test->fail)
138 		return;
139 
140 	ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test),
141 			      test->name);
142 	test->fail = true;
143 }
144 
145 static void memset32_htonl(void *dest, u32 val, u32 size)
146 {
147 	u32 *ptr = (u32 *)dest;
148 	int i;
149 
150 	val = htonl(val);
151 
152 	for (i = 0; i < (size & (~0x3)); i += 4)
153 		ptr[i >> 2] = val;
154 }
155 
156 /*
157  * Fold a partial checksum
158  * This function code has been taken from
159  * Linux kernel include/asm-generic/checksum.h
160  */
161 static __u16 csum_fold(__u32 csum)
162 {
163 	u32 sum = (__force u32)csum;
164 
165 	sum = (sum & 0xffff) + (sum >> 16);
166 	sum = (sum & 0xffff) + (sum >> 16);
167 	return (__force __u16)~sum;
168 }
169 
170 /*
171  * This function code has been taken from
172  * Linux kernel lib/checksum.c
173  */
174 static u32 from64to32(u64 x)
175 {
176 	/* add up 32-bit and 32-bit for 32+c bit */
177 	x = (x & 0xffffffff) + (x >> 32);
178 	/* add up carry.. */
179 	x = (x & 0xffffffff) + (x >> 32);
180 	return (u32)x;
181 }
182 
/*
 * Compute the 32-bit (unfolded) IPv4 pseudo-header checksum over the
 * source/destination addresses, length and protocol, accumulated on top
 * of 'sum'. This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static __u32 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum)
{
	unsigned long long s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN__
	s += proto + len;
#else
	/* On little-endian, proto + len must be shifted into byte position. */
	s += (proto + len) << 8;
#endif
	return (__force __u32)from64to32(s);
}
200 
201 /*
202  * This function has been taken from
203  * Linux kernel include/asm-generic/checksum.h
204  */
205 static __u16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum)
206 {
207 	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
208 }
209 
210 static u16 udp_csum(u32 saddr, u32 daddr, u32 len, u8 proto, u16 *udp_pkt)
211 {
212 	u32 csum = 0;
213 	u32 cnt = 0;
214 
215 	/* udp hdr and data */
216 	for (; cnt < len; cnt += 2)
217 		csum += udp_pkt[cnt >> 1];
218 
219 	return csum_tcpudp_magic(saddr, daddr, len, proto, csum);
220 }
221 
222 static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr)
223 {
224 	memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN);
225 	memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN);
226 	eth_hdr->h_proto = htons(ETH_P_IP);
227 }
228 
/*
 * Fill in the IPv4 header of a test frame using the addresses configured
 * on 'ifobject'. The header checksum field is left at zero.
 */
static void gen_ip_hdr(struct ifobject *ifobject, struct iphdr *ip_hdr)
{
	ip_hdr->version = IP_PKT_VER;
	ip_hdr->ihl = 0x5;	/* 20-byte header, no options */
	ip_hdr->tos = IP_PKT_TOS;
	ip_hdr->tot_len = htons(IP_PKT_SIZE);
	ip_hdr->id = 0;
	ip_hdr->frag_off = 0;
	ip_hdr->ttl = IPDEFTTL;
	ip_hdr->protocol = IPPROTO_UDP;
	ip_hdr->saddr = ifobject->src_ip;
	ip_hdr->daddr = ifobject->dst_ip;
	ip_hdr->check = 0;
}
243 
/*
 * Fill in the UDP header and stamp the 32-bit sequence number 'payload'
 * repeatedly over the UDP payload area of 'pkt'.
 */
static void gen_udp_hdr(u32 payload, void *pkt, struct ifobject *ifobject,
			struct udphdr *udp_hdr)
{
	udp_hdr->source = htons(ifobject->src_port);
	udp_hdr->dest = htons(ifobject->dst_port);
	udp_hdr->len = htons(UDP_PKT_SIZE);
	memset32_htonl(pkt + PKT_HDR_SIZE, payload, UDP_PKT_DATA_SIZE);
}
252 
253 static bool is_umem_valid(struct ifobject *ifobj)
254 {
255 	return !!ifobj->umem->umem;
256 }
257 
/* Compute and store the UDP checksum (pseudo-header included). */
static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr)
{
	udp_hdr->check = 0;	/* must be zero while the sum is computed */
	udp_hdr->check =
	    udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE, IPPROTO_UDP, (u16 *)udp_hdr);
}
264 
265 static u32 mode_to_xdp_flags(enum test_mode mode)
266 {
267 	return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
268 }
269 
270 static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size)
271 {
272 	struct xsk_umem_config cfg = {
273 		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
274 		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
275 		.frame_size = umem->frame_size,
276 		.frame_headroom = umem->frame_headroom,
277 		.flags = XSK_UMEM__DEFAULT_FLAGS
278 	};
279 	int ret;
280 
281 	if (umem->unaligned_mode)
282 		cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
283 
284 	ret = xsk_umem__create(&umem->umem, buffer, size,
285 			       &umem->fq, &umem->cq, &cfg);
286 	if (ret)
287 		return ret;
288 
289 	umem->buffer = buffer;
290 	return 0;
291 }
292 
293 static void enable_busy_poll(struct xsk_socket_info *xsk)
294 {
295 	int sock_opt;
296 
297 	sock_opt = 1;
298 	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
299 		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
300 		exit_with_error(errno);
301 
302 	sock_opt = 20;
303 	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
304 		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
305 		exit_with_error(errno);
306 
307 	sock_opt = BATCH_SIZE;
308 	if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
309 		       (void *)&sock_opt, sizeof(sock_opt)) < 0)
310 		exit_with_error(errno);
311 }
312 
/*
 * Create an AF_XDP socket on ifobject's interface, queue 0, bound to
 * 'umem'. Rx/Tx rings are only requested when the ifobject is configured
 * for the corresponding direction. Returns 0 or the negative error from
 * xsk_socket__create().
 */
static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
				  struct ifobject *ifobject, bool shared)
{
	struct xsk_socket_config cfg = {};
	struct xsk_ring_cons *rxr;
	struct xsk_ring_prod *txr;

	xsk->umem = umem;
	cfg.rx_size = xsk->rxqsize;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	cfg.bind_flags = ifobject->bind_flags;
	if (shared)
		cfg.bind_flags |= XDP_SHARED_UMEM;

	/* Only set up the rings this socket will actually use. */
	txr = ifobject->tx_on ? &xsk->tx : NULL;
	rxr = ifobject->rx_on ? &xsk->rx : NULL;
	return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);
}
331 
/*
 * Probe whether the interface supports zero-copy by attempting to bind an
 * AF_XDP socket with XDP_ZEROCOPY on a throwaway UMEM. All resources are
 * torn down again before returning. NOTE(review): this mutates the
 * ifobject's xdp_flags, bind_flags and rx_on fields as a side effect —
 * presumably callers re-initialize them afterwards; confirm.
 */
static bool ifobj_zc_avail(struct ifobject *ifobject)
{
	size_t umem_sz = DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
	int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
	struct xsk_socket_info *xsk;
	struct xsk_umem_info *umem;
	bool zc_avail = false;
	void *bufs;
	int ret;

	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
	if (bufs == MAP_FAILED)
		exit_with_error(errno);

	umem = calloc(1, sizeof(struct xsk_umem_info));
	if (!umem) {
		munmap(bufs, umem_sz);
		exit_with_error(ENOMEM);
	}
	umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
	ret = xsk_configure_umem(umem, bufs, umem_sz);
	if (ret)
		exit_with_error(-ret);

	xsk = calloc(1, sizeof(struct xsk_socket_info));
	if (!xsk)
		goto out;
	ifobject->xdp_flags = XDP_FLAGS_DRV_MODE;
	ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY;
	ifobject->rx_on = true;
	xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	/* Zero-copy is available iff this bind succeeds. */
	ret = __xsk_configure_socket(xsk, umem, ifobject, false);
	if (!ret)
		zc_avail = true;

	xsk_socket__delete(xsk->xsk);
	free(xsk);
out:
	munmap(umem->buffer, umem_sz);
	xsk_umem__delete(umem->umem);
	free(umem);
	return zc_avail;
}
375 
/* Command line options understood by the test binary (see usage()). */
static struct option long_options[] = {
	{"interface", required_argument, 0, 'i'},
	{"busy-poll", no_argument, 0, 'b'},
	{"dump-pkts", no_argument, 0, 'D'},
	{"verbose", no_argument, 0, 'v'},
	{0, 0, 0, 0}
};
383 
/* Print usage help for the given program name. */
static void usage(const char *prog)
{
	ksft_print_msg("  Usage: %s [OPTIONS]\n"
		       "  Options:\n"
		       "  -i, --interface      Use interface\n"
		       "  -D, --dump-pkts      Dump packets L2 - L5\n"
		       "  -v, --verbose        Verbose output\n"
		       "  -b, --busy-poll      Enable busy poll\n", prog);
}
396 
397 static bool validate_interface(struct ifobject *ifobj)
398 {
399 	if (!strcmp(ifobj->ifname, ""))
400 		return false;
401 	return true;
402 }
403 
/*
 * Parse command line arguments. The first two -i options name the Tx and
 * the Rx interface respectively; any additional -i is silently ignored.
 * Unknown options print usage and terminate the run.
 */
static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, int argc,
			       char **argv)
{
	struct ifobject *ifobj;
	u32 interface_nb = 0;
	int option_index, c;

	opterr = 0;	/* suppress getopt's own error messages */

	for (;;) {
		c = getopt_long(argc, argv, "i:Dvb", long_options, &option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'i':
			if (interface_nb == 0)
				ifobj = ifobj_tx;
			else if (interface_nb == 1)
				ifobj = ifobj_rx;
			else
				break;

			/* NOTE(review): if optarg has MAX_INTERFACE_NAME_CHARS
			 * characters or more, ifname is not explicitly
			 * NUL-terminated here — presumably the ifobject comes
			 * in zero-initialized; confirm at the allocation site.
			 */
			memcpy(ifobj->ifname, optarg,
			       min_t(size_t, MAX_INTERFACE_NAME_CHARS, strlen(optarg)));

			ifobj->ifindex = if_nametoindex(ifobj->ifname);
			if (!ifobj->ifindex)
				exit_with_error(errno);

			interface_nb++;
			break;
		case 'D':
			opt_pkt_dump = true;
			break;
		case 'v':
			opt_verbose = true;
			break;
		case 'b':
			/* Busy polling applies to both ends of the veth pair. */
			ifobj_tx->busy_poll = true;
			ifobj_rx->busy_poll = true;
			break;
		default:
			usage(basename(argv[0]));
			ksft_exit_xfail();
		}
	}
}
452 
/*
 * Reset both interface objects and the test_spec to per-test defaults:
 * ifobj_tx transmits, ifobj_rx receives, one socket each, default UMEM
 * sizing, and the default packet streams.
 */
static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
			     struct ifobject *ifobj_rx)
{
	u32 i, j;

	for (i = 0; i < MAX_INTERFACES; i++) {
		/* Index 0 is the Tx side, index 1 the Rx side. */
		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;

		ifobj->xsk = &ifobj->xsk_arr[0];
		ifobj->use_poll = false;
		ifobj->use_fill_ring = true;
		ifobj->release_rx = true;
		ifobj->validation_func = NULL;

		if (i == 0) {
			ifobj->rx_on = false;
			ifobj->tx_on = true;
			ifobj->pkt_stream = test->tx_pkt_stream_default;
		} else {
			ifobj->rx_on = true;
			ifobj->tx_on = false;
			ifobj->pkt_stream = test->rx_pkt_stream_default;
		}

		memset(ifobj->umem, 0, sizeof(*ifobj->umem));
		ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS;
		ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
		/* With a shared UMEM, the Rx half uses the upper region. */
		if (ifobj->shared_umem && ifobj->rx_on)
			ifobj->umem->base_addr = DEFAULT_UMEM_BUFFERS *
				XSK_UMEM__DEFAULT_FRAME_SIZE;

		for (j = 0; j < MAX_SOCKETS; j++) {
			memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j]));
			ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		}
	}

	test->ifobj_tx = ifobj_tx;
	test->ifobj_rx = ifobj_rx;
	test->current_step = 0;
	test->total_steps = 1;
	test->nb_sockets = 1;
	test->fail = false;
}
497 
/*
 * Fully initialize the test_spec for a given attach mode: wipe the spec
 * (preserving the default packet streams across the memset) and set each
 * interface's bind flags for zero-copy or copy mode.
 */
static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
			   struct ifobject *ifobj_rx, enum test_mode mode)
{
	struct pkt_stream *tx_pkt_stream;
	struct pkt_stream *rx_pkt_stream;
	u32 i;

	/* Keep the default streams alive across the wipe below. */
	tx_pkt_stream = test->tx_pkt_stream_default;
	rx_pkt_stream = test->rx_pkt_stream_default;
	memset(test, 0, sizeof(*test));
	test->tx_pkt_stream_default = tx_pkt_stream;
	test->rx_pkt_stream_default = rx_pkt_stream;

	for (i = 0; i < MAX_INTERFACES; i++) {
		struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx;

		ifobj->bind_flags = XDP_USE_NEED_WAKEUP;
		if (mode == TEST_MODE_ZC)
			ifobj->bind_flags |= XDP_ZEROCOPY;
		else
			ifobj->bind_flags |= XDP_COPY;
	}

	test->mode = mode;
	__test_spec_init(test, ifobj_tx, ifobj_rx);
}
524 
/* Re-initialize per-test state while keeping the same interface objects. */
static void test_spec_reset(struct test_spec *test)
{
	__test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
}
529 
530 static void test_spec_set_name(struct test_spec *test, const char *name)
531 {
532 	strncpy(test->name, name, MAX_TEST_NAME_SIZE);
533 }
534 
/* Rewind the Rx cursor so the stream can be consumed again. NULL is ok. */
static void pkt_stream_reset(struct pkt_stream *pkt_stream)
{
	if (pkt_stream)
		pkt_stream->rx_pkt_nb = 0;
}
540 
541 static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb)
542 {
543 	if (pkt_nb >= pkt_stream->nb_pkts)
544 		return NULL;
545 
546 	return &pkt_stream->pkts[pkt_nb];
547 }
548 
/*
 * Advance to the next valid packet expected on Rx, skipping invalid
 * (dropped) entries. *pkts_sent is incremented for every packet examined,
 * valid or not. Returns NULL when the stream is exhausted.
 */
static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
{
	while (pkt_stream->rx_pkt_nb < pkt_stream->nb_pkts) {
		(*pkts_sent)++;
		if (pkt_stream->pkts[pkt_stream->rx_pkt_nb].valid)
			return &pkt_stream->pkts[pkt_stream->rx_pkt_nb++];
		pkt_stream->rx_pkt_nb++;
	}
	return NULL;
}
559 
560 static void pkt_stream_delete(struct pkt_stream *pkt_stream)
561 {
562 	free(pkt_stream->pkts);
563 	free(pkt_stream);
564 }
565 
566 static void pkt_stream_restore_default(struct test_spec *test)
567 {
568 	struct pkt_stream *tx_pkt_stream = test->ifobj_tx->pkt_stream;
569 	struct pkt_stream *rx_pkt_stream = test->ifobj_rx->pkt_stream;
570 
571 	if (tx_pkt_stream != test->tx_pkt_stream_default) {
572 		pkt_stream_delete(test->ifobj_tx->pkt_stream);
573 		test->ifobj_tx->pkt_stream = test->tx_pkt_stream_default;
574 	}
575 
576 	if (rx_pkt_stream != test->rx_pkt_stream_default) {
577 		pkt_stream_delete(test->ifobj_rx->pkt_stream);
578 		test->ifobj_rx->pkt_stream = test->rx_pkt_stream_default;
579 	}
580 }
581 
582 static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
583 {
584 	struct pkt_stream *pkt_stream;
585 
586 	pkt_stream = calloc(1, sizeof(*pkt_stream));
587 	if (!pkt_stream)
588 		return NULL;
589 
590 	pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts));
591 	if (!pkt_stream->pkts) {
592 		free(pkt_stream);
593 		return NULL;
594 	}
595 
596 	pkt_stream->nb_pkts = nb_pkts;
597 	return pkt_stream;
598 }
599 
600 static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 len)
601 {
602 	pkt->addr = addr + umem->base_addr;
603 	pkt->len = len;
604 	if (len > umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 2 - umem->frame_headroom)
605 		pkt->valid = false;
606 	else
607 		pkt->valid = true;
608 }
609 
610 static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len)
611 {
612 	struct pkt_stream *pkt_stream;
613 	u32 i;
614 
615 	pkt_stream = __pkt_stream_alloc(nb_pkts);
616 	if (!pkt_stream)
617 		exit_with_error(ENOMEM);
618 
619 	pkt_stream->nb_pkts = nb_pkts;
620 	for (i = 0; i < nb_pkts; i++) {
621 		pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size,
622 			pkt_len);
623 		pkt_stream->pkts[i].payload = i;
624 	}
625 
626 	return pkt_stream;
627 }
628 
/*
 * Duplicate a packet stream layout for 'umem'. Note the clone is rebuilt
 * from nb_pkts and the length of the FIRST packet only — it assumes all
 * packets in the source stream share pkts[0].len.
 */
static struct pkt_stream *pkt_stream_clone(struct xsk_umem_info *umem,
					   struct pkt_stream *pkt_stream)
{
	return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
}
634 
635 static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
636 {
637 	struct pkt_stream *pkt_stream;
638 
639 	pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len);
640 	test->ifobj_tx->pkt_stream = pkt_stream;
641 	pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len);
642 	test->ifobj_rx->pkt_stream = pkt_stream;
643 }
644 
/*
 * Replace the interface's stream with a clone in which every second
 * packet (odd indexes) is re-placed at its frame address plus 'offset'
 * with length 'pkt_len'. Even-indexed packets keep the cloned defaults.
 */
static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
				      int offset)
{
	struct xsk_umem_info *umem = ifobj->umem;
	struct pkt_stream *pkt_stream;
	u32 i;

	pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream);
	for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2)
		pkt_set(umem, &pkt_stream->pkts[i],
			(i % umem->num_frames) * umem->frame_size + offset, pkt_len);

	ifobj->pkt_stream = pkt_stream;
}
659 
/* Apply __pkt_stream_replace_half() to both the Tx and the Rx interface. */
static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
{
	__pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);
	__pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
}
665 
/*
 * Make the Rx side expect only every second packet: its stream mirrors
 * the Tx stream but the packets at odd indexes are marked invalid
 * (expected to be dropped before delivery).
 */
static void pkt_stream_receive_half(struct test_spec *test)
{
	struct xsk_umem_info *umem = test->ifobj_rx->umem;
	struct pkt_stream *pkt_stream = test->ifobj_tx->pkt_stream;
	u32 i;

	test->ifobj_rx->pkt_stream = pkt_stream_generate(umem, pkt_stream->nb_pkts,
							 pkt_stream->pkts[0].len);
	pkt_stream = test->ifobj_rx->pkt_stream;
	for (i = 1; i < pkt_stream->nb_pkts; i += 2)
		pkt_stream->pkts[i].valid = false;
}
678 
/*
 * Write the Ethernet/IP/UDP headers and payload for packet 'pkt_nb' into
 * its UMEM frame. Invalid or sub-minimum packets are returned untouched
 * so the caller can still post their descriptors. Returns NULL past the
 * end of the stream.
 */
static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb)
{
	struct pkt *pkt = pkt_stream_get_pkt(ifobject->pkt_stream, pkt_nb);
	struct udphdr *udp_hdr;
	struct ethhdr *eth_hdr;
	struct iphdr *ip_hdr;
	void *data;

	if (!pkt)
		return NULL;
	if (!pkt->valid || pkt->len < MIN_PKT_SIZE)
		return pkt;

	data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr);
	udp_hdr = (struct udphdr *)(data + sizeof(struct ethhdr) + sizeof(struct iphdr));
	ip_hdr = (struct iphdr *)(data + sizeof(struct ethhdr));
	eth_hdr = (struct ethhdr *)data;

	/* The UDP payload carries pkt_nb as the sequence number. */
	gen_udp_hdr(pkt_nb, data, ifobject, udp_hdr);
	gen_ip_hdr(ifobject, ip_hdr);
	gen_udp_csum(udp_hdr, ip_hdr);
	gen_eth_hdr(ifobject, eth_hdr);

	return pkt;
}
704 
705 static void __pkt_stream_generate_custom(struct ifobject *ifobj,
706 					 struct pkt *pkts, u32 nb_pkts)
707 {
708 	struct pkt_stream *pkt_stream;
709 	u32 i;
710 
711 	pkt_stream = __pkt_stream_alloc(nb_pkts);
712 	if (!pkt_stream)
713 		exit_with_error(ENOMEM);
714 
715 	for (i = 0; i < nb_pkts; i++) {
716 		pkt_stream->pkts[i].addr = pkts[i].addr + ifobj->umem->base_addr;
717 		pkt_stream->pkts[i].len = pkts[i].len;
718 		pkt_stream->pkts[i].payload = i;
719 		pkt_stream->pkts[i].valid = pkts[i].valid;
720 	}
721 
722 	ifobj->pkt_stream = pkt_stream;
723 }
724 
/* Install the same caller-defined packet sequence on both interfaces. */
static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
{
	__pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts);
	__pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts);
}
730 
/*
 * Pretty-print the L2-L5 contents of a packet to stdout (-D option).
 * Note: the 'len' parameter is currently unused by this function.
 */
static void pkt_dump(void *pkt, u32 len)
{
	char s[INET_ADDRSTRLEN];
	struct ethhdr *ethhdr;
	struct udphdr *udphdr;
	struct iphdr *iphdr;
	u32 payload, i;

	ethhdr = pkt;
	iphdr = pkt + sizeof(*ethhdr);
	udphdr = pkt + sizeof(*ethhdr) + sizeof(*iphdr);

	/* extract L2 frame */
	fprintf(stdout, "DEBUG>> L2: dst mac: ");
	for (i = 0; i < ETH_ALEN; i++)
		fprintf(stdout, "%02X", ethhdr->h_dest[i]);

	fprintf(stdout, "\nDEBUG>> L2: src mac: ");
	for (i = 0; i < ETH_ALEN; i++)
		fprintf(stdout, "%02X", ethhdr->h_source[i]);

	/* extract L3 frame */
	fprintf(stdout, "\nDEBUG>> L3: ip_hdr->ihl: %02X\n", iphdr->ihl);
	fprintf(stdout, "DEBUG>> L3: ip_hdr->saddr: %s\n",
		inet_ntop(AF_INET, &iphdr->saddr, s, sizeof(s)));
	fprintf(stdout, "DEBUG>> L3: ip_hdr->daddr: %s\n",
		inet_ntop(AF_INET, &iphdr->daddr, s, sizeof(s)));
	/* extract L4 frame */
	fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source));
	fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest));
	/* extract L5 frame: the payload is the 32-bit sequence number */
	payload = ntohl(*((u32 *)(pkt + PKT_HDR_SIZE)));

	fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload);
	fprintf(stdout, "---------------------------------------\n");
}
767 
/*
 * Verify that the received packet sits at the expected offset within its
 * UMEM frame, as dictated by the fill address, the configured frame
 * headroom and XDP_PACKET_HEADROOM. In unaligned mode the configured
 * frame headroom does not apply.
 */
static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr,
			      u64 pkt_stream_addr)
{
	u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom;
	u32 offset = addr % umem->frame_size, expected_offset = 0;

	/* Only streams that drive the fill ring with explicit addresses
	 * contribute the packet's own address to the expected offset.
	 */
	if (!pkt_stream->use_addr_for_fill)
		pkt_stream_addr = 0;

	expected_offset += (pkt_stream_addr + headroom + XDP_PACKET_HEADROOM) % umem->frame_size;

	if (offset == expected_offset)
		return true;

	ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
	return false;
}
785 
/*
 * Validate a received frame against its expected packet: the length must
 * match and, for IPv4 test frames, the payload sequence number must equal
 * the expected payload. Frames below MIN_PKT_SIZE are accepted without
 * content verification.
 */
static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
{
	void *data = xsk_umem__get_data(buffer, addr);
	struct iphdr *iphdr = (struct iphdr *)(data + sizeof(struct ethhdr));

	if (!pkt) {
		ksft_print_msg("[%s] too many packets received\n", __func__);
		return false;
	}

	if (len < MIN_PKT_SIZE || pkt->len < MIN_PKT_SIZE) {
		/* Do not try to verify packets that are smaller than minimum size. */
		return true;
	}

	if (pkt->len != len) {
		ksft_print_msg("[%s] expected length [%d], got length [%d]\n",
			       __func__, pkt->len, len);
		return false;
	}

	/* Test frames carry the version/TOS values that gen_ip_hdr() sets. */
	if (iphdr->version == IP_PKT_VER && iphdr->tos == IP_PKT_TOS) {
		u32 seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE)));

		if (opt_pkt_dump)
			pkt_dump(data, PKT_SIZE);

		if (pkt->payload != seqnum) {
			ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n",
				       __func__, pkt->payload, seqnum);
			return false;
		}
	} else {
		ksft_print_msg("Invalid frame received: ");
		ksft_print_msg("[IP_PKT_VER: %02X], [IP_PKT_TOS: %02X]\n", iphdr->version,
			       iphdr->tos);
		return false;
	}

	return true;
}
827 
828 static void kick_tx(struct xsk_socket_info *xsk)
829 {
830 	int ret;
831 
832 	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
833 	if (ret >= 0)
834 		return;
835 	if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
836 		usleep(100);
837 		return;
838 	}
839 	exit_with_error(errno);
840 }
841 
842 static void kick_rx(struct xsk_socket_info *xsk)
843 {
844 	int ret;
845 
846 	ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
847 	if (ret < 0)
848 		exit_with_error(errno);
849 }
850 
/*
 * Reap up to batch_size Tx completions from the completion ring and
 * reduce outstanding_tx accordingly. Fails the test if more completions
 * arrive than packets are outstanding.
 */
static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
{
	unsigned int rcvd;
	u32 idx;

	if (xsk_ring_prod__needs_wakeup(&xsk->tx))
		kick_tx(xsk);

	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
	if (rcvd) {
		if (rcvd > xsk->outstanding_tx) {
			u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);

			ksft_print_msg("[%s] Too many packets completed\n", __func__);
			ksft_print_msg("Last completion address: %llx\n", addr);
			return TEST_FAILURE;
		}

		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
	}

	return TEST_PASS;
}
875 
/*
 * Rx loop: receive every valid packet of the Rx stream and verify its
 * content, length and placement. Refills the fill ring as buffers are
 * consumed and signals the pacing condition so the Tx thread may proceed.
 * Returns TEST_PASS/TEST_FAILURE; a poll timeout while the Tx UMEM has
 * already been destroyed also counts as a pass (socket teardown tests).
 */
static int receive_pkts(struct test_spec *test, struct pollfd *fds)
{
	struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
	struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream;
	u32 idx_rx = 0, idx_fq = 0, rcvd, i, pkts_sent = 0;
	struct xsk_socket_info *xsk = test->ifobj_rx->xsk;
	struct ifobject *ifobj = test->ifobj_rx;
	struct xsk_umem_info *umem = xsk->umem;
	struct pkt *pkt;
	int ret;

	ret = gettimeofday(&tv_now, NULL);
	if (ret)
		exit_with_error(errno);
	timeradd(&tv_now, &tv_timeout, &tv_end);

	pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
	while (pkt) {
		/* Overall deadline so a stuck receiver cannot hang the suite. */
		ret = gettimeofday(&tv_now, NULL);
		if (ret)
			exit_with_error(errno);
		if (timercmp(&tv_now, &tv_end, >)) {
			ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
			return TEST_FAILURE;
		}

		kick_rx(xsk);
		if (ifobj->use_poll) {
			ret = poll(fds, 1, POLL_TMOUT);
			if (ret < 0)
				exit_with_error(errno);

			if (!ret) {
				/* A timeout is expected once the Tx UMEM is gone. */
				if (!is_umem_valid(test->ifobj_tx))
					return TEST_PASS;

				ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__);
				return TEST_FAILURE;

			}

			if (!(fds->revents & POLLIN))
				continue;
		}

		rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
		if (!rcvd)
			continue;

		if (ifobj->use_fill_ring) {
			/* Reserve fill entries for the buffers being returned;
			 * retry (with a wakeup poll if needed) until all fit.
			 */
			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
			while (ret != rcvd) {
				if (ret < 0)
					exit_with_error(-ret);
				if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
					ret = poll(fds, 1, POLL_TMOUT);
					if (ret < 0)
						exit_with_error(errno);
				}
				ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
			}
		}

		for (i = 0; i < rcvd; i++) {
			const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
			u64 addr = desc->addr, orig;

			orig = xsk_umem__extract_addr(addr);
			addr = xsk_umem__add_offset_to_addr(addr);

			if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len) ||
			    !is_offset_correct(umem, pkt_stream, addr, pkt->addr))
				return TEST_FAILURE;

			if (ifobj->use_fill_ring)
				*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
			pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
		}

		if (ifobj->use_fill_ring)
			xsk_ring_prod__submit(&umem->fq, rcvd);
		if (ifobj->release_rx)
			xsk_ring_cons__release(&xsk->rx, rcvd);

		/* Tell the Tx thread that buffers were freed up. */
		pthread_mutex_lock(&pacing_mutex);
		pkts_in_flight -= pkts_sent;
		if (pkts_in_flight < umem->num_frames)
			pthread_cond_signal(&pacing_cond);
		pthread_mutex_unlock(&pacing_mutex);
		pkts_sent = 0;
	}

	return TEST_PASS;
}
970 
/*
 * Queue and submit one BATCH_SIZE batch of packets starting at *pkt_nb
 * (advanced as packets are queued). In 'timeout' mode (the peer's UMEM is
 * already gone) a poll timeout returns TEST_PASS; otherwise poll errors
 * fail the test. Returns TEST_PASS, TEST_FAILURE or TEST_CONTINUE.
 */
static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fds,
		       bool timeout)
{
	struct xsk_socket_info *xsk = ifobject->xsk;
	bool use_poll = ifobject->use_poll;
	u32 i, idx = 0, valid_pkts = 0;
	int ret;

	/* Wait for BATCH_SIZE free Tx descriptors, reaping completions
	 * while we wait.
	 */
	while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) {
		if (use_poll) {
			ret = poll(fds, 1, POLL_TMOUT);
			if (timeout) {
				if (ret < 0) {
					ksft_print_msg("ERROR: [%s] Poll error %d\n",
						       __func__, errno);
					return TEST_FAILURE;
				}
				if (ret == 0)
					return TEST_PASS;
				break;
			}
			if (ret <= 0) {
				ksft_print_msg("ERROR: [%s] Poll error %d\n",
					       __func__, errno);
				return TEST_FAILURE;
			}
		}

		complete_pkts(xsk, BATCH_SIZE);
	}

	/* Fill the reserved descriptors; stop early at end of stream. */
	for (i = 0; i < BATCH_SIZE; i++) {
		struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
		struct pkt *pkt = pkt_generate(ifobject, *pkt_nb);

		if (!pkt)
			break;

		tx_desc->addr = pkt->addr;
		tx_desc->len = pkt->len;
		(*pkt_nb)++;
		if (pkt->valid)
			valid_pkts++;
	}

	/* Pace the sender so it cannot run a whole UMEM ahead of the
	 * receiver.
	 */
	pthread_mutex_lock(&pacing_mutex);
	pkts_in_flight += valid_pkts;
	/* pkts_in_flight might be negative if many invalid packets are sent */
	if (pkts_in_flight >= (int)(ifobject->umem->num_frames - BATCH_SIZE)) {
		kick_tx(xsk);
		pthread_cond_wait(&pacing_cond, &pacing_mutex);
	}
	pthread_mutex_unlock(&pacing_mutex);

	xsk_ring_prod__submit(&xsk->tx, i);
	xsk->outstanding_tx += valid_pkts;

	if (use_poll) {
		ret = poll(fds, 1, POLL_TMOUT);
		if (ret <= 0) {
			if (ret == 0 && timeout)
				return TEST_PASS;

			ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret);
			return TEST_FAILURE;
		}
	}

	if (!timeout) {
		if (complete_pkts(xsk, i))
			return TEST_FAILURE;

		usleep(10);
		return TEST_PASS;
	}

	return TEST_CONTINUE;
}
1049 
1050 static void wait_for_tx_completion(struct xsk_socket_info *xsk)
1051 {
1052 	while (xsk->outstanding_tx)
1053 		complete_pkts(xsk, BATCH_SIZE);
1054 }
1055 
/* Drive transmission of the whole packet stream on @ifobject.
 *
 * timeout is set when the peer's umem is invalid, meaning completions may
 * never arrive; in that mode a clean poll() timeout reported by
 * __send_pkts() (TEST_PASS) ends the loop successfully.
 *
 * Returns TEST_PASS or TEST_FAILURE.
 */
static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
	bool timeout = !is_umem_valid(test->ifobj_rx);
	struct pollfd fds = { };
	u32 pkt_cnt = 0, ret;

	fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
	fds.events = POLLOUT;

	while (pkt_cnt < ifobject->pkt_stream->nb_pkts) {
		ret = __send_pkts(ifobject, &pkt_cnt, &fds, timeout);
		/* Any error (or a failure flagged by the Rx side) aborts,
		 * except in timeout mode where errors are expected.
		 */
		if ((ret || test->fail) && !timeout)
			return TEST_FAILURE;
		else if (ret == TEST_PASS && timeout)
			return ret;
	}

	/* Reap all remaining completions before declaring success. */
	wait_for_tx_completion(ifobject->xsk);
	return TEST_PASS;
}
1076 
1077 static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
1078 {
1079 	int fd = xsk_socket__fd(xsk), err;
1080 	socklen_t optlen, expected_len;
1081 
1082 	optlen = sizeof(*stats);
1083 	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
1084 	if (err) {
1085 		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
1086 			       __func__, -err, strerror(-err));
1087 		return TEST_FAILURE;
1088 	}
1089 
1090 	expected_len = sizeof(struct xdp_statistics);
1091 	if (optlen != expected_len) {
1092 		ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n",
1093 			       __func__, expected_len, optlen);
1094 		return TEST_FAILURE;
1095 	}
1096 
1097 	return TEST_PASS;
1098 }
1099 
1100 static int validate_rx_dropped(struct ifobject *ifobject)
1101 {
1102 	struct xsk_socket *xsk = ifobject->xsk->xsk;
1103 	struct xdp_statistics stats;
1104 	int err;
1105 
1106 	kick_rx(ifobject->xsk);
1107 
1108 	err = get_xsk_stats(xsk, &stats);
1109 	if (err)
1110 		return TEST_FAILURE;
1111 
1112 	if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2)
1113 		return TEST_PASS;
1114 
1115 	return TEST_FAILURE;
1116 }
1117 
1118 static int validate_rx_full(struct ifobject *ifobject)
1119 {
1120 	struct xsk_socket *xsk = ifobject->xsk->xsk;
1121 	struct xdp_statistics stats;
1122 	int err;
1123 
1124 	usleep(1000);
1125 	kick_rx(ifobject->xsk);
1126 
1127 	err = get_xsk_stats(xsk, &stats);
1128 	if (err)
1129 		return TEST_FAILURE;
1130 
1131 	if (stats.rx_ring_full)
1132 		return TEST_PASS;
1133 
1134 	return TEST_FAILURE;
1135 }
1136 
1137 static int validate_fill_empty(struct ifobject *ifobject)
1138 {
1139 	struct xsk_socket *xsk = ifobject->xsk->xsk;
1140 	struct xdp_statistics stats;
1141 	int err;
1142 
1143 	usleep(1000);
1144 	kick_rx(ifobject->xsk);
1145 
1146 	err = get_xsk_stats(xsk, &stats);
1147 	if (err)
1148 		return TEST_FAILURE;
1149 
1150 	if (stats.rx_fill_ring_empty_descs)
1151 		return TEST_PASS;
1152 
1153 	return TEST_FAILURE;
1154 }
1155 
1156 static int validate_tx_invalid_descs(struct ifobject *ifobject)
1157 {
1158 	struct xsk_socket *xsk = ifobject->xsk->xsk;
1159 	int fd = xsk_socket__fd(xsk);
1160 	struct xdp_statistics stats;
1161 	socklen_t optlen;
1162 	int err;
1163 
1164 	optlen = sizeof(stats);
1165 	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
1166 	if (err) {
1167 		ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
1168 			       __func__, -err, strerror(-err));
1169 		return TEST_FAILURE;
1170 	}
1171 
1172 	if (stats.tx_invalid_descs != ifobject->pkt_stream->nb_pkts / 2) {
1173 		ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%u] expected [%u]\n",
1174 			       __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts);
1175 		return TEST_FAILURE;
1176 	}
1177 
1178 	return TEST_PASS;
1179 }
1180 
1181 static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject,
1182 				 struct xsk_umem_info *umem, bool tx)
1183 {
1184 	int i, ret;
1185 
1186 	for (i = 0; i < test->nb_sockets; i++) {
1187 		bool shared = (ifobject->shared_umem && tx) ? true : !!i;
1188 		u32 ctr = 0;
1189 
1190 		while (ctr++ < SOCK_RECONF_CTR) {
1191 			ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem,
1192 						     ifobject, shared);
1193 			if (!ret)
1194 				break;
1195 
1196 			/* Retry if it fails as xsk_socket__create() is asynchronous */
1197 			if (ctr >= SOCK_RECONF_CTR)
1198 				exit_with_error(-ret);
1199 			usleep(USLEEP_MAX);
1200 		}
1201 		if (ifobject->busy_poll)
1202 			enable_busy_poll(&ifobject->xsk_arr[i]);
1203 	}
1204 }
1205 
1206 static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject)
1207 {
1208 	xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true);
1209 	ifobject->xsk = &ifobject->xsk_arr[0];
1210 	ifobject->xskmap = test->ifobj_rx->xskmap;
1211 	memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info));
1212 }
1213 
1214 static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream)
1215 {
1216 	u32 idx = 0, i, buffers_to_fill;
1217 	int ret;
1218 
1219 	if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
1220 		buffers_to_fill = umem->num_frames;
1221 	else
1222 		buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS;
1223 
1224 	ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx);
1225 	if (ret != buffers_to_fill)
1226 		exit_with_error(ENOSPC);
1227 	for (i = 0; i < buffers_to_fill; i++) {
1228 		u64 addr;
1229 
1230 		if (pkt_stream->use_addr_for_fill) {
1231 			struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i);
1232 
1233 			if (!pkt)
1234 				break;
1235 			addr = pkt->addr;
1236 		} else {
1237 			addr = i * umem->frame_size;
1238 		}
1239 
1240 		*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
1241 	}
1242 	xsk_ring_prod__submit(&umem->fq, i);
1243 }
1244 
/* Per-thread setup shared by Tx and Rx workers: map the umem backing
 * buffer, configure the umem and sockets, populate the fill ring, and (on
 * the Rx side) register the first socket in the xskmap. Any failure aborts
 * the test via exit_with_error().
 */
static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
{
	u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
	int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	void *bufs;
	int ret;

	/* Unaligned mode is backed by huge pages. */
	if (ifobject->umem->unaligned_mode)
		mmap_flags |= MAP_HUGETLB;

	/* A shared umem serves both directions, so double the mapping. */
	if (ifobject->shared_umem)
		umem_sz *= 2;

	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
	if (bufs == MAP_FAILED)
		exit_with_error(errno);

	ret = xsk_configure_umem(ifobject->umem, bufs, umem_sz);
	if (ret)
		exit_with_error(-ret);

	xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream);

	xsk_configure_socket(test, ifobject, ifobject->umem, false);

	ifobject->xsk = &ifobject->xsk_arr[0];

	/* Only the receiving side needs its socket in the xskmap. */
	if (!ifobject->rx_on)
		return;

	ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
	if (ret)
		exit_with_error(errno);
}
1280 
1281 static void *worker_testapp_validate_tx(void *arg)
1282 {
1283 	struct test_spec *test = (struct test_spec *)arg;
1284 	struct ifobject *ifobject = test->ifobj_tx;
1285 	int err;
1286 
1287 	if (test->current_step == 1) {
1288 		if (!ifobject->shared_umem)
1289 			thread_common_ops(test, ifobject);
1290 		else
1291 			thread_common_ops_tx(test, ifobject);
1292 	}
1293 
1294 	print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts,
1295 		      ifobject->ifname);
1296 	err = send_pkts(test, ifobject);
1297 
1298 	if (!err && ifobject->validation_func)
1299 		err = ifobject->validation_func(ifobject);
1300 	if (err)
1301 		report_failure(test);
1302 
1303 	pthread_exit(NULL);
1304 }
1305 
/* Rx worker thread: on the first step perform full setup, on later steps
 * re-register the socket in the (cleared) xskmap. Synchronizes with the
 * Tx thread via the global barrier, then receives and validates traffic.
 * On failure the pacing condition is signalled so a Tx thread blocked in
 * pthread_cond_wait() cannot deadlock.
 */
static void *worker_testapp_validate_rx(void *arg)
{
	struct test_spec *test = (struct test_spec *)arg;
	struct ifobject *ifobject = test->ifobj_rx;
	struct pollfd fds = { };
	int err;

	if (test->current_step == 1) {
		thread_common_ops(test, ifobject);
	} else {
		xsk_clear_xskmap(ifobject->xskmap);
		err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
		if (err) {
			printf("Error: Failed to update xskmap, error %s\n", strerror(-err));
			exit_with_error(-err);
		}
	}

	fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
	fds.events = POLLIN;

	/* Let the Tx side know Rx setup is complete. */
	pthread_barrier_wait(&barr);

	err = receive_pkts(test, &fds);

	if (!err && ifobject->validation_func)
		err = ifobject->validation_func(ifobject);
	if (err) {
		report_failure(test);
		/* Wake a Tx thread possibly blocked on the pacing condition. */
		pthread_mutex_lock(&pacing_mutex);
		pthread_cond_signal(&pacing_cond);
		pthread_mutex_unlock(&pacing_mutex);
	}

	pthread_exit(NULL);
}
1342 
1343 static void testapp_clean_xsk_umem(struct ifobject *ifobj)
1344 {
1345 	u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;
1346 
1347 	if (ifobj->shared_umem)
1348 		umem_sz *= 2;
1349 
1350 	xsk_umem__delete(ifobj->umem->umem);
1351 	munmap(ifobj->umem->buffer, umem_sz);
1352 }
1353 
/* SIGUSR1 handler: terminate the worker thread, even if it is blocked
 * inside a syscall such as poll().
 */
static void handler(int signum)
{
	pthread_exit(NULL);
}
1358 
/* Run a timeout-style test with a single worker thread (Tx or Rx only);
 * the other direction deliberately never runs so poll() must time out.
 * Shared-umem mode is temporarily disabled since only one side exists.
 * The worker is stopped via SIGUSR1 (see handler()).
 *
 * Returns non-zero if the test failed.
 */
static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj,
						  enum test_type type)
{
	bool old_shared_umem = ifobj->shared_umem;
	pthread_t t0;

	if (pthread_barrier_init(&barr, NULL, 2))
		exit_with_error(errno);

	test->current_step++;
	if (type == TEST_TYPE_POLL_RXQ_TMOUT)
		pkt_stream_reset(ifobj->pkt_stream);
	pkts_in_flight = 0;

	/* Only one side runs, so disable umem sharing for this test. */
	test->ifobj_rx->shared_umem = false;
	test->ifobj_tx->shared_umem = false;

	signal(SIGUSR1, handler);
	/* Spawn thread */
	pthread_create(&t0, NULL, ifobj->func_ptr, test);

	/* The Rx worker waits on the barrier itself; the Tx-timeout worker
	 * does not, so the main thread must not wait either.
	 */
	if (type != TEST_TYPE_POLL_TXQ_TMOUT)
		pthread_barrier_wait(&barr);

	if (pthread_barrier_destroy(&barr))
		exit_with_error(errno);

	/* Force the (possibly blocked) worker to exit, then reap it. */
	pthread_kill(t0, SIGUSR1);
	pthread_join(t0, NULL);

	if (test->total_steps == test->current_step || test->fail) {
		xsk_socket__delete(ifobj->xsk->xsk);
		xsk_clear_xskmap(ifobj->xskmap);
		testapp_clean_xsk_umem(ifobj);
	}

	test->ifobj_rx->shared_umem = old_shared_umem;
	test->ifobj_tx->shared_umem = old_shared_umem;

	return !!test->fail;
}
1400 
/* Run one traffic step: spawn the Rx worker, wait until its setup is done
 * (barrier), then spawn the Tx worker and join both. Resources are torn
 * down on the final step or on failure.
 *
 * Returns non-zero if the test failed.
 */
static int testapp_validate_traffic(struct test_spec *test)
{
	struct ifobject *ifobj_tx = test->ifobj_tx;
	struct ifobject *ifobj_rx = test->ifobj_rx;
	pthread_t t0, t1;

	if (pthread_barrier_init(&barr, NULL, 2))
		exit_with_error(errno);

	test->current_step++;
	pkt_stream_reset(ifobj_rx->pkt_stream);
	pkts_in_flight = 0;

	/*Spawn RX thread */
	pthread_create(&t0, NULL, ifobj_rx->func_ptr, test);

	/* Do not start Tx before Rx setup has completed. */
	pthread_barrier_wait(&barr);
	if (pthread_barrier_destroy(&barr))
		exit_with_error(errno);

	/*Spawn TX thread */
	pthread_create(&t1, NULL, ifobj_tx->func_ptr, test);

	pthread_join(t1, NULL);
	pthread_join(t0, NULL);

	if (test->total_steps == test->current_step || test->fail) {
		xsk_socket__delete(ifobj_tx->xsk->xsk);
		xsk_socket__delete(ifobj_rx->xsk->xsk);
		testapp_clean_xsk_umem(ifobj_rx);
		/* A shared umem was already cleaned via the Rx side. */
		if (!ifobj_tx->shared_umem)
			testapp_clean_xsk_umem(ifobj_tx);
	}

	return !!test->fail;
}
1437 
1438 static void testapp_teardown(struct test_spec *test)
1439 {
1440 	int i;
1441 
1442 	test_spec_set_name(test, "TEARDOWN");
1443 	for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
1444 		if (testapp_validate_traffic(test))
1445 			return;
1446 		test_spec_reset(test);
1447 	}
1448 }
1449 
1450 static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
1451 {
1452 	thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr;
1453 	struct ifobject *tmp_ifobj = (*ifobj1);
1454 
1455 	(*ifobj1)->func_ptr = (*ifobj2)->func_ptr;
1456 	(*ifobj2)->func_ptr = tmp_func_ptr;
1457 
1458 	*ifobj1 = *ifobj2;
1459 	*ifobj2 = tmp_ifobj;
1460 }
1461 
1462 static void testapp_bidi(struct test_spec *test)
1463 {
1464 	test_spec_set_name(test, "BIDIRECTIONAL");
1465 	test->ifobj_tx->rx_on = true;
1466 	test->ifobj_rx->tx_on = true;
1467 	test->total_steps = 2;
1468 	if (testapp_validate_traffic(test))
1469 		return;
1470 
1471 	print_verbose("Switching Tx/Rx vectors\n");
1472 	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
1473 	testapp_validate_traffic(test);
1474 
1475 	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
1476 }
1477 
1478 static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
1479 {
1480 	int ret;
1481 
1482 	xsk_socket__delete(ifobj_tx->xsk->xsk);
1483 	xsk_socket__delete(ifobj_rx->xsk->xsk);
1484 	ifobj_tx->xsk = &ifobj_tx->xsk_arr[1];
1485 	ifobj_rx->xsk = &ifobj_rx->xsk_arr[1];
1486 
1487 	ret = xsk_update_xskmap(ifobj_rx->xskmap, ifobj_rx->xsk->xsk);
1488 	if (ret)
1489 		exit_with_error(errno);
1490 }
1491 
1492 static void testapp_bpf_res(struct test_spec *test)
1493 {
1494 	test_spec_set_name(test, "BPF_RES");
1495 	test->total_steps = 2;
1496 	test->nb_sockets = 2;
1497 	if (testapp_validate_traffic(test))
1498 		return;
1499 
1500 	swap_xsk_resources(test->ifobj_tx, test->ifobj_rx);
1501 	testapp_validate_traffic(test);
1502 }
1503 
/* UMEM_HEADROOM: run traffic with a non-default Rx frame headroom. */
static void testapp_headroom(struct test_spec *test)
{
	test_spec_set_name(test, "UMEM_HEADROOM");
	test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
	testapp_validate_traffic(test);
}
1510 
/* STAT_RX_DROPPED: inflate the Rx frame headroom so that the enlarged half
 * of the packets (MIN_PKT_SIZE * 4) no longer fits in an Rx buffer and is
 * dropped, then check the rx_dropped statistic via validate_rx_dropped().
 */
static void testapp_stats_rx_dropped(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_RX_DROPPED");
	pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
	/* Leave only MIN_PKT_SIZE * 3 bytes of buffer space per frame. */
	test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
		XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
	pkt_stream_receive_half(test);
	test->ifobj_rx->validation_func = validate_rx_dropped;
	testapp_validate_traffic(test);
}
1521 
/* STAT_TX_INVALID: give half of the descriptors an oversized length so the
 * kernel rejects them, then verify the tx_invalid_descs statistic.
 */
static void testapp_stats_tx_invalid_descs(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_TX_INVALID");
	pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
	test->ifobj_tx->validation_func = validate_tx_invalid_descs;
	testapp_validate_traffic(test);

	pkt_stream_restore_default(test);
}
1531 
/* STAT_RX_FULL: send more packets than the Rx ring can hold while Rx
 * descriptors are not released (release_rx = false), so the ring fills up
 * and the rx_ring_full statistic increments.
 */
static void testapp_stats_rx_full(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_RX_FULL");
	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE);
	test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
							 DEFAULT_UMEM_BUFFERS, PKT_SIZE);
	if (!test->ifobj_rx->pkt_stream)
		exit_with_error(ENOMEM);

	/* Rx ring exactly as large as the umem, never drained. */
	test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
	test->ifobj_rx->release_rx = false;
	test->ifobj_rx->validation_func = validate_rx_full;
	testapp_validate_traffic(test);

	pkt_stream_restore_default(test);
}
1548 
/* STAT_RX_FILL_EMPTY: send more packets than there are fill-ring buffers
 * while the fill ring is never replenished (use_fill_ring = false), so the
 * rx_fill_ring_empty_descs statistic increments.
 */
static void testapp_stats_fill_empty(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_RX_FILL_EMPTY");
	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE);
	test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
							 DEFAULT_UMEM_BUFFERS, PKT_SIZE);
	if (!test->ifobj_rx->pkt_stream)
		exit_with_error(ENOMEM);

	test->ifobj_rx->use_fill_ring = false;
	test->ifobj_rx->validation_func = validate_fill_empty;
	testapp_validate_traffic(test);

	pkt_stream_restore_default(test);
}
1564 
1565 /* Simple test */
1566 static bool hugepages_present(struct ifobject *ifobject)
1567 {
1568 	const size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size;
1569 	void *bufs;
1570 
1571 	bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
1572 		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
1573 	if (bufs == MAP_FAILED)
1574 		return false;
1575 
1576 	munmap(bufs, mmap_sz);
1577 	return true;
1578 }
1579 
/* UNALIGNED_MODE: run traffic with unaligned chunk mode enabled on both
 * umems. Skipped (returns false) when no 2M huge pages are available;
 * returns true when the test was run.
 */
static bool testapp_unaligned(struct test_spec *test)
{
	if (!hugepages_present(test->ifobj_tx)) {
		ksft_test_result_skip("No 2M huge pages present.\n");
		return false;
	}

	test_spec_set_name(test, "UNALIGNED_MODE");
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	/* Let half of the packets straddle a buffer boundary */
	pkt_stream_replace_half(test, PKT_SIZE, -PKT_SIZE / 2);
	/* Fill ring must mirror the exact (unaligned) packet addresses. */
	test->ifobj_rx->pkt_stream->use_addr_for_fill = true;
	testapp_validate_traffic(test);

	pkt_stream_restore_default(test);
	return true;
}
1598 
/* Send and validate a single packet (minimal custom stream). */
static void testapp_single_pkt(struct test_spec *test)
{
	struct pkt pkts[] = {{0x1000, PKT_SIZE, 0, true}};

	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	testapp_validate_traffic(test);
	pkt_stream_restore_default(test);
}
1607 
/* Exercise descriptor validation with a mix of valid and invalid Tx
 * descriptors; the expected validity of some entries changes with
 * unaligned mode, 2K frame size, and shared umem.
 */
static void testapp_invalid_desc(struct test_spec *test)
{
	struct pkt pkts[] = {
		/* Zero packet address allowed */
		{0, PKT_SIZE, 0, true},
		/* Allowed packet */
		{0x1000, PKT_SIZE, 0, true},
		/* Straddling the start of umem */
		{-2, PKT_SIZE, 0, false},
		/* Packet too large */
		{0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
		/* After umem ends */
		{UMEM_SIZE, PKT_SIZE, 0, false},
		/* Straddle the end of umem */
		{UMEM_SIZE - PKT_SIZE / 2, PKT_SIZE, 0, false},
		/* Straddle a page boundary */
		{0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false},
		/* Straddle a 2K boundary */
		{0x3800 - PKT_SIZE / 2, PKT_SIZE, 0, true},
		/* Valid packet for synch so that something is received */
		{0x4000, PKT_SIZE, 0, true}};

	if (test->ifobj_tx->umem->unaligned_mode) {
		/* Crossing a page boundary allowed */
		pkts[6].valid = true;
	}
	if (test->ifobj_tx->umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
		/* Crossing a 2K frame size boundary not allowed */
		pkts[7].valid = false;
	}

	if (test->ifobj_tx->shared_umem) {
		/* The Tx half of a shared umem starts UMEM_SIZE further in. */
		pkts[4].addr += UMEM_SIZE;
		pkts[5].addr += UMEM_SIZE;
	}

	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	testapp_validate_traffic(test);
	pkt_stream_restore_default(test);
}
1648 
1649 static int xsk_load_xdp_programs(struct ifobject *ifobj)
1650 {
1651 	ifobj->xdp_progs = xsk_xdp_progs__open_and_load();
1652 	if (libbpf_get_error(ifobj->xdp_progs))
1653 		return libbpf_get_error(ifobj->xdp_progs);
1654 
1655 	return 0;
1656 }
1657 
/* Release the XDP program skeleton loaded by xsk_load_xdp_programs(). */
static void xsk_unload_xdp_programs(struct ifobject *ifobj)
{
	xsk_xdp_progs__destroy(ifobj->xdp_progs);
}
1662 
/* Initialize one interface object: MAC/IP/port addressing and the worker
 * thread function. When @load_xdp is set, also load and attach the default
 * XDP program in SKB mode and record its xskmap; any failure here aborts
 * the whole test run.
 */
static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac,
		       const char *dst_ip, const char *src_ip, const u16 dst_port,
		       const u16 src_port, thread_func_t func_ptr, bool load_xdp)
{
	struct in_addr ip;
	int err;

	memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN);
	memcpy(ifobj->src_mac, src_mac, ETH_ALEN);

	inet_aton(dst_ip, &ip);
	ifobj->dst_ip = ip.s_addr;

	inet_aton(src_ip, &ip);
	ifobj->src_ip = ip.s_addr;

	ifobj->dst_port = dst_port;
	ifobj->src_port = src_port;

	ifobj->func_ptr = func_ptr;

	/* On a shared netdev only one side loads the program. */
	if (!load_xdp)
		return;

	err = xsk_load_xdp_programs(ifobj);
	if (err) {
		printf("Error loading XDP program\n");
		exit_with_error(err);
	}

	/* Start out in SKB mode; DRV mode is switched to later if tested. */
	ifobj->xdp_flags = mode_to_xdp_flags(TEST_MODE_SKB);
	err = xsk_attach_xdp_program(ifobj->xdp_progs->progs.xsk_def_prog, ifobj->ifindex,
				     ifobj->xdp_flags);
	if (err) {
		printf("Error attaching XDP program\n");
		exit_with_error(-err);
	}
	ifobj->xskmap = ifobj->xdp_progs->maps.xsk;
}
1702 
/* Dispatch a single test case of the (mode x type) matrix. Each case sets
 * its name, tweaks the test spec as needed, runs traffic, and restores any
 * global packet-stream state it replaced. A pass is reported unless the
 * case set test->fail (skips report themselves and return early).
 */
static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type)
{
	switch (type) {
	case TEST_TYPE_STATS_RX_DROPPED:
		if (mode == TEST_MODE_ZC) {
			ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n");
			return;
		}
		testapp_stats_rx_dropped(test);
		break;
	case TEST_TYPE_STATS_TX_INVALID_DESCS:
		testapp_stats_tx_invalid_descs(test);
		break;
	case TEST_TYPE_STATS_RX_FULL:
		testapp_stats_rx_full(test);
		break;
	case TEST_TYPE_STATS_FILL_EMPTY:
		testapp_stats_fill_empty(test);
		break;
	case TEST_TYPE_TEARDOWN:
		testapp_teardown(test);
		break;
	case TEST_TYPE_BIDI:
		testapp_bidi(test);
		break;
	case TEST_TYPE_BPF_RES:
		testapp_bpf_res(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION:
		test_spec_set_name(test, "RUN_TO_COMPLETION");
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT:
		test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT");
		testapp_single_pkt(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME:
		test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
		test->ifobj_tx->umem->frame_size = 2048;
		test->ifobj_rx->umem->frame_size = 2048;
		pkt_stream_replace(test, DEFAULT_PKT_CNT, PKT_SIZE);
		testapp_validate_traffic(test);

		pkt_stream_restore_default(test);
		break;
	case TEST_TYPE_RX_POLL:
		test->ifobj_rx->use_poll = true;
		test_spec_set_name(test, "POLL_RX");
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_TX_POLL:
		test->ifobj_tx->use_poll = true;
		test_spec_set_name(test, "POLL_TX");
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_POLL_TXQ_TMOUT:
		test_spec_set_name(test, "POLL_TXQ_FULL");
		test->ifobj_tx->use_poll = true;
		/* create invalid frame by set umem frame_size and pkt length equal to 2048 */
		test->ifobj_tx->umem->frame_size = 2048;
		pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);
		/* Single-threaded: no receiver runs, so Tx poll must time out. */
		testapp_validate_traffic_single_thread(test, test->ifobj_tx, type);
		pkt_stream_restore_default(test);
		break;
	case TEST_TYPE_POLL_RXQ_TMOUT:
		test_spec_set_name(test, "POLL_RXQ_EMPTY");
		test->ifobj_rx->use_poll = true;
		/* Single-threaded: no sender runs, so Rx poll must time out. */
		testapp_validate_traffic_single_thread(test, test->ifobj_rx, type);
		break;
	case TEST_TYPE_ALIGNED_INV_DESC:
		test_spec_set_name(test, "ALIGNED_INV_DESC");
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME:
		test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE");
		test->ifobj_tx->umem->frame_size = 2048;
		test->ifobj_rx->umem->frame_size = 2048;
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_UNALIGNED_INV_DESC:
		if (!hugepages_present(test->ifobj_tx)) {
			ksft_test_result_skip("No 2M huge pages present.\n");
			return;
		}
		test_spec_set_name(test, "UNALIGNED_INV_DESC");
		test->ifobj_tx->umem->unaligned_mode = true;
		test->ifobj_rx->umem->unaligned_mode = true;
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_UNALIGNED:
		/* Reports its own skip result when huge pages are missing. */
		if (!testapp_unaligned(test))
			return;
		break;
	case TEST_TYPE_HEADROOM:
		testapp_headroom(test);
		break;
	default:
		break;
	}

	if (!test->fail)
		ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
				      test->name);
}
1807 
1808 static struct ifobject *ifobject_create(void)
1809 {
1810 	struct ifobject *ifobj;
1811 
1812 	ifobj = calloc(1, sizeof(struct ifobject));
1813 	if (!ifobj)
1814 		return NULL;
1815 
1816 	ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
1817 	if (!ifobj->xsk_arr)
1818 		goto out_xsk_arr;
1819 
1820 	ifobj->umem = calloc(1, sizeof(*ifobj->umem));
1821 	if (!ifobj->umem)
1822 		goto out_umem;
1823 
1824 	return ifobj;
1825 
1826 out_umem:
1827 	free(ifobj->xsk_arr);
1828 out_xsk_arr:
1829 	free(ifobj);
1830 	return NULL;
1831 }
1832 
/* Release all memory owned by an ifobject created by ifobject_create(). */
static void ifobject_delete(struct ifobject *ifobj)
{
	free(ifobj->umem);
	free(ifobj->xsk_arr);
	free(ifobj);
}
1839 
1840 static bool is_xdp_supported(int ifindex)
1841 {
1842 	int flags = XDP_FLAGS_DRV_MODE;
1843 
1844 	LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = flags);
1845 	struct bpf_insn insns[2] = {
1846 		BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
1847 		BPF_EXIT_INSN()
1848 	};
1849 	int prog_fd, insn_cnt = ARRAY_SIZE(insns);
1850 	int err;
1851 
1852 	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
1853 	if (prog_fd < 0)
1854 		return false;
1855 
1856 	err = bpf_xdp_attach(ifindex, prog_fd, flags, NULL);
1857 	if (err) {
1858 		close(prog_fd);
1859 		return false;
1860 	}
1861 
1862 	bpf_xdp_detach(ifindex, flags, NULL);
1863 	close(prog_fd);
1864 
1865 	return true;
1866 }
1867 
/* Switch the interface's XDP program from its current mode to driver
 * (native) mode and verify via bpf_xdp_query() that the kernel really
 * attached it in DRV mode. Aborts the test run on any failure.
 */
static void change_to_drv_mode(struct ifobject *ifobj)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	int ret;

	xsk_detach_xdp_program(ifobj->ifindex, ifobj->xdp_flags);
	ifobj->xdp_flags = XDP_FLAGS_DRV_MODE;
	ret = xsk_attach_xdp_program(ifobj->xdp_progs->progs.xsk_def_prog, ifobj->ifindex,
				     ifobj->xdp_flags);
	if (ret) {
		ksft_print_msg("Error attaching XDP program\n");
		exit_with_error(-ret);
	}
	ifobj->xskmap = ifobj->xdp_progs->maps.xsk;

	/* Confirm the kernel did not silently fall back to SKB mode. */
	ret = bpf_xdp_query(ifobj->ifindex, XDP_FLAGS_DRV_MODE, &opts);
	if (ret)
		exit_with_error(errno);

	if (opts.attach_mode != XDP_ATTACHED_DRV) {
		ksft_print_msg("ERROR: XDP prog not in DRV mode\n");
		exit_with_error(EINVAL);
	}
}
1892 
/* Entry point: parse the interface arguments, probe which modes (SKB, and
 * optionally DRV and zero-copy) the hardware supports, then run every test
 * type in every supported mode. Exits with the kselftest pass/fail status.
 */
int main(int argc, char **argv)
{
	struct pkt_stream *rx_pkt_stream_default;
	struct pkt_stream *tx_pkt_stream_default;
	struct ifobject *ifobj_tx, *ifobj_rx;
	int modes = TEST_MODE_SKB + 1;	/* SKB mode is always available */
	u32 i, j, failed_tests = 0;
	struct test_spec test;
	bool shared_netdev;

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	ifobj_tx = ifobject_create();
	if (!ifobj_tx)
		exit_with_error(ENOMEM);
	ifobj_rx = ifobject_create();
	if (!ifobj_rx)
		exit_with_error(ENOMEM);

	setlocale(LC_ALL, "");

	parse_command_line(ifobj_tx, ifobj_rx, argc, argv);

	/* Same netdev for both directions implies a shared umem. */
	shared_netdev = (ifobj_tx->ifindex == ifobj_rx->ifindex);
	ifobj_tx->shared_umem = shared_netdev;
	ifobj_rx->shared_umem = shared_netdev;

	if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) {
		usage(basename(argv[0]));
		ksft_exit_xfail();
	}

	/* Add DRV (and possibly zero-copy) modes when supported. */
	if (is_xdp_supported(ifobj_tx->ifindex)) {
		modes++;
		if (ifobj_zc_avail(ifobj_tx))
			modes++;
	}

	/* On a shared netdev only the Rx side loads the XDP program. */
	init_iface(ifobj_rx, MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2,
		   worker_testapp_validate_rx, true);
	init_iface(ifobj_tx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1,
		   worker_testapp_validate_tx, !shared_netdev);

	test_spec_init(&test, ifobj_tx, ifobj_rx, 0);
	tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
	rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
	if (!tx_pkt_stream_default || !rx_pkt_stream_default)
		exit_with_error(ENOMEM);
	test.tx_pkt_stream_default = tx_pkt_stream_default;
	test.rx_pkt_stream_default = rx_pkt_stream_default;

	ksft_set_plan(modes * TEST_TYPE_MAX);

	for (i = 0; i < modes; i++) {
		if (i == TEST_MODE_DRV) {
			change_to_drv_mode(ifobj_rx);
			if (!shared_netdev)
				change_to_drv_mode(ifobj_tx);
		}

		for (j = 0; j < TEST_TYPE_MAX; j++) {
			/* Fresh spec per test; restores default streams. */
			test_spec_init(&test, ifobj_tx, ifobj_rx, i);
			run_pkt_test(&test, i, j);
			usleep(USLEEP_MAX);

			if (test.fail)
				failed_tests++;
		}
	}

	pkt_stream_delete(tx_pkt_stream_default);
	pkt_stream_delete(rx_pkt_stream_default);
	xsk_unload_xdp_programs(ifobj_tx);
	xsk_unload_xdp_programs(ifobj_rx);
	ifobject_delete(ifobj_tx);
	ifobject_delete(ifobj_rx);

	if (failed_tests)
		ksft_exit_fail();
	else
		ksft_exit_pass();
}
1976