1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2020 Intel Corporation. */ 3 4 /* 5 * Some functions in this program are taken from 6 * Linux kernel samples/bpf/xdpsock* and modified 7 * for use. 8 * 9 * See test_xsk.sh for detailed information on test topology 10 * and prerequisite network setup. 11 * 12 * This test program contains two threads, each thread is single socket with 13 * a unique UMEM. It validates in-order packet delivery and packet content 14 * by sending packets to each other. 15 * 16 * Tests Information: 17 * ------------------ 18 * These selftests test AF_XDP SKB and Native/DRV modes using veth 19 * Virtual Ethernet interfaces. 20 * 21 * For each mode, the following tests are run: 22 * a. nopoll - soft-irq processing in run-to-completion mode 23 * b. poll - using poll() syscall 24 * c. Socket Teardown 25 * Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy 26 * both sockets, then repeat multiple times. Only nopoll mode is used 27 * d. Bi-directional sockets 28 * Configure sockets as bi-directional tx/rx sockets, sets up fill and 29 * completion rings on each socket, tx/rx in both directions. Only nopoll 30 * mode is used 31 * e. Statistics 32 * Trigger some error conditions and ensure that the appropriate statistics 33 * are incremented. Within this test, the following statistics are tested: 34 * i. rx dropped 35 * Increase the UMEM frame headroom to a value which results in 36 * insufficient space in the rx buffer for both the packet and the headroom. 37 * ii. tx invalid 38 * Set the 'len' field of tx descriptors to an invalid value (umem frame 39 * size + 1). 40 * iii. rx ring full 41 * Reduce the size of the RX ring to a fraction of the fill ring size. 42 * iv. fill queue empty 43 * Do not populate the fill queue and then try to receive pkts. 44 * f. bpf_link resource persistence 45 * Configure sockets at indexes 0 and 1, run a traffic on queue ids 0, 46 * then remove xsk sockets from queue 0 on both veth interfaces and 47 * finally run a traffic on queues ids 1 48 * g. unaligned mode 49 * h. tests for invalid and corner case Tx descriptors so that the correct ones 50 * are discarded and let through, respectively. 51 * i. 2K frame size tests 52 * 53 * Total tests: 12 54 * 55 * Flow: 56 * ----- 57 * - Single process spawns two threads: Tx and Rx 58 * - Each of these two threads attach to a veth interface 59 * - Each thread creates one AF_XDP socket connected to a unique umem for each 60 * veth interface 61 * - Tx thread Transmits a number of packets from veth<xxxx> to veth<yyyy> 62 * - Rx thread verifies if all packets were received and delivered in-order, 63 * and have the right content 64 * 65 * Enable/disable packet dump mode: 66 * -------------------------- 67 * To enable L2 - L4 headers and payload dump of each packet on STDOUT, add 68 * parameter -D to params array in test_xsk.sh, i.e. params=("-S" "-D") 69 */ 70 71 #define _GNU_SOURCE 72 #include <assert.h> 73 #include <fcntl.h> 74 #include <errno.h> 75 #include <getopt.h> 76 #include <asm/barrier.h> 77 #include <linux/if_link.h> 78 #include <linux/if_ether.h> 79 #include <linux/ip.h> 80 #include <linux/udp.h> 81 #include <arpa/inet.h> 82 #include <net/if.h> 83 #include <locale.h> 84 #include <poll.h> 85 #include <pthread.h> 86 #include <signal.h> 87 #include <stdbool.h> 88 #include <stdio.h> 89 #include <stdlib.h> 90 #include <string.h> 91 #include <stddef.h> 92 #include <sys/mman.h> 93 #include <sys/socket.h> 94 #include <sys/time.h> 95 #include <sys/types.h> 96 #include <sys/queue.h> 97 #include <time.h> 98 #include <unistd.h> 99 #include <stdatomic.h> 100 101 #include "xsk_xdp_progs.skel.h" 102 #include "xsk.h" 103 #include "xskxceiver.h" 104 #include <bpf/bpf.h> 105 #include <linux/filter.h> 106 #include "../kselftest.h" 107 #include "xsk_xdp_metadata.h" 108 109 static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62"; 110 static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61"; 111 static const char *IP1 = "192.168.100.162"; 112 static const char *IP2 = "192.168.100.161"; 113 static const u16 UDP_PORT1 = 2020; 114 static const u16 UDP_PORT2 = 2121; 115 116 static void __exit_with_error(int error, const char *file, const char *func, int line) 117 { 118 ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error, 119 strerror(error)); 120 ksft_exit_xfail(); 121 } 122 123 #define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__) 124 #define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? "BUSY-POLL " : "" 125 static char *mode_string(struct test_spec *test) 126 { 127 switch (test->mode) { 128 case TEST_MODE_SKB: 129 return "SKB"; 130 case TEST_MODE_DRV: 131 return "DRV"; 132 case TEST_MODE_ZC: 133 return "ZC"; 134 default: 135 return "BOGUS"; 136 } 137 } 138 139 static void report_failure(struct test_spec *test) 140 { 141 if (test->fail) 142 return; 143 144 ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test), 145 test->name); 146 test->fail = true; 147 } 148 149 static void memset32_htonl(void *dest, u32 val, u32 size) 150 { 151 u32 *ptr = (u32 *)dest; 152 int i; 153 154 val = htonl(val); 155 156 for (i = 0; i < (size & (~0x3)); i += 4) 157 ptr[i >> 2] = val; 158 } 159 160 /* 161 * Fold a partial checksum 162 * This function code has been taken from 163 * Linux kernel include/asm-generic/checksum.h 164 */ 165 static __u16 csum_fold(__u32 csum) 166 { 167 u32 sum = (__force u32)csum; 168 169 sum = (sum & 0xffff) + (sum >> 16); 170 sum = (sum & 0xffff) + (sum >> 16); 171 return (__force __u16)~sum; 172 } 173 174 /* 175 * This function code has been taken from 176 * Linux kernel lib/checksum.c 177 */ 178 static u32 from64to32(u64 x) 179 { 180 /* add up 32-bit and 32-bit for 32+c bit */ 181 x = (x & 0xffffffff) + (x >> 32); 182 /* add up carry.. */ 183 x = (x & 0xffffffff) + (x >> 32); 184 return (u32)x; 185 } 186 187 /* 188 * This function code has been taken from 189 * Linux kernel lib/checksum.c 190 */ 191 static __u32 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum) 192 { 193 unsigned long long s = (__force u32)sum; 194 195 s += (__force u32)saddr; 196 s += (__force u32)daddr; 197 #ifdef __BIG_ENDIAN__ 198 s += proto + len; 199 #else 200 s += (proto + len) << 8; 201 #endif 202 return (__force __u32)from64to32(s); 203 } 204 205 /* 206 * This function has been taken from 207 * Linux kernel include/asm-generic/checksum.h 208 */ 209 static __u16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum) 210 { 211 return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); 212 } 213 214 static u16 udp_csum(u32 saddr, u32 daddr, u32 len, u8 proto, u16 *udp_pkt) 215 { 216 u32 csum = 0; 217 u32 cnt = 0; 218 219 /* udp hdr and data */ 220 for (; cnt < len; cnt += 2) 221 csum += udp_pkt[cnt >> 1]; 222 223 return csum_tcpudp_magic(saddr, daddr, len, proto, csum); 224 } 225 226 static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr) 227 { 228 memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN); 229 memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN); 230 eth_hdr->h_proto = htons(ETH_P_IP); 231 } 232 233 static void gen_ip_hdr(struct ifobject *ifobject, struct iphdr *ip_hdr) 234 { 235 ip_hdr->version = IP_PKT_VER; 236 ip_hdr->ihl = 0x5; 237 ip_hdr->tos = IP_PKT_TOS; 238 ip_hdr->tot_len = htons(IP_PKT_SIZE); 239 ip_hdr->id = 0; 240 ip_hdr->frag_off = 0; 241 ip_hdr->ttl = IPDEFTTL; 242 ip_hdr->protocol = IPPROTO_UDP; 243 ip_hdr->saddr = ifobject->src_ip; 244 ip_hdr->daddr = ifobject->dst_ip; 245 ip_hdr->check = 0; 246 } 247 248 static void gen_udp_hdr(u32 payload, void *pkt, struct ifobject *ifobject, 249 struct udphdr *udp_hdr) 250 { 251 udp_hdr->source = htons(ifobject->src_port); 252 udp_hdr->dest = htons(ifobject->dst_port); 253 udp_hdr->len = htons(UDP_PKT_SIZE); 254 memset32_htonl(pkt + PKT_HDR_SIZE, payload, UDP_PKT_DATA_SIZE); 255 } 256 257 static bool is_umem_valid(struct ifobject *ifobj) 258 { 259 return !!ifobj->umem->umem; 260 } 261 262 static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr) 263 { 264 udp_hdr->check = 0; 265 udp_hdr->check = 266 udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE, IPPROTO_UDP, (u16 *)udp_hdr); 267 } 268 269 static u32 mode_to_xdp_flags(enum test_mode mode) 270 { 271 return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE; 272 } 273 274 static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size) 275 { 276 struct xsk_umem_config cfg = { 277 .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, 278 .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, 279 .frame_size = umem->frame_size, 280 .frame_headroom = umem->frame_headroom, 281 .flags = XSK_UMEM__DEFAULT_FLAGS 282 }; 283 int ret; 284 285 if (umem->unaligned_mode) 286 cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG; 287 288 ret = xsk_umem__create(&umem->umem, buffer, size, 289 &umem->fq, &umem->cq, &cfg); 290 if (ret) 291 return ret; 292 293 umem->buffer = buffer; 294 return 0; 295 } 296 297 static void enable_busy_poll(struct xsk_socket_info *xsk) 298 { 299 int sock_opt; 300 301 sock_opt = 1; 302 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL, 303 (void *)&sock_opt, sizeof(sock_opt)) < 0) 304 exit_with_error(errno); 305 306 sock_opt = 20; 307 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL, 308 (void *)&sock_opt, sizeof(sock_opt)) < 0) 309 exit_with_error(errno); 310 311 sock_opt = BATCH_SIZE; 312 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET, 313 (void *)&sock_opt, sizeof(sock_opt)) < 0) 314 exit_with_error(errno); 315 } 316 317 static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, 318 struct ifobject *ifobject, bool shared) 319 { 320 struct xsk_socket_config cfg = {}; 321 struct xsk_ring_cons *rxr; 322 struct xsk_ring_prod *txr; 323 324 xsk->umem = umem; 325 cfg.rx_size = xsk->rxqsize; 326 cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; 327 cfg.bind_flags = ifobject->bind_flags; 328 if (shared) 329 cfg.bind_flags |= XDP_SHARED_UMEM; 330 331 txr = ifobject->tx_on ? &xsk->tx : NULL; 332 rxr = ifobject->rx_on ? &xsk->rx : NULL; 333 return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg); 334 } 335 336 static bool ifobj_zc_avail(struct ifobject *ifobject) 337 { 338 size_t umem_sz = DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE; 339 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; 340 struct xsk_socket_info *xsk; 341 struct xsk_umem_info *umem; 342 bool zc_avail = false; 343 void *bufs; 344 int ret; 345 346 bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); 347 if (bufs == MAP_FAILED) 348 exit_with_error(errno); 349 350 umem = calloc(1, sizeof(struct xsk_umem_info)); 351 if (!umem) { 352 munmap(bufs, umem_sz); 353 exit_with_error(ENOMEM); 354 } 355 umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 356 ret = xsk_configure_umem(umem, bufs, umem_sz); 357 if (ret) 358 exit_with_error(-ret); 359 360 xsk = calloc(1, sizeof(struct xsk_socket_info)); 361 if (!xsk) 362 goto out; 363 ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY; 364 ifobject->rx_on = true; 365 xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; 366 ret = __xsk_configure_socket(xsk, umem, ifobject, false); 367 if (!ret) 368 zc_avail = true; 369 370 xsk_socket__delete(xsk->xsk); 371 free(xsk); 372 out: 373 munmap(umem->buffer, umem_sz); 374 xsk_umem__delete(umem->umem); 375 free(umem); 376 return zc_avail; 377 } 378 379 static struct option long_options[] = { 380 {"interface", required_argument, 0, 'i'}, 381 {"busy-poll", no_argument, 0, 'b'}, 382 {"dump-pkts", no_argument, 0, 'D'}, 383 {"verbose", no_argument, 0, 'v'}, 384 {0, 0, 0, 0} 385 }; 386 387 static void usage(const char *prog) 388 { 389 const char *str = 390 " Usage: %s [OPTIONS]\n" 391 " Options:\n" 392 " -i, --interface Use interface\n" 393 " -D, --dump-pkts Dump packets L2 - L5\n" 394 " -v, --verbose Verbose output\n" 395 " -b, --busy-poll Enable busy poll\n"; 396 397 ksft_print_msg(str, prog); 398 } 399 400 static bool validate_interface(struct ifobject *ifobj) 401 { 402 if (!strcmp(ifobj->ifname, "")) 403 return false; 404 return true; 405 } 406 407 static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, int argc, 408 char **argv) 409 { 410 struct ifobject *ifobj; 411 u32 interface_nb = 0; 412 int option_index, c; 413 414 opterr = 0; 415 416 for (;;) { 417 c = getopt_long(argc, argv, "i:Dvb", long_options, &option_index); 418 if (c == -1) 419 break; 420 421 switch (c) { 422 case 'i': 423 if (interface_nb == 0) 424 ifobj = ifobj_tx; 425 else if (interface_nb == 1) 426 ifobj = ifobj_rx; 427 else 428 break; 429 430 memcpy(ifobj->ifname, optarg, 431 min_t(size_t, MAX_INTERFACE_NAME_CHARS, strlen(optarg))); 432 433 ifobj->ifindex = if_nametoindex(ifobj->ifname); 434 if (!ifobj->ifindex) 435 exit_with_error(errno); 436 437 interface_nb++; 438 break; 439 case 'D': 440 opt_pkt_dump = true; 441 break; 442 case 'v': 443 opt_verbose = true; 444 break; 445 case 'b': 446 ifobj_tx->busy_poll = true; 447 ifobj_rx->busy_poll = true; 448 break; 449 default: 450 usage(basename(argv[0])); 451 ksft_exit_xfail(); 452 } 453 } 454 } 455 456 static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, 457 struct ifobject *ifobj_rx) 458 { 459 u32 i, j; 460 461 for (i = 0; i < MAX_INTERFACES; i++) { 462 struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx; 463 464 ifobj->xsk = &ifobj->xsk_arr[0]; 465 ifobj->use_poll = false; 466 ifobj->use_fill_ring = true; 467 ifobj->release_rx = true; 468 ifobj->validation_func = NULL; 469 ifobj->use_metadata = false; 470 471 if (i == 0) { 472 ifobj->rx_on = false; 473 ifobj->tx_on = true; 474 ifobj->pkt_stream = test->tx_pkt_stream_default; 475 } else { 476 ifobj->rx_on = true; 477 ifobj->tx_on = false; 478 ifobj->pkt_stream = test->rx_pkt_stream_default; 479 } 480 481 memset(ifobj->umem, 0, sizeof(*ifobj->umem)); 482 ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS; 483 ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 484 if (ifobj->shared_umem && ifobj->rx_on) 485 ifobj->umem->base_addr = DEFAULT_UMEM_BUFFERS * 486 XSK_UMEM__DEFAULT_FRAME_SIZE; 487 488 for (j = 0; j < MAX_SOCKETS; j++) { 489 memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j])); 490 ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; 491 } 492 } 493 494 test->ifobj_tx = ifobj_tx; 495 test->ifobj_rx = ifobj_rx; 496 test->current_step = 0; 497 test->total_steps = 1; 498 test->nb_sockets = 1; 499 test->fail = false; 500 test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog; 501 test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk; 502 test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog; 503 test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk; 504 } 505 506 static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, 507 struct ifobject *ifobj_rx, enum test_mode mode) 508 { 509 struct pkt_stream *tx_pkt_stream; 510 struct pkt_stream *rx_pkt_stream; 511 u32 i; 512 513 tx_pkt_stream = test->tx_pkt_stream_default; 514 rx_pkt_stream = test->rx_pkt_stream_default; 515 memset(test, 0, sizeof(*test)); 516 test->tx_pkt_stream_default = tx_pkt_stream; 517 test->rx_pkt_stream_default = rx_pkt_stream; 518 519 for (i = 0; i < MAX_INTERFACES; i++) { 520 struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx; 521 522 ifobj->bind_flags = XDP_USE_NEED_WAKEUP; 523 if (mode == TEST_MODE_ZC) 524 ifobj->bind_flags |= XDP_ZEROCOPY; 525 else 526 ifobj->bind_flags |= XDP_COPY; 527 } 528 529 test->mode = mode; 530 __test_spec_init(test, ifobj_tx, ifobj_rx); 531 } 532 533 static void test_spec_reset(struct test_spec *test) 534 { 535 __test_spec_init(test, test->ifobj_tx, test->ifobj_rx); 536 } 537 538 static void test_spec_set_name(struct test_spec *test, const char *name) 539 { 540 strncpy(test->name, name, MAX_TEST_NAME_SIZE); 541 } 542 543 static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx, 544 struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx, 545 struct bpf_map *xskmap_tx) 546 { 547 test->xdp_prog_rx = xdp_prog_rx; 548 test->xdp_prog_tx = xdp_prog_tx; 549 test->xskmap_rx = xskmap_rx; 550 test->xskmap_tx = xskmap_tx; 551 } 552 553 static void pkt_stream_reset(struct pkt_stream *pkt_stream) 554 { 555 if (pkt_stream) 556 pkt_stream->rx_pkt_nb = 0; 557 } 558 559 static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb) 560 { 561 if (pkt_nb >= pkt_stream->nb_pkts) 562 return NULL; 563 564 return &pkt_stream->pkts[pkt_nb]; 565 } 566 567 static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent) 568 { 569 while (pkt_stream->rx_pkt_nb < pkt_stream->nb_pkts) { 570 (*pkts_sent)++; 571 if (pkt_stream->pkts[pkt_stream->rx_pkt_nb].valid) 572 return &pkt_stream->pkts[pkt_stream->rx_pkt_nb++]; 573 pkt_stream->rx_pkt_nb++; 574 } 575 return NULL; 576 } 577 578 static void pkt_stream_delete(struct pkt_stream *pkt_stream) 579 { 580 free(pkt_stream->pkts); 581 free(pkt_stream); 582 } 583 584 static void pkt_stream_restore_default(struct test_spec *test) 585 { 586 struct pkt_stream *tx_pkt_stream = test->ifobj_tx->pkt_stream; 587 struct pkt_stream *rx_pkt_stream = test->ifobj_rx->pkt_stream; 588 589 if (tx_pkt_stream != test->tx_pkt_stream_default) { 590 pkt_stream_delete(test->ifobj_tx->pkt_stream); 591 test->ifobj_tx->pkt_stream = test->tx_pkt_stream_default; 592 } 593 594 if (rx_pkt_stream != test->rx_pkt_stream_default) { 595 pkt_stream_delete(test->ifobj_rx->pkt_stream); 596 test->ifobj_rx->pkt_stream = test->rx_pkt_stream_default; 597 } 598 } 599 600 static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts) 601 { 602 struct pkt_stream *pkt_stream; 603 604 pkt_stream = calloc(1, sizeof(*pkt_stream)); 605 if (!pkt_stream) 606 return NULL; 607 608 pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts)); 609 if (!pkt_stream->pkts) { 610 free(pkt_stream); 611 return NULL; 612 } 613 614 pkt_stream->nb_pkts = nb_pkts; 615 return pkt_stream; 616 } 617 618 static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 len) 619 { 620 pkt->addr = addr + umem->base_addr; 621 pkt->len = len; 622 if (len > umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 2 - umem->frame_headroom) 623 pkt->valid = false; 624 else 625 pkt->valid = true; 626 } 627 628 static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len) 629 { 630 struct pkt_stream *pkt_stream; 631 u32 i; 632 633 pkt_stream = __pkt_stream_alloc(nb_pkts); 634 if (!pkt_stream) 635 exit_with_error(ENOMEM); 636 637 for (i = 0; i < nb_pkts; i++) { 638 pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size, 639 pkt_len); 640 pkt_stream->pkts[i].payload = i; 641 } 642 643 return pkt_stream; 644 } 645 646 static struct pkt_stream *pkt_stream_clone(struct xsk_umem_info *umem, 647 struct pkt_stream *pkt_stream) 648 { 649 return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len); 650 } 651 652 static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len) 653 { 654 struct pkt_stream *pkt_stream; 655 656 pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len); 657 test->ifobj_tx->pkt_stream = pkt_stream; 658 pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len); 659 test->ifobj_rx->pkt_stream = pkt_stream; 660 } 661 662 static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len, 663 int offset) 664 { 665 struct xsk_umem_info *umem = ifobj->umem; 666 struct pkt_stream *pkt_stream; 667 u32 i; 668 669 pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream); 670 for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2) 671 pkt_set(umem, &pkt_stream->pkts[i], 672 (i % umem->num_frames) * umem->frame_size + offset, pkt_len); 673 674 ifobj->pkt_stream = pkt_stream; 675 } 676 677 static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset) 678 { 679 __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset); 680 __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset); 681 } 682 683 static void pkt_stream_receive_half(struct test_spec *test) 684 { 685 struct xsk_umem_info *umem = test->ifobj_rx->umem; 686 struct pkt_stream *pkt_stream = test->ifobj_tx->pkt_stream; 687 u32 i; 688 689 test->ifobj_rx->pkt_stream = pkt_stream_generate(umem, pkt_stream->nb_pkts, 690 pkt_stream->pkts[0].len); 691 pkt_stream = test->ifobj_rx->pkt_stream; 692 for (i = 1; i < pkt_stream->nb_pkts; i += 2) 693 pkt_stream->pkts[i].valid = false; 694 } 695 696 static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb) 697 { 698 struct pkt *pkt = pkt_stream_get_pkt(ifobject->pkt_stream, pkt_nb); 699 struct udphdr *udp_hdr; 700 struct ethhdr *eth_hdr; 701 struct iphdr *ip_hdr; 702 void *data; 703 704 if (!pkt) 705 return NULL; 706 if (!pkt->valid || pkt->len < MIN_PKT_SIZE) 707 return pkt; 708 709 data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr); 710 udp_hdr = (struct udphdr *)(data + sizeof(struct ethhdr) + sizeof(struct iphdr)); 711 ip_hdr = (struct iphdr *)(data + sizeof(struct ethhdr)); 712 eth_hdr = (struct ethhdr *)data; 713 714 gen_udp_hdr(pkt_nb, data, ifobject, udp_hdr); 715 gen_ip_hdr(ifobject, ip_hdr); 716 gen_udp_csum(udp_hdr, ip_hdr); 717 gen_eth_hdr(ifobject, eth_hdr); 718 719 return pkt; 720 } 721 722 static void __pkt_stream_generate_custom(struct ifobject *ifobj, 723 struct pkt *pkts, u32 nb_pkts) 724 { 725 struct pkt_stream *pkt_stream; 726 u32 i; 727 728 pkt_stream = __pkt_stream_alloc(nb_pkts); 729 if (!pkt_stream) 730 exit_with_error(ENOMEM); 731 732 for (i = 0; i < nb_pkts; i++) { 733 pkt_stream->pkts[i].addr = pkts[i].addr + ifobj->umem->base_addr; 734 pkt_stream->pkts[i].len = pkts[i].len; 735 pkt_stream->pkts[i].payload = i; 736 pkt_stream->pkts[i].valid = pkts[i].valid; 737 } 738 739 ifobj->pkt_stream = pkt_stream; 740 } 741 742 static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts) 743 { 744 __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts); 745 __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts); 746 } 747 748 static void pkt_dump(void *pkt, u32 len) 749 { 750 char s[INET_ADDRSTRLEN]; 751 struct ethhdr *ethhdr; 752 struct udphdr *udphdr; 753 struct iphdr *iphdr; 754 u32 payload, i; 755 756 ethhdr = pkt; 757 iphdr = pkt + sizeof(*ethhdr); 758 udphdr = pkt + sizeof(*ethhdr) + sizeof(*iphdr); 759 760 /*extract L2 frame */ 761 fprintf(stdout, "DEBUG>> L2: dst mac: "); 762 for (i = 0; i < ETH_ALEN; i++) 763 fprintf(stdout, "%02X", ethhdr->h_dest[i]); 764 765 fprintf(stdout, "\nDEBUG>> L2: src mac: "); 766 for (i = 0; i < ETH_ALEN; i++) 767 fprintf(stdout, "%02X", ethhdr->h_source[i]); 768 769 /*extract L3 frame */ 770 fprintf(stdout, "\nDEBUG>> L3: ip_hdr->ihl: %02X\n", iphdr->ihl); 771 fprintf(stdout, "DEBUG>> L3: ip_hdr->saddr: %s\n", 772 inet_ntop(AF_INET, &iphdr->saddr, s, sizeof(s))); 773 fprintf(stdout, "DEBUG>> L3: ip_hdr->daddr: %s\n", 774 inet_ntop(AF_INET, &iphdr->daddr, s, sizeof(s))); 775 /*extract L4 frame */ 776 fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source)); 777 fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest)); 778 /*extract L5 frame */ 779 payload = ntohl(*((u32 *)(pkt + PKT_HDR_SIZE))); 780 781 fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload); 782 fprintf(stdout, "---------------------------------------\n"); 783 } 784 785 static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr, 786 u64 pkt_stream_addr) 787 { 788 u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom; 789 u32 offset = addr % umem->frame_size, expected_offset = 0; 790 791 if (!pkt_stream->use_addr_for_fill) 792 pkt_stream_addr = 0; 793 794 expected_offset += (pkt_stream_addr + headroom + XDP_PACKET_HEADROOM) % umem->frame_size; 795 796 if (offset == expected_offset) 797 return true; 798 799 ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset); 800 return false; 801 } 802 803 static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr) 804 { 805 void *data = xsk_umem__get_data(buffer, addr); 806 struct xdp_info *meta = data - sizeof(struct xdp_info); 807 808 if (meta->count != pkt->payload) { 809 ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%d]\n", 810 __func__, pkt->payload, meta->count); 811 return false; 812 } 813 814 return true; 815 } 816 817 static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) 818 { 819 void *data = xsk_umem__get_data(buffer, addr); 820 struct iphdr *iphdr = (struct iphdr *)(data + sizeof(struct ethhdr)); 821 822 if (!pkt) { 823 ksft_print_msg("[%s] too many packets received\n", __func__); 824 return false; 825 } 826 827 if (len < MIN_PKT_SIZE || pkt->len < MIN_PKT_SIZE) { 828 /* Do not try to verify packets that are smaller than minimum size. */ 829 return true; 830 } 831 832 if (pkt->len != len) { 833 ksft_print_msg("[%s] expected length [%d], got length [%d]\n", 834 __func__, pkt->len, len); 835 return false; 836 } 837 838 if (iphdr->version == IP_PKT_VER && iphdr->tos == IP_PKT_TOS) { 839 u32 seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE))); 840 841 if (opt_pkt_dump) 842 pkt_dump(data, PKT_SIZE); 843 844 if (pkt->payload != seqnum) { 845 ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n", 846 __func__, pkt->payload, seqnum); 847 return false; 848 } 849 } else { 850 ksft_print_msg("Invalid frame received: "); 851 ksft_print_msg("[IP_PKT_VER: %02X], [IP_PKT_TOS: %02X]\n", iphdr->version, 852 iphdr->tos); 853 return false; 854 } 855 856 return true; 857 } 858 859 static void kick_tx(struct xsk_socket_info *xsk) 860 { 861 int ret; 862 863 ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0); 864 if (ret >= 0) 865 return; 866 if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) { 867 usleep(100); 868 return; 869 } 870 exit_with_error(errno); 871 } 872 873 static void kick_rx(struct xsk_socket_info *xsk) 874 { 875 int ret; 876 877 ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL); 878 if (ret < 0) 879 exit_with_error(errno); 880 } 881 882 static int complete_pkts(struct xsk_socket_info *xsk, int batch_size) 883 { 884 unsigned int rcvd; 885 u32 idx; 886 887 if (xsk_ring_prod__needs_wakeup(&xsk->tx)) 888 kick_tx(xsk); 889 890 rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx); 891 if (rcvd) { 892 if (rcvd > xsk->outstanding_tx) { 893 u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1); 894 895 ksft_print_msg("[%s] Too many packets completed\n", __func__); 896 ksft_print_msg("Last completion address: %llx\n", addr); 897 return TEST_FAILURE; 898 } 899 900 xsk_ring_cons__release(&xsk->umem->cq, rcvd); 901 xsk->outstanding_tx -= rcvd; 902 } 903 904 return TEST_PASS; 905 } 906 907 static int receive_pkts(struct test_spec *test, struct pollfd *fds) 908 { 909 struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0}; 910 struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream; 911 u32 idx_rx = 0, idx_fq = 0, rcvd, i, pkts_sent = 0; 912 struct xsk_socket_info *xsk = test->ifobj_rx->xsk; 913 struct ifobject *ifobj = test->ifobj_rx; 914 struct xsk_umem_info *umem = xsk->umem; 915 struct pkt *pkt; 916 int ret; 917 918 ret = gettimeofday(&tv_now, NULL); 919 if (ret) 920 exit_with_error(errno); 921 timeradd(&tv_now, &tv_timeout, &tv_end); 922 923 pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent); 924 while (pkt) { 925 ret = gettimeofday(&tv_now, NULL); 926 if (ret) 927 exit_with_error(errno); 928 if (timercmp(&tv_now, &tv_end, >)) { 929 ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__); 930 return TEST_FAILURE; 931 } 932 933 kick_rx(xsk); 934 if (ifobj->use_poll) { 935 ret = poll(fds, 1, POLL_TMOUT); 936 if (ret < 0) 937 exit_with_error(errno); 938 939 if (!ret) { 940 if (!is_umem_valid(test->ifobj_tx)) 941 return TEST_PASS; 942 943 ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__); 944 return TEST_FAILURE; 945 946 } 947 948 if (!(fds->revents & POLLIN)) 949 continue; 950 } 951 952 rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx); 953 if (!rcvd) 954 continue; 955 956 if (ifobj->use_fill_ring) { 957 ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); 958 while (ret != rcvd) { 959 if (ret < 0) 960 exit_with_error(-ret); 961 if (xsk_ring_prod__needs_wakeup(&umem->fq)) { 962 ret = poll(fds, 1, POLL_TMOUT); 963 if (ret < 0) 964 exit_with_error(errno); 965 } 966 ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); 967 } 968 } 969 970 for (i = 0; i < rcvd; i++) { 971 const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++); 972 u64 addr = desc->addr, orig; 973 974 orig = xsk_umem__extract_addr(addr); 975 addr = xsk_umem__add_offset_to_addr(addr); 976 977 if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len) || 978 !is_offset_correct(umem, pkt_stream, addr, pkt->addr) || 979 (ifobj->use_metadata && !is_metadata_correct(pkt, umem->buffer, addr))) 980 return TEST_FAILURE; 981 982 if (ifobj->use_fill_ring) 983 *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig; 984 pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent); 985 } 986 987 if (ifobj->use_fill_ring) 988 xsk_ring_prod__submit(&umem->fq, rcvd); 989 if (ifobj->release_rx) 990 xsk_ring_cons__release(&xsk->rx, rcvd); 991 992 pthread_mutex_lock(&pacing_mutex); 993 pkts_in_flight -= pkts_sent; 994 if (pkts_in_flight < umem->num_frames) 995 pthread_cond_signal(&pacing_cond); 996 pthread_mutex_unlock(&pacing_mutex); 997 pkts_sent = 0; 998 } 999 1000 return TEST_PASS; 1001 } 1002 1003 static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fds, 1004 bool timeout) 1005 { 1006 struct xsk_socket_info *xsk = ifobject->xsk; 1007 bool use_poll = ifobject->use_poll; 1008 u32 i, idx = 0, valid_pkts = 0; 1009 int ret; 1010 1011 while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) { 1012 if (use_poll) { 1013 ret = poll(fds, 1, POLL_TMOUT); 1014 if (timeout) { 1015 if (ret < 0) { 1016 ksft_print_msg("ERROR: [%s] Poll error %d\n", 1017 __func__, errno); 1018 return TEST_FAILURE; 1019 } 1020 if (ret == 0) 1021 return TEST_PASS; 1022 break; 1023 } 1024 if (ret <= 0) { 1025 ksft_print_msg("ERROR: [%s] Poll error %d\n", 1026 __func__, errno); 1027 return TEST_FAILURE; 1028 } 1029 } 1030 1031 complete_pkts(xsk, BATCH_SIZE); 1032 } 1033 1034 for (i = 0; i < BATCH_SIZE; i++) { 1035 struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i); 1036 struct pkt *pkt = pkt_generate(ifobject, *pkt_nb); 1037 1038 if (!pkt) 1039 break; 1040 1041 tx_desc->addr = pkt->addr; 1042 tx_desc->len = pkt->len; 1043 (*pkt_nb)++; 1044 if (pkt->valid) 1045 valid_pkts++; 1046 } 1047 1048 pthread_mutex_lock(&pacing_mutex); 1049 pkts_in_flight += valid_pkts; 1050 /* pkts_in_flight might be negative if many invalid packets are sent */ 1051 if (pkts_in_flight >= (int)(ifobject->umem->num_frames - BATCH_SIZE)) { 1052 kick_tx(xsk); 1053 pthread_cond_wait(&pacing_cond, &pacing_mutex); 1054 } 1055 pthread_mutex_unlock(&pacing_mutex); 1056 1057 xsk_ring_prod__submit(&xsk->tx, i); 1058 xsk->outstanding_tx += valid_pkts; 1059 1060 if (use_poll) { 1061 ret = poll(fds, 1, POLL_TMOUT); 1062 if (ret <= 0) { 1063 if (ret == 0 && timeout) 1064 return TEST_PASS; 1065 1066 ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret); 1067 return TEST_FAILURE; 1068 } 1069 } 1070 1071 if (!timeout) { 1072 if (complete_pkts(xsk, i)) 1073 return TEST_FAILURE; 1074 1075 usleep(10); 1076 return TEST_PASS; 1077 } 1078 1079 return TEST_CONTINUE; 1080 } 1081 1082 static void wait_for_tx_completion(struct xsk_socket_info *xsk) 1083 { 1084 while (xsk->outstanding_tx) 1085 complete_pkts(xsk, BATCH_SIZE); 1086 } 1087 1088 static int send_pkts(struct test_spec *test, struct ifobject *ifobject) 1089 { 1090 bool timeout = !is_umem_valid(test->ifobj_rx); 1091 struct pollfd fds = { }; 1092 u32 pkt_cnt = 0, ret; 1093 1094 fds.fd = xsk_socket__fd(ifobject->xsk->xsk); 1095 fds.events = POLLOUT; 1096 1097 while (pkt_cnt < ifobject->pkt_stream->nb_pkts) { 1098 ret = __send_pkts(ifobject, &pkt_cnt, &fds, timeout); 1099 if ((ret || test->fail) && !timeout) 1100 return TEST_FAILURE; 1101 else if (ret == TEST_PASS && timeout) 1102 return ret; 1103 } 1104 1105 wait_for_tx_completion(ifobject->xsk); 1106 return TEST_PASS; 1107 } 1108 1109 static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats) 1110 { 1111 int fd = xsk_socket__fd(xsk), err; 1112 socklen_t optlen, expected_len; 1113 1114 optlen = sizeof(*stats); 1115 err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen); 1116 if (err) { 1117 ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n", 1118 __func__, -err, strerror(-err)); 1119 return TEST_FAILURE; 1120 } 1121 1122 expected_len = sizeof(struct xdp_statistics); 1123 if (optlen != expected_len) { 1124 ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n", 1125 __func__, expected_len, optlen); 1126 return TEST_FAILURE; 1127 } 1128 1129 return TEST_PASS; 1130 } 1131 1132 static int validate_rx_dropped(struct ifobject *ifobject) 1133 { 1134 struct xsk_socket *xsk = ifobject->xsk->xsk; 1135 struct xdp_statistics stats; 1136 int err; 1137 1138 kick_rx(ifobject->xsk); 1139 1140 err = get_xsk_stats(xsk, &stats); 1141 if (err) 1142 return TEST_FAILURE; 1143 1144 /* The receiver calls getsockopt after receiving the last (valid) 1145 * packet which is not the final packet sent in this test (valid and 1146 * invalid packets are sent in alternating fashion with the final 1147 * packet being invalid). Since the last packet may or may not have 1148 * been dropped already, both outcomes must be allowed. 1149 */ 1150 if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 || 1151 stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 - 1) 1152 return TEST_PASS; 1153 1154 return TEST_FAILURE; 1155 } 1156 1157 static int validate_rx_full(struct ifobject *ifobject) 1158 { 1159 struct xsk_socket *xsk = ifobject->xsk->xsk; 1160 struct xdp_statistics stats; 1161 int err; 1162 1163 usleep(1000); 1164 kick_rx(ifobject->xsk); 1165 1166 err = get_xsk_stats(xsk, &stats); 1167 if (err) 1168 return TEST_FAILURE; 1169 1170 if (stats.rx_ring_full) 1171 return TEST_PASS; 1172 1173 return TEST_FAILURE; 1174 } 1175 1176 static int validate_fill_empty(struct ifobject *ifobject) 1177 { 1178 struct xsk_socket *xsk = ifobject->xsk->xsk; 1179 struct xdp_statistics stats; 1180 int err; 1181 1182 usleep(1000); 1183 kick_rx(ifobject->xsk); 1184 1185 err = get_xsk_stats(xsk, &stats); 1186 if (err) 1187 return TEST_FAILURE; 1188 1189 if (stats.rx_fill_ring_empty_descs) 1190 return TEST_PASS; 1191 1192 return TEST_FAILURE; 1193 } 1194 1195 static int validate_tx_invalid_descs(struct ifobject *ifobject) 1196 { 1197 struct xsk_socket *xsk = ifobject->xsk->xsk; 1198 int fd = xsk_socket__fd(xsk); 1199 struct xdp_statistics stats; 1200 socklen_t optlen; 1201 int err; 1202 1203 optlen = sizeof(stats); 1204 err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen); 1205 if (err) { 1206 ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n", 1207 __func__, -err, strerror(-err)); 1208 return TEST_FAILURE; 1209 } 1210 1211 if (stats.tx_invalid_descs != ifobject->pkt_stream->nb_pkts / 2) { 1212 ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%u] expected [%u]\n", 1213 __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts); 1214 return TEST_FAILURE; 1215 } 1216 1217 return TEST_PASS; 1218 } 1219 1220 static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject, 1221 struct xsk_umem_info *umem, bool tx) 1222 { 1223 int i, ret; 1224 1225 for (i = 0; i < test->nb_sockets; i++) { 1226 bool shared = (ifobject->shared_umem && tx) ? true : !!i; 1227 u32 ctr = 0; 1228 1229 while (ctr++ < SOCK_RECONF_CTR) { 1230 ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem, 1231 ifobject, shared); 1232 if (!ret) 1233 break; 1234 1235 /* Retry if it fails as xsk_socket__create() is asynchronous */ 1236 if (ctr >= SOCK_RECONF_CTR) 1237 exit_with_error(-ret); 1238 usleep(USLEEP_MAX); 1239 } 1240 if (ifobject->busy_poll) 1241 enable_busy_poll(&ifobject->xsk_arr[i]); 1242 } 1243 } 1244 1245 static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject) 1246 { 1247 xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true); 1248 ifobject->xsk = &ifobject->xsk_arr[0]; 1249 ifobject->xskmap = test->ifobj_rx->xskmap; 1250 memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info)); 1251 } 1252 1253 static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream) 1254 { 1255 u32 idx = 0, i, buffers_to_fill; 1256 int ret; 1257 1258 if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) 1259 buffers_to_fill = umem->num_frames; 1260 else 1261 buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS; 1262 1263 ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); 1264 if (ret != buffers_to_fill) 1265 exit_with_error(ENOSPC); 1266 for (i = 0; i < buffers_to_fill; i++) { 1267 u64 addr; 1268 1269 if (pkt_stream->use_addr_for_fill) { 1270 struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i); 1271 1272 if (!pkt) 1273 break; 1274 addr = pkt->addr; 1275 } else { 1276 addr = i * umem->frame_size; 1277 } 1278 1279 *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; 1280 } 1281 xsk_ring_prod__submit(&umem->fq, i); 1282 } 1283 1284 static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) 1285 { 1286 u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size; 1287 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; 1288 LIBBPF_OPTS(bpf_xdp_query_opts, opts); 1289 void *bufs; 1290 int ret; 1291 1292 if (ifobject->umem->unaligned_mode) 1293 mmap_flags |= MAP_HUGETLB; 1294 1295 if (ifobject->shared_umem) 1296 umem_sz *= 2; 1297 1298 bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); 1299 if (bufs == MAP_FAILED) 1300 exit_with_error(errno); 1301 1302 ret = xsk_configure_umem(ifobject->umem, bufs, umem_sz); 1303 if (ret) 1304 exit_with_error(-ret); 1305 1306 xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream); 1307 1308 xsk_configure_socket(test, ifobject, ifobject->umem, false); 1309 1310 ifobject->xsk = &ifobject->xsk_arr[0]; 1311 1312 if (!ifobject->rx_on) 1313 return; 1314 1315 ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk); 1316 if (ret) 1317 exit_with_error(errno); 1318 } 1319 1320 static void *worker_testapp_validate_tx(void *arg) 1321 { 1322 struct test_spec *test = (struct test_spec *)arg; 1323 struct ifobject *ifobject = test->ifobj_tx; 1324 int err; 1325 1326 if (test->current_step == 1) { 1327 if (!ifobject->shared_umem) 1328 thread_common_ops(test, ifobject); 1329 else 1330 thread_common_ops_tx(test, ifobject); 1331 } 1332 1333 print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts, 1334 ifobject->ifname); 1335 err = send_pkts(test, ifobject); 1336 1337 if (!err && ifobject->validation_func) 1338 err = ifobject->validation_func(ifobject); 1339 if (err) 1340 report_failure(test); 1341 1342 pthread_exit(NULL); 1343 } 1344 1345 static void *worker_testapp_validate_rx(void *arg) 1346 { 1347 struct test_spec *test = (struct test_spec *)arg; 1348 struct ifobject *ifobject = test->ifobj_rx; 1349 struct pollfd fds = { }; 1350 int err; 1351 1352 if (test->current_step == 1) { 1353 thread_common_ops(test, ifobject); 1354 } else { 1355 xsk_clear_xskmap(ifobject->xskmap); 1356 err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk); 1357 if (err) { 1358 printf("Error: Failed to update xskmap, error %s\n", strerror(-err)); 1359 exit_with_error(-err); 1360 } 1361 } 1362 1363 fds.fd = xsk_socket__fd(ifobject->xsk->xsk); 1364 fds.events = POLLIN; 1365 1366 pthread_barrier_wait(&barr); 1367 1368 err = receive_pkts(test, &fds); 1369 1370 if (!err && ifobject->validation_func) 1371 err = ifobject->validation_func(ifobject); 1372 if (err) { 1373 report_failure(test); 1374 pthread_mutex_lock(&pacing_mutex); 1375 pthread_cond_signal(&pacing_cond); 1376 pthread_mutex_unlock(&pacing_mutex); 1377 } 1378 1379 pthread_exit(NULL); 1380 } 1381 1382 static void testapp_clean_xsk_umem(struct ifobject *ifobj) 1383 { 1384 u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size; 1385 1386 if (ifobj->shared_umem) 1387 umem_sz *= 2; 1388 1389 xsk_umem__delete(ifobj->umem->umem); 1390 munmap(ifobj->umem->buffer, umem_sz); 1391 } 1392 1393 static void handler(int signum) 1394 { 1395 pthread_exit(NULL); 1396 } 1397 1398 static bool xdp_prog_changed(struct test_spec *test, struct ifobject *ifobj) 1399 { 1400 return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode; 1401 } 1402 1403 static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog, 1404 struct bpf_map *xskmap, enum test_mode mode) 1405 { 1406 int err; 1407 1408 xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode)); 1409 err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode)); 1410 if (err) { 1411 printf("Error attaching XDP program\n"); 1412 exit_with_error(-err); 1413 } 1414 1415 if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC)) 1416 if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) { 1417 ksft_print_msg("ERROR: XDP prog not in DRV mode\n"); 1418 exit_with_error(EINVAL); 1419 } 1420 1421 ifobj->xdp_prog = xdp_prog; 1422 ifobj->xskmap = xskmap; 1423 ifobj->mode = mode; 1424 } 1425 1426 static void xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx, 1427 struct ifobject *ifobj_tx) 1428 { 1429 if (xdp_prog_changed(test, ifobj_rx)) 1430 xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode); 1431 1432 if (!ifobj_tx || ifobj_tx->shared_umem) 1433 return; 1434 1435 if (xdp_prog_changed(test, ifobj_tx)) 1436 xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode); 1437 } 1438 1439 static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1, 1440 struct ifobject *ifobj2) 1441 { 1442 pthread_t t0, t1; 1443 1444 if (ifobj2) 1445 if (pthread_barrier_init(&barr, NULL, 2)) 1446 exit_with_error(errno); 1447 1448 test->current_step++; 1449 pkt_stream_reset(ifobj1->pkt_stream); 1450 pkts_in_flight = 0; 1451 1452 signal(SIGUSR1, handler); 1453 /*Spawn RX thread */ 1454 pthread_create(&t0, NULL, ifobj1->func_ptr, test); 1455 1456 if (ifobj2) { 1457 pthread_barrier_wait(&barr); 1458 if (pthread_barrier_destroy(&barr)) 1459 exit_with_error(errno); 1460 1461 /*Spawn TX thread */ 1462 pthread_create(&t1, NULL, ifobj2->func_ptr, test); 1463 1464 pthread_join(t1, NULL); 1465 } 1466 1467 if (!ifobj2) 1468 pthread_kill(t0, SIGUSR1); 1469 else 1470 pthread_join(t0, NULL); 1471 1472 if (test->total_steps == test->current_step || test->fail) { 1473 if (ifobj2) 1474 xsk_socket__delete(ifobj2->xsk->xsk); 1475 xsk_socket__delete(ifobj1->xsk->xsk); 1476 testapp_clean_xsk_umem(ifobj1); 1477 if (ifobj2 && !ifobj2->shared_umem) 1478 testapp_clean_xsk_umem(ifobj2); 1479 } 1480 1481 return !!test->fail; 1482 } 1483 1484 static int testapp_validate_traffic(struct test_spec *test) 1485 { 1486 struct ifobject *ifobj_rx = test->ifobj_rx; 1487 struct ifobject *ifobj_tx = test->ifobj_tx; 1488 1489 xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx); 1490 return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx); 1491 } 1492 1493 static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj) 1494 { 1495 return __testapp_validate_traffic(test, ifobj, NULL); 1496 } 1497 1498 static void testapp_teardown(struct test_spec *test) 1499 { 1500 int i; 1501 1502 test_spec_set_name(test, "TEARDOWN"); 1503 for (i = 0; i < MAX_TEARDOWN_ITER; i++) { 1504 if (testapp_validate_traffic(test)) 1505 return; 1506 test_spec_reset(test); 1507 } 1508 } 1509 1510 static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2) 1511 { 1512 thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr; 1513 struct ifobject *tmp_ifobj = (*ifobj1); 1514 1515 (*ifobj1)->func_ptr = (*ifobj2)->func_ptr; 1516 (*ifobj2)->func_ptr = tmp_func_ptr; 1517 1518 *ifobj1 = *ifobj2; 1519 *ifobj2 = tmp_ifobj; 1520 } 1521 1522 static void testapp_bidi(struct test_spec *test) 1523 { 1524 test_spec_set_name(test, "BIDIRECTIONAL"); 1525 test->ifobj_tx->rx_on = true; 1526 test->ifobj_rx->tx_on = true; 1527 test->total_steps = 2; 1528 if (testapp_validate_traffic(test)) 1529 return; 1530 1531 print_verbose("Switching Tx/Rx vectors\n"); 1532 swap_directions(&test->ifobj_rx, &test->ifobj_tx); 1533 __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx); 1534 1535 swap_directions(&test->ifobj_rx, &test->ifobj_tx); 1536 } 1537 1538 static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx) 1539 { 1540 int ret; 1541 1542 xsk_socket__delete(ifobj_tx->xsk->xsk); 1543 xsk_socket__delete(ifobj_rx->xsk->xsk); 1544 ifobj_tx->xsk = &ifobj_tx->xsk_arr[1]; 1545 ifobj_rx->xsk = &ifobj_rx->xsk_arr[1]; 1546 1547 ret = xsk_update_xskmap(ifobj_rx->xskmap, ifobj_rx->xsk->xsk); 1548 if (ret) 1549 exit_with_error(errno); 1550 } 1551 1552 static void testapp_bpf_res(struct test_spec *test) 1553 { 1554 test_spec_set_name(test, "BPF_RES"); 1555 test->total_steps = 2; 1556 test->nb_sockets = 2; 1557 if (testapp_validate_traffic(test)) 1558 return; 1559 1560 swap_xsk_resources(test->ifobj_tx, test->ifobj_rx); 1561 testapp_validate_traffic(test); 1562 } 1563 1564 static void testapp_headroom(struct test_spec *test) 1565 { 1566 test_spec_set_name(test, "UMEM_HEADROOM"); 1567 test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE; 1568 testapp_validate_traffic(test); 1569 } 1570 1571 static void testapp_stats_rx_dropped(struct test_spec *test) 1572 { 1573 test_spec_set_name(test, "STAT_RX_DROPPED"); 1574 pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0); 1575 test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size - 1576 XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3; 1577 pkt_stream_receive_half(test); 1578 test->ifobj_rx->validation_func = validate_rx_dropped; 1579 testapp_validate_traffic(test); 1580 } 1581 1582 static void testapp_stats_tx_invalid_descs(struct test_spec *test) 1583 { 1584 test_spec_set_name(test, "STAT_TX_INVALID"); 1585 pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0); 1586 test->ifobj_tx->validation_func = validate_tx_invalid_descs; 1587 testapp_validate_traffic(test); 1588 } 1589 1590 static void testapp_stats_rx_full(struct test_spec *test) 1591 { 1592 test_spec_set_name(test, "STAT_RX_FULL"); 1593 pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE); 1594 test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, 1595 DEFAULT_UMEM_BUFFERS, PKT_SIZE); 1596 if (!test->ifobj_rx->pkt_stream) 1597 exit_with_error(ENOMEM); 1598 1599 test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS; 1600 test->ifobj_rx->release_rx = false; 1601 test->ifobj_rx->validation_func = validate_rx_full; 1602 testapp_validate_traffic(test); 1603 } 1604 1605 static void testapp_stats_fill_empty(struct test_spec *test) 1606 { 1607 test_spec_set_name(test, "STAT_RX_FILL_EMPTY"); 1608 pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE); 1609 test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, 1610 DEFAULT_UMEM_BUFFERS, PKT_SIZE); 1611 if (!test->ifobj_rx->pkt_stream) 1612 exit_with_error(ENOMEM); 1613 1614 test->ifobj_rx->use_fill_ring = false; 1615 test->ifobj_rx->validation_func = validate_fill_empty; 1616 testapp_validate_traffic(test); 1617 } 1618 1619 /* Simple test */ 1620 static bool hugepages_present(struct ifobject *ifobject) 1621 { 1622 const size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size; 1623 void *bufs; 1624 1625 bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, 1626 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0); 1627 if (bufs == MAP_FAILED) 1628 return false; 1629 1630 munmap(bufs, mmap_sz); 1631 return true; 1632 } 1633 1634 static bool testapp_unaligned(struct test_spec *test) 1635 { 1636 if (!hugepages_present(test->ifobj_tx)) { 1637 ksft_test_result_skip("No 2M huge pages present.\n"); 1638 return false; 1639 } 1640 1641 test_spec_set_name(test, "UNALIGNED_MODE"); 1642 test->ifobj_tx->umem->unaligned_mode = true; 1643 test->ifobj_rx->umem->unaligned_mode = true; 1644 /* Let half of the packets straddle a buffer boundrary */ 1645 pkt_stream_replace_half(test, PKT_SIZE, -PKT_SIZE / 2); 1646 test->ifobj_rx->pkt_stream->use_addr_for_fill = true; 1647 testapp_validate_traffic(test); 1648 1649 return true; 1650 } 1651 1652 static void testapp_single_pkt(struct test_spec *test) 1653 { 1654 struct pkt pkts[] = {{0x1000, PKT_SIZE, 0, true}}; 1655 1656 pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); 1657 testapp_validate_traffic(test); 1658 } 1659 1660 static void testapp_invalid_desc(struct test_spec *test) 1661 { 1662 u64 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size; 1663 struct pkt pkts[] = { 1664 /* Zero packet address allowed */ 1665 {0, PKT_SIZE, 0, true}, 1666 /* Allowed packet */ 1667 {0x1000, PKT_SIZE, 0, true}, 1668 /* Straddling the start of umem */ 1669 {-2, PKT_SIZE, 0, false}, 1670 /* Packet too large */ 1671 {0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false}, 1672 /* Up to end of umem allowed */ 1673 {umem_size - PKT_SIZE, PKT_SIZE, 0, true}, 1674 /* After umem ends */ 1675 {umem_size, PKT_SIZE, 0, false}, 1676 /* Straddle the end of umem */ 1677 {umem_size - PKT_SIZE / 2, PKT_SIZE, 0, false}, 1678 /* Straddle a page boundrary */ 1679 {0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false}, 1680 /* Straddle a 2K boundrary */ 1681 {0x3800 - PKT_SIZE / 2, PKT_SIZE, 0, true}, 1682 /* Valid packet for synch so that something is received */ 1683 {0x4000, PKT_SIZE, 0, true}}; 1684 1685 if (test->ifobj_tx->umem->unaligned_mode) { 1686 /* Crossing a page boundrary allowed */ 1687 pkts[7].valid = true; 1688 } 1689 if (test->ifobj_tx->umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) { 1690 /* Crossing a 2K frame size boundrary not allowed */ 1691 pkts[8].valid = false; 1692 } 1693 1694 if (test->ifobj_tx->shared_umem) { 1695 pkts[4].addr += umem_size; 1696 pkts[5].addr += umem_size; 1697 pkts[6].addr += umem_size; 1698 } 1699 1700 pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); 1701 testapp_validate_traffic(test); 1702 } 1703 1704 static void testapp_xdp_drop(struct test_spec *test) 1705 { 1706 struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs; 1707 struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs; 1708 1709 test_spec_set_name(test, "XDP_DROP_HALF"); 1710 test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop, 1711 skel_rx->maps.xsk, skel_tx->maps.xsk); 1712 1713 pkt_stream_receive_half(test); 1714 testapp_validate_traffic(test); 1715 } 1716 1717 static void testapp_xdp_metadata_count(struct test_spec *test) 1718 { 1719 struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs; 1720 struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs; 1721 struct bpf_map *data_map; 1722 int count = 0; 1723 int key = 0; 1724 1725 test_spec_set_name(test, "XDP_METADATA_COUNT"); 1726 test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata, 1727 skel_tx->progs.xsk_xdp_populate_metadata, 1728 skel_rx->maps.xsk, skel_tx->maps.xsk); 1729 test->ifobj_rx->use_metadata = true; 1730 1731 data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss"); 1732 if (!data_map || !bpf_map__is_internal(data_map)) 1733 exit_with_error(ENOMEM); 1734 1735 if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY)) 1736 exit_with_error(errno); 1737 1738 testapp_validate_traffic(test); 1739 } 1740 1741 static void testapp_poll_txq_tmout(struct test_spec *test) 1742 { 1743 test_spec_set_name(test, "POLL_TXQ_FULL"); 1744 1745 test->ifobj_tx->use_poll = true; 1746 /* create invalid frame by set umem frame_size and pkt length equal to 2048 */ 1747 test->ifobj_tx->umem->frame_size = 2048; 1748 pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048); 1749 testapp_validate_traffic_single_thread(test, test->ifobj_tx); 1750 } 1751 1752 static void testapp_poll_rxq_tmout(struct test_spec *test) 1753 { 1754 test_spec_set_name(test, "POLL_RXQ_EMPTY"); 1755 test->ifobj_rx->use_poll = true; 1756 testapp_validate_traffic_single_thread(test, test->ifobj_rx); 1757 } 1758 1759 static int xsk_load_xdp_programs(struct ifobject *ifobj) 1760 { 1761 ifobj->xdp_progs = xsk_xdp_progs__open_and_load(); 1762 if (libbpf_get_error(ifobj->xdp_progs)) 1763 return libbpf_get_error(ifobj->xdp_progs); 1764 1765 return 0; 1766 } 1767 1768 static void xsk_unload_xdp_programs(struct ifobject *ifobj) 1769 { 1770 xsk_xdp_progs__destroy(ifobj->xdp_progs); 1771 } 1772 1773 static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac, 1774 const char *dst_ip, const char *src_ip, const u16 dst_port, 1775 const u16 src_port, thread_func_t func_ptr) 1776 { 1777 struct in_addr ip; 1778 int err; 1779 1780 memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN); 1781 memcpy(ifobj->src_mac, src_mac, ETH_ALEN); 1782 1783 inet_aton(dst_ip, &ip); 1784 ifobj->dst_ip = ip.s_addr; 1785 1786 inet_aton(src_ip, &ip); 1787 ifobj->src_ip = ip.s_addr; 1788 1789 ifobj->dst_port = dst_port; 1790 ifobj->src_port = src_port; 1791 1792 ifobj->func_ptr = func_ptr; 1793 1794 err = xsk_load_xdp_programs(ifobj); 1795 if (err) { 1796 printf("Error loading XDP program\n"); 1797 exit_with_error(err); 1798 } 1799 } 1800 1801 static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type) 1802 { 1803 switch (type) { 1804 case TEST_TYPE_STATS_RX_DROPPED: 1805 if (mode == TEST_MODE_ZC) { 1806 ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n"); 1807 return; 1808 } 1809 testapp_stats_rx_dropped(test); 1810 break; 1811 case TEST_TYPE_STATS_TX_INVALID_DESCS: 1812 testapp_stats_tx_invalid_descs(test); 1813 break; 1814 case TEST_TYPE_STATS_RX_FULL: 1815 testapp_stats_rx_full(test); 1816 break; 1817 case TEST_TYPE_STATS_FILL_EMPTY: 1818 testapp_stats_fill_empty(test); 1819 break; 1820 case TEST_TYPE_TEARDOWN: 1821 testapp_teardown(test); 1822 break; 1823 case TEST_TYPE_BIDI: 1824 testapp_bidi(test); 1825 break; 1826 case TEST_TYPE_BPF_RES: 1827 testapp_bpf_res(test); 1828 break; 1829 case TEST_TYPE_RUN_TO_COMPLETION: 1830 test_spec_set_name(test, "RUN_TO_COMPLETION"); 1831 testapp_validate_traffic(test); 1832 break; 1833 case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT: 1834 test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT"); 1835 testapp_single_pkt(test); 1836 break; 1837 case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME: 1838 test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE"); 1839 test->ifobj_tx->umem->frame_size = 2048; 1840 test->ifobj_rx->umem->frame_size = 2048; 1841 pkt_stream_replace(test, DEFAULT_PKT_CNT, PKT_SIZE); 1842 testapp_validate_traffic(test); 1843 break; 1844 case TEST_TYPE_RX_POLL: 1845 test->ifobj_rx->use_poll = true; 1846 test_spec_set_name(test, "POLL_RX"); 1847 testapp_validate_traffic(test); 1848 break; 1849 case TEST_TYPE_TX_POLL: 1850 test->ifobj_tx->use_poll = true; 1851 test_spec_set_name(test, "POLL_TX"); 1852 testapp_validate_traffic(test); 1853 break; 1854 case TEST_TYPE_POLL_TXQ_TMOUT: 1855 testapp_poll_txq_tmout(test); 1856 break; 1857 case TEST_TYPE_POLL_RXQ_TMOUT: 1858 testapp_poll_rxq_tmout(test); 1859 break; 1860 case TEST_TYPE_ALIGNED_INV_DESC: 1861 test_spec_set_name(test, "ALIGNED_INV_DESC"); 1862 testapp_invalid_desc(test); 1863 break; 1864 case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME: 1865 test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE"); 1866 test->ifobj_tx->umem->frame_size = 2048; 1867 test->ifobj_rx->umem->frame_size = 2048; 1868 testapp_invalid_desc(test); 1869 break; 1870 case TEST_TYPE_UNALIGNED_INV_DESC: 1871 if (!hugepages_present(test->ifobj_tx)) { 1872 ksft_test_result_skip("No 2M huge pages present.\n"); 1873 return; 1874 } 1875 test_spec_set_name(test, "UNALIGNED_INV_DESC"); 1876 test->ifobj_tx->umem->unaligned_mode = true; 1877 test->ifobj_rx->umem->unaligned_mode = true; 1878 testapp_invalid_desc(test); 1879 break; 1880 case TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME: { 1881 u64 page_size, umem_size; 1882 1883 if (!hugepages_present(test->ifobj_tx)) { 1884 ksft_test_result_skip("No 2M huge pages present.\n"); 1885 return; 1886 } 1887 test_spec_set_name(test, "UNALIGNED_INV_DESC_4K1_FRAME_SIZE"); 1888 /* Odd frame size so the UMEM doesn't end near a page boundary. */ 1889 test->ifobj_tx->umem->frame_size = 4001; 1890 test->ifobj_rx->umem->frame_size = 4001; 1891 test->ifobj_tx->umem->unaligned_mode = true; 1892 test->ifobj_rx->umem->unaligned_mode = true; 1893 /* This test exists to test descriptors that staddle the end of 1894 * the UMEM but not a page. 1895 */ 1896 page_size = sysconf(_SC_PAGESIZE); 1897 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size; 1898 assert(umem_size % page_size > PKT_SIZE); 1899 assert(umem_size % page_size < page_size - PKT_SIZE); 1900 testapp_invalid_desc(test); 1901 break; 1902 } 1903 case TEST_TYPE_UNALIGNED: 1904 if (!testapp_unaligned(test)) 1905 return; 1906 break; 1907 case TEST_TYPE_HEADROOM: 1908 testapp_headroom(test); 1909 break; 1910 case TEST_TYPE_XDP_DROP_HALF: 1911 testapp_xdp_drop(test); 1912 break; 1913 case TEST_TYPE_XDP_METADATA_COUNT: 1914 testapp_xdp_metadata_count(test); 1915 break; 1916 default: 1917 break; 1918 } 1919 1920 if (!test->fail) 1921 ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test), 1922 test->name); 1923 pkt_stream_restore_default(test); 1924 } 1925 1926 static struct ifobject *ifobject_create(void) 1927 { 1928 struct ifobject *ifobj; 1929 1930 ifobj = calloc(1, sizeof(struct ifobject)); 1931 if (!ifobj) 1932 return NULL; 1933 1934 ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr)); 1935 if (!ifobj->xsk_arr) 1936 goto out_xsk_arr; 1937 1938 ifobj->umem = calloc(1, sizeof(*ifobj->umem)); 1939 if (!ifobj->umem) 1940 goto out_umem; 1941 1942 return ifobj; 1943 1944 out_umem: 1945 free(ifobj->xsk_arr); 1946 out_xsk_arr: 1947 free(ifobj); 1948 return NULL; 1949 } 1950 1951 static void ifobject_delete(struct ifobject *ifobj) 1952 { 1953 free(ifobj->umem); 1954 free(ifobj->xsk_arr); 1955 free(ifobj); 1956 } 1957 1958 static bool is_xdp_supported(int ifindex) 1959 { 1960 int flags = XDP_FLAGS_DRV_MODE; 1961 1962 LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = flags); 1963 struct bpf_insn insns[2] = { 1964 BPF_MOV64_IMM(BPF_REG_0, XDP_PASS), 1965 BPF_EXIT_INSN() 1966 }; 1967 int prog_fd, insn_cnt = ARRAY_SIZE(insns); 1968 int err; 1969 1970 prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL); 1971 if (prog_fd < 0) 1972 return false; 1973 1974 err = bpf_xdp_attach(ifindex, prog_fd, flags, NULL); 1975 if (err) { 1976 close(prog_fd); 1977 return false; 1978 } 1979 1980 bpf_xdp_detach(ifindex, flags, NULL); 1981 close(prog_fd); 1982 1983 return true; 1984 } 1985 1986 int main(int argc, char **argv) 1987 { 1988 struct pkt_stream *rx_pkt_stream_default; 1989 struct pkt_stream *tx_pkt_stream_default; 1990 struct ifobject *ifobj_tx, *ifobj_rx; 1991 int modes = TEST_MODE_SKB + 1; 1992 u32 i, j, failed_tests = 0; 1993 struct test_spec test; 1994 bool shared_netdev; 1995 1996 /* Use libbpf 1.0 API mode */ 1997 libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 1998 1999 ifobj_tx = ifobject_create(); 2000 if (!ifobj_tx) 2001 exit_with_error(ENOMEM); 2002 ifobj_rx = ifobject_create(); 2003 if (!ifobj_rx) 2004 exit_with_error(ENOMEM); 2005 2006 setlocale(LC_ALL, ""); 2007 2008 parse_command_line(ifobj_tx, ifobj_rx, argc, argv); 2009 2010 shared_netdev = (ifobj_tx->ifindex == ifobj_rx->ifindex); 2011 ifobj_tx->shared_umem = shared_netdev; 2012 ifobj_rx->shared_umem = shared_netdev; 2013 2014 if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) { 2015 usage(basename(argv[0])); 2016 ksft_exit_xfail(); 2017 } 2018 2019 if (is_xdp_supported(ifobj_tx->ifindex)) { 2020 modes++; 2021 if (ifobj_zc_avail(ifobj_tx)) 2022 modes++; 2023 } 2024 2025 init_iface(ifobj_rx, MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2, 2026 worker_testapp_validate_rx); 2027 init_iface(ifobj_tx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1, 2028 worker_testapp_validate_tx); 2029 2030 test_spec_init(&test, ifobj_tx, ifobj_rx, 0); 2031 tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE); 2032 rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, PKT_SIZE); 2033 if (!tx_pkt_stream_default || !rx_pkt_stream_default) 2034 exit_with_error(ENOMEM); 2035 test.tx_pkt_stream_default = tx_pkt_stream_default; 2036 test.rx_pkt_stream_default = rx_pkt_stream_default; 2037 2038 ksft_set_plan(modes * TEST_TYPE_MAX); 2039 2040 for (i = 0; i < modes; i++) { 2041 for (j = 0; j < TEST_TYPE_MAX; j++) { 2042 test_spec_init(&test, ifobj_tx, ifobj_rx, i); 2043 run_pkt_test(&test, i, j); 2044 usleep(USLEEP_MAX); 2045 2046 if (test.fail) 2047 failed_tests++; 2048 } 2049 } 2050 2051 pkt_stream_delete(tx_pkt_stream_default); 2052 pkt_stream_delete(rx_pkt_stream_default); 2053 xsk_unload_xdp_programs(ifobj_tx); 2054 xsk_unload_xdp_programs(ifobj_rx); 2055 ifobject_delete(ifobj_tx); 2056 ifobject_delete(ifobj_rx); 2057 2058 if (failed_tests) 2059 ksft_exit_fail(); 2060 else 2061 ksft_exit_pass(); 2062 } 2063