1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2020 Intel Corporation. */ 3 4 /* 5 * Some functions in this program are taken from 6 * Linux kernel samples/bpf/xdpsock* and modified 7 * for use. 8 * 9 * See test_xsk.sh for detailed information on test topology 10 * and prerequisite network setup. 11 * 12 * This test program contains two threads, each thread is single socket with 13 * a unique UMEM. It validates in-order packet delivery and packet content 14 * by sending packets to each other. 15 * 16 * Tests Information: 17 * ------------------ 18 * These selftests test AF_XDP SKB and Native/DRV modes using veth 19 * Virtual Ethernet interfaces. 20 * 21 * For each mode, the following tests are run: 22 * a. nopoll - soft-irq processing in run-to-completion mode 23 * b. poll - using poll() syscall 24 * c. Socket Teardown 25 * Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy 26 * both sockets, then repeat multiple times. Only nopoll mode is used 27 * d. Bi-directional sockets 28 * Configure sockets as bi-directional tx/rx sockets, sets up fill and 29 * completion rings on each socket, tx/rx in both directions. Only nopoll 30 * mode is used 31 * e. Statistics 32 * Trigger some error conditions and ensure that the appropriate statistics 33 * are incremented. Within this test, the following statistics are tested: 34 * i. rx dropped 35 * Increase the UMEM frame headroom to a value which results in 36 * insufficient space in the rx buffer for both the packet and the headroom. 37 * ii. tx invalid 38 * Set the 'len' field of tx descriptors to an invalid value (umem frame 39 * size + 1). 40 * iii. rx ring full 41 * Reduce the size of the RX ring to a fraction of the fill ring size. 42 * iv. fill queue empty 43 * Do not populate the fill queue and then try to receive pkts. 44 * f. bpf_link resource persistence 45 * Configure sockets at indexes 0 and 1, run a traffic on queue ids 0, 46 * then remove xsk sockets from queue 0 on both veth interfaces and 47 * finally run a traffic on queues ids 1 48 * g. unaligned mode 49 * h. tests for invalid and corner case Tx descriptors so that the correct ones 50 * are discarded and let through, respectively. 51 * i. 2K frame size tests 52 * j. If multi-buffer is supported, send 9k packets divided into 3 frames 53 * k. If multi-buffer and huge pages are supported, send 9k packets in a single frame 54 * using unaligned mode 55 * l. If multi-buffer is supported, try various nasty combinations of descriptors to 56 * check if they pass the validation or not 57 * 58 * Flow: 59 * ----- 60 * - Single process spawns two threads: Tx and Rx 61 * - Each of these two threads attach to a veth interface 62 * - Each thread creates one AF_XDP socket connected to a unique umem for each 63 * veth interface 64 * - Tx thread Transmits a number of packets from veth<xxxx> to veth<yyyy> 65 * - Rx thread verifies if all packets were received and delivered in-order, 66 * and have the right content 67 * 68 * Enable/disable packet dump mode: 69 * -------------------------- 70 * To enable L2 - L4 headers and payload dump of each packet on STDOUT, add 71 * parameter -D to params array in test_xsk.sh, i.e. params=("-S" "-D") 72 */ 73 74 #define _GNU_SOURCE 75 #include <assert.h> 76 #include <fcntl.h> 77 #include <errno.h> 78 #include <getopt.h> 79 #include <linux/if_link.h> 80 #include <linux/if_ether.h> 81 #include <linux/mman.h> 82 #include <linux/netdev.h> 83 #include <arpa/inet.h> 84 #include <net/if.h> 85 #include <locale.h> 86 #include <poll.h> 87 #include <pthread.h> 88 #include <signal.h> 89 #include <stdio.h> 90 #include <stdlib.h> 91 #include <string.h> 92 #include <stddef.h> 93 #include <sys/mman.h> 94 #include <sys/socket.h> 95 #include <sys/time.h> 96 #include <sys/types.h> 97 #include <unistd.h> 98 99 #include "xsk_xdp_progs.skel.h" 100 #include "xsk.h" 101 #include "xskxceiver.h" 102 #include <bpf/bpf.h> 103 #include <linux/filter.h> 104 #include "../kselftest.h" 105 #include "xsk_xdp_metadata.h" 106 107 static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62"; 108 static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61"; 109 110 static void __exit_with_error(int error, const char *file, const char *func, int line) 111 { 112 ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error, 113 strerror(error)); 114 ksft_exit_xfail(); 115 } 116 117 #define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__) 118 #define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? "BUSY-POLL " : "" 119 static char *mode_string(struct test_spec *test) 120 { 121 switch (test->mode) { 122 case TEST_MODE_SKB: 123 return "SKB"; 124 case TEST_MODE_DRV: 125 return "DRV"; 126 case TEST_MODE_ZC: 127 return "ZC"; 128 default: 129 return "BOGUS"; 130 } 131 } 132 133 static void report_failure(struct test_spec *test) 134 { 135 if (test->fail) 136 return; 137 138 ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test), 139 test->name); 140 test->fail = true; 141 } 142 143 /* The payload is a word consisting of a packet sequence number in the upper 144 * 16-bits and a intra packet data sequence number in the lower 16 bits. So the 3rd packet's 145 * 5th word of data will contain the number (2<<16) | 4 as they are numbered from 0. 146 */ 147 static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size) 148 { 149 u32 *ptr = (u32 *)dest, i; 150 151 start /= sizeof(*ptr); 152 size /= sizeof(*ptr); 153 for (i = 0; i < size; i++) 154 ptr[i] = htonl(pkt_nb << 16 | (i + start)); 155 } 156 157 static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr) 158 { 159 memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN); 160 memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN); 161 eth_hdr->h_proto = htons(ETH_P_LOOPBACK); 162 } 163 164 static bool is_umem_valid(struct ifobject *ifobj) 165 { 166 return !!ifobj->umem->umem; 167 } 168 169 static u32 mode_to_xdp_flags(enum test_mode mode) 170 { 171 return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE; 172 } 173 174 static u64 umem_size(struct xsk_umem_info *umem) 175 { 176 return umem->num_frames * umem->frame_size; 177 } 178 179 static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer, 180 u64 size) 181 { 182 struct xsk_umem_config cfg = { 183 .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, 184 .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, 185 .frame_size = umem->frame_size, 186 .frame_headroom = umem->frame_headroom, 187 .flags = XSK_UMEM__DEFAULT_FLAGS 188 }; 189 int ret; 190 191 if (umem->unaligned_mode) 192 cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG; 193 194 ret = xsk_umem__create(&umem->umem, buffer, size, 195 &umem->fq, &umem->cq, &cfg); 196 if (ret) 197 return ret; 198 199 umem->buffer = buffer; 200 if (ifobj->shared_umem && ifobj->rx_on) { 201 umem->base_addr = umem_size(umem); 202 umem->next_buffer = umem_size(umem); 203 } 204 205 return 0; 206 } 207 208 static u64 umem_alloc_buffer(struct xsk_umem_info *umem) 209 { 210 u64 addr; 211 212 addr = umem->next_buffer; 213 umem->next_buffer += umem->frame_size; 214 if (umem->next_buffer >= umem->base_addr + umem_size(umem)) 215 umem->next_buffer = umem->base_addr; 216 217 return addr; 218 } 219 220 static void umem_reset_alloc(struct xsk_umem_info *umem) 221 { 222 umem->next_buffer = 0; 223 } 224 225 static void enable_busy_poll(struct xsk_socket_info *xsk) 226 { 227 int sock_opt; 228 229 sock_opt = 1; 230 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL, 231 (void *)&sock_opt, sizeof(sock_opt)) < 0) 232 exit_with_error(errno); 233 234 sock_opt = 20; 235 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL, 236 (void *)&sock_opt, sizeof(sock_opt)) < 0) 237 exit_with_error(errno); 238 239 sock_opt = BATCH_SIZE; 240 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET, 241 (void *)&sock_opt, sizeof(sock_opt)) < 0) 242 exit_with_error(errno); 243 } 244 245 static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, 246 struct ifobject *ifobject, bool shared) 247 { 248 struct xsk_socket_config cfg = {}; 249 struct xsk_ring_cons *rxr; 250 struct xsk_ring_prod *txr; 251 252 xsk->umem = umem; 253 cfg.rx_size = xsk->rxqsize; 254 cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; 255 cfg.bind_flags = ifobject->bind_flags; 256 if (shared) 257 cfg.bind_flags |= XDP_SHARED_UMEM; 258 if (ifobject->pkt_stream && ifobject->mtu > MAX_ETH_PKT_SIZE) 259 cfg.bind_flags |= XDP_USE_SG; 260 261 txr = ifobject->tx_on ? &xsk->tx : NULL; 262 rxr = ifobject->rx_on ? &xsk->rx : NULL; 263 return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg); 264 } 265 266 static bool ifobj_zc_avail(struct ifobject *ifobject) 267 { 268 size_t umem_sz = DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE; 269 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; 270 struct xsk_socket_info *xsk; 271 struct xsk_umem_info *umem; 272 bool zc_avail = false; 273 void *bufs; 274 int ret; 275 276 bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); 277 if (bufs == MAP_FAILED) 278 exit_with_error(errno); 279 280 umem = calloc(1, sizeof(struct xsk_umem_info)); 281 if (!umem) { 282 munmap(bufs, umem_sz); 283 exit_with_error(ENOMEM); 284 } 285 umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 286 ret = xsk_configure_umem(ifobject, umem, bufs, umem_sz); 287 if (ret) 288 exit_with_error(-ret); 289 290 xsk = calloc(1, sizeof(struct xsk_socket_info)); 291 if (!xsk) 292 goto out; 293 ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY; 294 ifobject->rx_on = true; 295 xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; 296 ret = __xsk_configure_socket(xsk, umem, ifobject, false); 297 if (!ret) 298 zc_avail = true; 299 300 xsk_socket__delete(xsk->xsk); 301 free(xsk); 302 out: 303 munmap(umem->buffer, umem_sz); 304 xsk_umem__delete(umem->umem); 305 free(umem); 306 return zc_avail; 307 } 308 309 static struct option long_options[] = { 310 {"interface", required_argument, 0, 'i'}, 311 {"busy-poll", no_argument, 0, 'b'}, 312 {"verbose", no_argument, 0, 'v'}, 313 {0, 0, 0, 0} 314 }; 315 316 static void usage(const char *prog) 317 { 318 const char *str = 319 " Usage: %s [OPTIONS]\n" 320 " Options:\n" 321 " -i, --interface Use interface\n" 322 " -v, --verbose Verbose output\n" 323 " -b, --busy-poll Enable busy poll\n"; 324 325 ksft_print_msg(str, prog); 326 } 327 328 static bool validate_interface(struct ifobject *ifobj) 329 { 330 if (!strcmp(ifobj->ifname, "")) 331 return false; 332 return true; 333 } 334 335 static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, int argc, 336 char **argv) 337 { 338 struct ifobject *ifobj; 339 u32 interface_nb = 0; 340 int option_index, c; 341 342 opterr = 0; 343 344 for (;;) { 345 c = getopt_long(argc, argv, "i:vb", long_options, &option_index); 346 if (c == -1) 347 break; 348 349 switch (c) { 350 case 'i': 351 if (interface_nb == 0) 352 ifobj = ifobj_tx; 353 else if (interface_nb == 1) 354 ifobj = ifobj_rx; 355 else 356 break; 357 358 memcpy(ifobj->ifname, optarg, 359 min_t(size_t, MAX_INTERFACE_NAME_CHARS, strlen(optarg))); 360 361 ifobj->ifindex = if_nametoindex(ifobj->ifname); 362 if (!ifobj->ifindex) 363 exit_with_error(errno); 364 365 interface_nb++; 366 break; 367 case 'v': 368 opt_verbose = true; 369 break; 370 case 'b': 371 ifobj_tx->busy_poll = true; 372 ifobj_rx->busy_poll = true; 373 break; 374 default: 375 usage(basename(argv[0])); 376 ksft_exit_xfail(); 377 } 378 } 379 } 380 381 static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, 382 struct ifobject *ifobj_rx) 383 { 384 u32 i, j; 385 386 for (i = 0; i < MAX_INTERFACES; i++) { 387 struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx; 388 389 ifobj->xsk = &ifobj->xsk_arr[0]; 390 ifobj->use_poll = false; 391 ifobj->use_fill_ring = true; 392 ifobj->release_rx = true; 393 ifobj->validation_func = NULL; 394 ifobj->use_metadata = false; 395 396 if (i == 0) { 397 ifobj->rx_on = false; 398 ifobj->tx_on = true; 399 ifobj->pkt_stream = test->tx_pkt_stream_default; 400 } else { 401 ifobj->rx_on = true; 402 ifobj->tx_on = false; 403 ifobj->pkt_stream = test->rx_pkt_stream_default; 404 } 405 406 memset(ifobj->umem, 0, sizeof(*ifobj->umem)); 407 ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS; 408 ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 409 410 for (j = 0; j < MAX_SOCKETS; j++) { 411 memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j])); 412 ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; 413 } 414 } 415 416 test->ifobj_tx = ifobj_tx; 417 test->ifobj_rx = ifobj_rx; 418 test->current_step = 0; 419 test->total_steps = 1; 420 test->nb_sockets = 1; 421 test->fail = false; 422 test->mtu = MAX_ETH_PKT_SIZE; 423 test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog; 424 test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk; 425 test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog; 426 test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk; 427 } 428 429 static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, 430 struct ifobject *ifobj_rx, enum test_mode mode) 431 { 432 struct pkt_stream *tx_pkt_stream; 433 struct pkt_stream *rx_pkt_stream; 434 u32 i; 435 436 tx_pkt_stream = test->tx_pkt_stream_default; 437 rx_pkt_stream = test->rx_pkt_stream_default; 438 memset(test, 0, sizeof(*test)); 439 test->tx_pkt_stream_default = tx_pkt_stream; 440 test->rx_pkt_stream_default = rx_pkt_stream; 441 442 for (i = 0; i < MAX_INTERFACES; i++) { 443 struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx; 444 445 ifobj->bind_flags = XDP_USE_NEED_WAKEUP; 446 if (mode == TEST_MODE_ZC) 447 ifobj->bind_flags |= XDP_ZEROCOPY; 448 else 449 ifobj->bind_flags |= XDP_COPY; 450 } 451 452 test->mode = mode; 453 __test_spec_init(test, ifobj_tx, ifobj_rx); 454 } 455 456 static void test_spec_reset(struct test_spec *test) 457 { 458 __test_spec_init(test, test->ifobj_tx, test->ifobj_rx); 459 } 460 461 static void test_spec_set_name(struct test_spec *test, const char *name) 462 { 463 strncpy(test->name, name, MAX_TEST_NAME_SIZE); 464 } 465 466 static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx, 467 struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx, 468 struct bpf_map *xskmap_tx) 469 { 470 test->xdp_prog_rx = xdp_prog_rx; 471 test->xdp_prog_tx = xdp_prog_tx; 472 test->xskmap_rx = xskmap_rx; 473 test->xskmap_tx = xskmap_tx; 474 } 475 476 static int test_spec_set_mtu(struct test_spec *test, int mtu) 477 { 478 int err; 479 480 if (test->ifobj_rx->mtu != mtu) { 481 err = xsk_set_mtu(test->ifobj_rx->ifindex, mtu); 482 if (err) 483 return err; 484 test->ifobj_rx->mtu = mtu; 485 } 486 if (test->ifobj_tx->mtu != mtu) { 487 err = xsk_set_mtu(test->ifobj_tx->ifindex, mtu); 488 if (err) 489 return err; 490 test->ifobj_tx->mtu = mtu; 491 } 492 493 return 0; 494 } 495 496 static void pkt_stream_reset(struct pkt_stream *pkt_stream) 497 { 498 if (pkt_stream) 499 pkt_stream->current_pkt_nb = 0; 500 } 501 502 static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream) 503 { 504 if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) 505 return NULL; 506 507 return &pkt_stream->pkts[pkt_stream->current_pkt_nb++]; 508 } 509 510 static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent) 511 { 512 while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) { 513 (*pkts_sent)++; 514 if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid) 515 return &pkt_stream->pkts[pkt_stream->current_pkt_nb++]; 516 pkt_stream->current_pkt_nb++; 517 } 518 return NULL; 519 } 520 521 static void pkt_stream_delete(struct pkt_stream *pkt_stream) 522 { 523 free(pkt_stream->pkts); 524 free(pkt_stream); 525 } 526 527 static void pkt_stream_restore_default(struct test_spec *test) 528 { 529 struct pkt_stream *tx_pkt_stream = test->ifobj_tx->pkt_stream; 530 struct pkt_stream *rx_pkt_stream = test->ifobj_rx->pkt_stream; 531 532 if (tx_pkt_stream != test->tx_pkt_stream_default) { 533 pkt_stream_delete(test->ifobj_tx->pkt_stream); 534 test->ifobj_tx->pkt_stream = test->tx_pkt_stream_default; 535 } 536 537 if (rx_pkt_stream != test->rx_pkt_stream_default) { 538 pkt_stream_delete(test->ifobj_rx->pkt_stream); 539 test->ifobj_rx->pkt_stream = test->rx_pkt_stream_default; 540 } 541 } 542 543 static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts) 544 { 545 struct pkt_stream *pkt_stream; 546 547 pkt_stream = calloc(1, sizeof(*pkt_stream)); 548 if (!pkt_stream) 549 return NULL; 550 551 pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts)); 552 if (!pkt_stream->pkts) { 553 free(pkt_stream); 554 return NULL; 555 } 556 557 pkt_stream->nb_pkts = nb_pkts; 558 return pkt_stream; 559 } 560 561 static bool pkt_continues(u32 options) 562 { 563 return options & XDP_PKT_CONTD; 564 } 565 566 static u32 ceil_u32(u32 a, u32 b) 567 { 568 return (a + b - 1) / b; 569 } 570 571 static u32 pkt_nb_frags(u32 frame_size, struct pkt_stream *pkt_stream, struct pkt *pkt) 572 { 573 u32 nb_frags = 1, next_frag; 574 575 if (!pkt) 576 return 1; 577 578 if (!pkt_stream->verbatim) { 579 if (!pkt->valid || !pkt->len) 580 return 1; 581 return ceil_u32(pkt->len, frame_size); 582 } 583 584 /* Search for the end of the packet in verbatim mode */ 585 if (!pkt_continues(pkt->options)) 586 return nb_frags; 587 588 next_frag = pkt_stream->current_pkt_nb; 589 pkt++; 590 while (next_frag++ < pkt_stream->nb_pkts) { 591 nb_frags++; 592 if (!pkt_continues(pkt->options) || !pkt->valid) 593 break; 594 pkt++; 595 } 596 return nb_frags; 597 } 598 599 static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, int offset, u32 len) 600 { 601 pkt->offset = offset; 602 pkt->len = len; 603 if (len > MAX_ETH_JUMBO_SIZE) 604 pkt->valid = false; 605 else 606 pkt->valid = true; 607 } 608 609 static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len) 610 { 611 return ceil_u32(len, umem->frame_size) * umem->frame_size; 612 } 613 614 static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len) 615 { 616 struct pkt_stream *pkt_stream; 617 u32 i; 618 619 pkt_stream = __pkt_stream_alloc(nb_pkts); 620 if (!pkt_stream) 621 exit_with_error(ENOMEM); 622 623 pkt_stream->nb_pkts = nb_pkts; 624 pkt_stream->max_pkt_len = pkt_len; 625 for (i = 0; i < nb_pkts; i++) { 626 struct pkt *pkt = &pkt_stream->pkts[i]; 627 628 pkt_set(umem, pkt, 0, pkt_len); 629 pkt->pkt_nb = i; 630 } 631 632 return pkt_stream; 633 } 634 635 static struct pkt_stream *pkt_stream_clone(struct xsk_umem_info *umem, 636 struct pkt_stream *pkt_stream) 637 { 638 return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len); 639 } 640 641 static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len) 642 { 643 struct pkt_stream *pkt_stream; 644 645 pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len); 646 test->ifobj_tx->pkt_stream = pkt_stream; 647 pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len); 648 test->ifobj_rx->pkt_stream = pkt_stream; 649 } 650 651 static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len, 652 int offset) 653 { 654 struct xsk_umem_info *umem = ifobj->umem; 655 struct pkt_stream *pkt_stream; 656 u32 i; 657 658 pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream); 659 for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2) 660 pkt_set(umem, &pkt_stream->pkts[i], offset, pkt_len); 661 662 ifobj->pkt_stream = pkt_stream; 663 } 664 665 static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset) 666 { 667 __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset); 668 __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset); 669 } 670 671 static void pkt_stream_receive_half(struct test_spec *test) 672 { 673 struct xsk_umem_info *umem = test->ifobj_rx->umem; 674 struct pkt_stream *pkt_stream = test->ifobj_tx->pkt_stream; 675 u32 i; 676 677 test->ifobj_rx->pkt_stream = pkt_stream_generate(umem, pkt_stream->nb_pkts, 678 pkt_stream->pkts[0].len); 679 pkt_stream = test->ifobj_rx->pkt_stream; 680 for (i = 1; i < pkt_stream->nb_pkts; i += 2) 681 pkt_stream->pkts[i].valid = false; 682 } 683 684 static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem) 685 { 686 if (!pkt->valid) 687 return pkt->offset; 688 return pkt->offset + umem_alloc_buffer(umem); 689 } 690 691 static void pkt_stream_cancel(struct pkt_stream *pkt_stream) 692 { 693 pkt_stream->current_pkt_nb--; 694 } 695 696 static void pkt_generate(struct ifobject *ifobject, u64 addr, u32 len, u32 pkt_nb, 697 u32 bytes_written) 698 { 699 void *data = xsk_umem__get_data(ifobject->umem->buffer, addr); 700 701 if (len < MIN_PKT_SIZE) 702 return; 703 704 if (!bytes_written) { 705 gen_eth_hdr(ifobject, data); 706 707 len -= PKT_HDR_SIZE; 708 data += PKT_HDR_SIZE; 709 } else { 710 bytes_written -= PKT_HDR_SIZE; 711 } 712 713 write_payload(data, pkt_nb, bytes_written, len); 714 } 715 716 static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, struct pkt *frames, 717 u32 nb_frames, bool verbatim) 718 { 719 u32 i, len = 0, pkt_nb = 0, payload = 0; 720 struct pkt_stream *pkt_stream; 721 722 pkt_stream = __pkt_stream_alloc(nb_frames); 723 if (!pkt_stream) 724 exit_with_error(ENOMEM); 725 726 for (i = 0; i < nb_frames; i++) { 727 struct pkt *pkt = &pkt_stream->pkts[pkt_nb]; 728 struct pkt *frame = &frames[i]; 729 730 pkt->offset = frame->offset; 731 if (verbatim) { 732 *pkt = *frame; 733 pkt->pkt_nb = payload; 734 if (!frame->valid || !pkt_continues(frame->options)) 735 payload++; 736 } else { 737 if (frame->valid) 738 len += frame->len; 739 if (frame->valid && pkt_continues(frame->options)) 740 continue; 741 742 pkt->pkt_nb = pkt_nb; 743 pkt->len = len; 744 pkt->valid = frame->valid; 745 pkt->options = 0; 746 747 len = 0; 748 } 749 750 if (pkt->valid && pkt->len > pkt_stream->max_pkt_len) 751 pkt_stream->max_pkt_len = pkt->len; 752 pkt_nb++; 753 } 754 755 pkt_stream->nb_pkts = pkt_nb; 756 pkt_stream->verbatim = verbatim; 757 return pkt_stream; 758 } 759 760 static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts) 761 { 762 struct pkt_stream *pkt_stream; 763 764 pkt_stream = __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts, true); 765 test->ifobj_tx->pkt_stream = pkt_stream; 766 767 pkt_stream = __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts, false); 768 test->ifobj_rx->pkt_stream = pkt_stream; 769 } 770 771 static void pkt_print_data(u32 *data, u32 cnt) 772 { 773 u32 i; 774 775 for (i = 0; i < cnt; i++) { 776 u32 seqnum, pkt_nb; 777 778 seqnum = ntohl(*data) & 0xffff; 779 pkt_nb = ntohl(*data) >> 16; 780 fprintf(stdout, "%u:%u ", pkt_nb, seqnum); 781 data++; 782 } 783 } 784 785 static void pkt_dump(void *pkt, u32 len, bool eth_header) 786 { 787 struct ethhdr *ethhdr = pkt; 788 u32 i, *data; 789 790 if (eth_header) { 791 /*extract L2 frame */ 792 fprintf(stdout, "DEBUG>> L2: dst mac: "); 793 for (i = 0; i < ETH_ALEN; i++) 794 fprintf(stdout, "%02X", ethhdr->h_dest[i]); 795 796 fprintf(stdout, "\nDEBUG>> L2: src mac: "); 797 for (i = 0; i < ETH_ALEN; i++) 798 fprintf(stdout, "%02X", ethhdr->h_source[i]); 799 800 data = pkt + PKT_HDR_SIZE; 801 } else { 802 data = pkt; 803 } 804 805 /*extract L5 frame */ 806 fprintf(stdout, "\nDEBUG>> L5: seqnum: "); 807 pkt_print_data(data, PKT_DUMP_NB_TO_PRINT); 808 fprintf(stdout, "...."); 809 if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) { 810 fprintf(stdout, "\n.... "); 811 pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT, 812 PKT_DUMP_NB_TO_PRINT); 813 } 814 fprintf(stdout, "\n---------------------------------------\n"); 815 } 816 817 static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr) 818 { 819 u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom; 820 u32 offset = addr % umem->frame_size, expected_offset; 821 int pkt_offset = pkt->valid ? pkt->offset : 0; 822 823 if (!umem->unaligned_mode) 824 pkt_offset = 0; 825 826 expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size; 827 828 if (offset == expected_offset) 829 return true; 830 831 ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset); 832 return false; 833 } 834 835 static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr) 836 { 837 void *data = xsk_umem__get_data(buffer, addr); 838 struct xdp_info *meta = data - sizeof(struct xdp_info); 839 840 if (meta->count != pkt->pkt_nb) { 841 ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%d]\n", 842 __func__, pkt->pkt_nb, meta->count); 843 return false; 844 } 845 846 return true; 847 } 848 849 static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 expected_pkt_nb, 850 u32 bytes_processed) 851 { 852 u32 seqnum, pkt_nb, *pkt_data, words_to_end, expected_seqnum; 853 void *data = xsk_umem__get_data(umem->buffer, addr); 854 855 addr -= umem->base_addr; 856 857 if (addr >= umem->num_frames * umem->frame_size || 858 addr + len > umem->num_frames * umem->frame_size) { 859 ksft_print_msg("Frag invalid addr: %llx len: %u\n", addr, len); 860 return false; 861 } 862 if (!umem->unaligned_mode && addr % umem->frame_size + len > umem->frame_size) { 863 ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n", addr, len); 864 return false; 865 } 866 867 pkt_data = data; 868 if (!bytes_processed) { 869 pkt_data += PKT_HDR_SIZE / sizeof(*pkt_data); 870 len -= PKT_HDR_SIZE; 871 } else { 872 bytes_processed -= PKT_HDR_SIZE; 873 } 874 875 expected_seqnum = bytes_processed / sizeof(*pkt_data); 876 seqnum = ntohl(*pkt_data) & 0xffff; 877 pkt_nb = ntohl(*pkt_data) >> 16; 878 879 if (expected_pkt_nb != pkt_nb) { 880 ksft_print_msg("[%s] expected pkt_nb [%u], got pkt_nb [%u]\n", 881 __func__, expected_pkt_nb, pkt_nb); 882 goto error; 883 } 884 if (expected_seqnum != seqnum) { 885 ksft_print_msg("[%s] expected seqnum at start [%u], got seqnum [%u]\n", 886 __func__, expected_seqnum, seqnum); 887 goto error; 888 } 889 890 words_to_end = len / sizeof(*pkt_data) - 1; 891 pkt_data += words_to_end; 892 seqnum = ntohl(*pkt_data) & 0xffff; 893 expected_seqnum += words_to_end; 894 if (expected_seqnum != seqnum) { 895 ksft_print_msg("[%s] expected seqnum at end [%u], got seqnum [%u]\n", 896 __func__, expected_seqnum, seqnum); 897 goto error; 898 } 899 900 return true; 901 902 error: 903 pkt_dump(data, len, !bytes_processed); 904 return false; 905 } 906 907 static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) 908 { 909 if (pkt->len != len) { 910 ksft_print_msg("[%s] expected packet length [%d], got length [%d]\n", 911 __func__, pkt->len, len); 912 pkt_dump(xsk_umem__get_data(buffer, addr), len, true); 913 return false; 914 } 915 916 return true; 917 } 918 919 static void kick_tx(struct xsk_socket_info *xsk) 920 { 921 int ret; 922 923 ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0); 924 if (ret >= 0) 925 return; 926 if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) { 927 usleep(100); 928 return; 929 } 930 exit_with_error(errno); 931 } 932 933 static void kick_rx(struct xsk_socket_info *xsk) 934 { 935 int ret; 936 937 ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL); 938 if (ret < 0) 939 exit_with_error(errno); 940 } 941 942 static int complete_pkts(struct xsk_socket_info *xsk, int batch_size) 943 { 944 unsigned int rcvd; 945 u32 idx; 946 947 if (xsk_ring_prod__needs_wakeup(&xsk->tx)) 948 kick_tx(xsk); 949 950 rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx); 951 if (rcvd) { 952 if (rcvd > xsk->outstanding_tx) { 953 u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1); 954 955 ksft_print_msg("[%s] Too many packets completed\n", __func__); 956 ksft_print_msg("Last completion address: %llx\n", addr); 957 return TEST_FAILURE; 958 } 959 960 xsk_ring_cons__release(&xsk->umem->cq, rcvd); 961 xsk->outstanding_tx -= rcvd; 962 } 963 964 return TEST_PASS; 965 } 966 967 static int receive_pkts(struct test_spec *test, struct pollfd *fds) 968 { 969 struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0}; 970 struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream; 971 struct xsk_socket_info *xsk = test->ifobj_rx->xsk; 972 u32 idx_rx = 0, idx_fq = 0, rcvd, pkts_sent = 0; 973 struct ifobject *ifobj = test->ifobj_rx; 974 struct xsk_umem_info *umem = xsk->umem; 975 struct pkt *pkt; 976 int ret; 977 978 ret = gettimeofday(&tv_now, NULL); 979 if (ret) 980 exit_with_error(errno); 981 timeradd(&tv_now, &tv_timeout, &tv_end); 982 983 pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent); 984 while (pkt) { 985 u32 frags_processed = 0, nb_frags = 0, pkt_len = 0; 986 u64 first_addr; 987 988 ret = gettimeofday(&tv_now, NULL); 989 if (ret) 990 exit_with_error(errno); 991 if (timercmp(&tv_now, &tv_end, >)) { 992 ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__); 993 return TEST_FAILURE; 994 } 995 996 kick_rx(xsk); 997 if (ifobj->use_poll) { 998 ret = poll(fds, 1, POLL_TMOUT); 999 if (ret < 0) 1000 exit_with_error(errno); 1001 1002 if (!ret) { 1003 if (!is_umem_valid(test->ifobj_tx)) 1004 return TEST_PASS; 1005 1006 ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__); 1007 return TEST_FAILURE; 1008 } 1009 1010 if (!(fds->revents & POLLIN)) 1011 continue; 1012 } 1013 1014 rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx); 1015 if (!rcvd) 1016 continue; 1017 1018 if (ifobj->use_fill_ring) { 1019 ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); 1020 while (ret != rcvd) { 1021 if (ret < 0) 1022 exit_with_error(-ret); 1023 if (xsk_ring_prod__needs_wakeup(&umem->fq)) { 1024 ret = poll(fds, 1, POLL_TMOUT); 1025 if (ret < 0) 1026 exit_with_error(errno); 1027 } 1028 ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); 1029 } 1030 } 1031 1032 while (frags_processed < rcvd) { 1033 const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++); 1034 u64 addr = desc->addr, orig; 1035 1036 orig = xsk_umem__extract_addr(addr); 1037 addr = xsk_umem__add_offset_to_addr(addr); 1038 1039 if (!pkt) { 1040 ksft_print_msg("[%s] received too many packets addr: %lx len %u\n", 1041 __func__, addr, desc->len); 1042 return TEST_FAILURE; 1043 } 1044 1045 if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) || 1046 !is_offset_correct(umem, pkt, addr) || 1047 (ifobj->use_metadata && !is_metadata_correct(pkt, umem->buffer, addr))) 1048 return TEST_FAILURE; 1049 1050 if (!nb_frags++) 1051 first_addr = addr; 1052 frags_processed++; 1053 pkt_len += desc->len; 1054 if (ifobj->use_fill_ring) 1055 *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig; 1056 1057 if (pkt_continues(desc->options)) 1058 continue; 1059 1060 /* The complete packet has been received */ 1061 if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) || 1062 !is_offset_correct(umem, pkt, addr)) 1063 return TEST_FAILURE; 1064 1065 pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent); 1066 nb_frags = 0; 1067 pkt_len = 0; 1068 } 1069 1070 if (nb_frags) { 1071 /* In the middle of a packet. Start over from beginning of packet. */ 1072 idx_rx -= nb_frags; 1073 xsk_ring_cons__cancel(&xsk->rx, nb_frags); 1074 if (ifobj->use_fill_ring) { 1075 idx_fq -= nb_frags; 1076 xsk_ring_prod__cancel(&umem->fq, nb_frags); 1077 } 1078 frags_processed -= nb_frags; 1079 } 1080 1081 if (ifobj->use_fill_ring) 1082 xsk_ring_prod__submit(&umem->fq, frags_processed); 1083 if (ifobj->release_rx) 1084 xsk_ring_cons__release(&xsk->rx, frags_processed); 1085 1086 pthread_mutex_lock(&pacing_mutex); 1087 pkts_in_flight -= pkts_sent; 1088 pthread_mutex_unlock(&pacing_mutex); 1089 pkts_sent = 0; 1090 } 1091 1092 return TEST_PASS; 1093 } 1094 1095 static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeout) 1096 { 1097 u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len; 1098 struct pkt_stream *pkt_stream = ifobject->pkt_stream; 1099 struct xsk_socket_info *xsk = ifobject->xsk; 1100 struct xsk_umem_info *umem = ifobject->umem; 1101 bool use_poll = ifobject->use_poll; 1102 int ret; 1103 1104 buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len); 1105 /* pkts_in_flight might be negative if many invalid packets are sent */ 1106 if (pkts_in_flight >= (int)((umem_size(umem) - BATCH_SIZE * buffer_len) / buffer_len)) { 1107 kick_tx(xsk); 1108 return TEST_CONTINUE; 1109 } 1110 1111 while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) { 1112 if (use_poll) { 1113 ret = poll(fds, 1, POLL_TMOUT); 1114 if (timeout) { 1115 if (ret < 0) { 1116 ksft_print_msg("ERROR: [%s] Poll error %d\n", 1117 __func__, errno); 1118 return TEST_FAILURE; 1119 } 1120 if (ret == 0) 1121 return TEST_PASS; 1122 break; 1123 } 1124 if (ret <= 0) { 1125 ksft_print_msg("ERROR: [%s] Poll error %d\n", 1126 __func__, errno); 1127 return TEST_FAILURE; 1128 } 1129 } 1130 1131 complete_pkts(xsk, BATCH_SIZE); 1132 } 1133 1134 for (i = 0; i < BATCH_SIZE; i++) { 1135 struct pkt *pkt = pkt_stream_get_next_tx_pkt(pkt_stream); 1136 u32 nb_frags_left, nb_frags, bytes_written = 0; 1137 1138 if (!pkt) 1139 break; 1140 1141 nb_frags = pkt_nb_frags(umem->frame_size, pkt_stream, pkt); 1142 if (nb_frags > BATCH_SIZE - i) { 1143 pkt_stream_cancel(pkt_stream); 1144 xsk_ring_prod__cancel(&xsk->tx, BATCH_SIZE - i); 1145 break; 1146 } 1147 nb_frags_left = nb_frags; 1148 1149 while (nb_frags_left--) { 1150 struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i); 1151 1152 tx_desc->addr = pkt_get_addr(pkt, ifobject->umem); 1153 if (pkt_stream->verbatim) { 1154 tx_desc->len = pkt->len; 1155 tx_desc->options = pkt->options; 1156 } else if (nb_frags_left) { 1157 tx_desc->len = umem->frame_size; 1158 tx_desc->options = XDP_PKT_CONTD; 1159 } else { 1160 tx_desc->len = pkt->len - bytes_written; 1161 tx_desc->options = 0; 1162 } 1163 if (pkt->valid) 1164 pkt_generate(ifobject, tx_desc->addr, tx_desc->len, pkt->pkt_nb, 1165 bytes_written); 1166 bytes_written += tx_desc->len; 1167 1168 if (nb_frags_left) { 1169 i++; 1170 if (pkt_stream->verbatim) 1171 pkt = pkt_stream_get_next_tx_pkt(pkt_stream); 1172 } 1173 } 1174 1175 if (pkt && pkt->valid) { 1176 valid_pkts++; 1177 valid_frags += nb_frags; 1178 } 1179 } 1180 1181 pthread_mutex_lock(&pacing_mutex); 1182 pkts_in_flight += valid_pkts; 1183 pthread_mutex_unlock(&pacing_mutex); 1184 1185 xsk_ring_prod__submit(&xsk->tx, i); 1186 xsk->outstanding_tx += valid_frags; 1187 1188 if (use_poll) { 1189 ret = poll(fds, 1, POLL_TMOUT); 1190 if (ret <= 0) { 1191 if (ret == 0 && timeout) 1192 return TEST_PASS; 1193 1194 ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret); 1195 return TEST_FAILURE; 1196 } 1197 } 1198 1199 if (!timeout) { 1200 if (complete_pkts(xsk, i)) 1201 return TEST_FAILURE; 1202 1203 usleep(10); 1204 return TEST_PASS; 1205 } 1206 1207 return TEST_CONTINUE; 1208 } 1209 1210 static void wait_for_tx_completion(struct xsk_socket_info *xsk) 1211 { 1212 while (xsk->outstanding_tx) 1213 complete_pkts(xsk, BATCH_SIZE); 1214 } 1215 1216 static int send_pkts(struct test_spec *test, struct ifobject *ifobject) 1217 { 1218 struct pkt_stream *pkt_stream = ifobject->pkt_stream; 1219 bool timeout = !is_umem_valid(test->ifobj_rx); 1220 struct pollfd fds = { }; 1221 u32 ret; 1222 1223 fds.fd = xsk_socket__fd(ifobject->xsk->xsk); 1224 fds.events = POLLOUT; 1225 1226 while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) { 1227 ret = __send_pkts(ifobject, &fds, timeout); 1228 if (ret == TEST_CONTINUE && !test->fail) 1229 continue; 1230 if ((ret || test->fail) && !timeout) 1231 return TEST_FAILURE; 1232 if (ret == TEST_PASS && timeout) 1233 return ret; 1234 } 1235 1236 wait_for_tx_completion(ifobject->xsk); 1237 return TEST_PASS; 1238 } 1239 1240 static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats) 1241 { 1242 int fd = xsk_socket__fd(xsk), err; 1243 socklen_t optlen, expected_len; 1244 1245 optlen = sizeof(*stats); 1246 err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen); 1247 if (err) { 1248 ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n", 1249 __func__, -err, strerror(-err)); 1250 return TEST_FAILURE; 1251 } 1252 1253 expected_len = sizeof(struct xdp_statistics); 1254 if (optlen != expected_len) { 1255 ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n", 1256 __func__, expected_len, optlen); 1257 return TEST_FAILURE; 1258 } 1259 1260 return TEST_PASS; 1261 } 1262 1263 static int validate_rx_dropped(struct ifobject *ifobject) 1264 { 1265 struct xsk_socket *xsk = ifobject->xsk->xsk; 1266 struct xdp_statistics stats; 1267 int err; 1268 1269 kick_rx(ifobject->xsk); 1270 1271 err = get_xsk_stats(xsk, &stats); 1272 if (err) 1273 return TEST_FAILURE; 1274 1275 /* The receiver calls getsockopt after receiving the last (valid) 1276 * packet which is not the final packet sent in this test (valid and 1277 * invalid packets are sent in alternating fashion with the final 1278 * packet being invalid). Since the last packet may or may not have 1279 * been dropped already, both outcomes must be allowed. 1280 */ 1281 if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 || 1282 stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 - 1) 1283 return TEST_PASS; 1284 1285 return TEST_FAILURE; 1286 } 1287 1288 static int validate_rx_full(struct ifobject *ifobject) 1289 { 1290 struct xsk_socket *xsk = ifobject->xsk->xsk; 1291 struct xdp_statistics stats; 1292 int err; 1293 1294 usleep(1000); 1295 kick_rx(ifobject->xsk); 1296 1297 err = get_xsk_stats(xsk, &stats); 1298 if (err) 1299 return TEST_FAILURE; 1300 1301 if (stats.rx_ring_full) 1302 return TEST_PASS; 1303 1304 return TEST_FAILURE; 1305 } 1306 1307 static int validate_fill_empty(struct ifobject *ifobject) 1308 { 1309 struct xsk_socket *xsk = ifobject->xsk->xsk; 1310 struct xdp_statistics stats; 1311 int err; 1312 1313 usleep(1000); 1314 kick_rx(ifobject->xsk); 1315 1316 err = get_xsk_stats(xsk, &stats); 1317 if (err) 1318 return TEST_FAILURE; 1319 1320 if (stats.rx_fill_ring_empty_descs) 1321 return TEST_PASS; 1322 1323 return TEST_FAILURE; 1324 } 1325 1326 static int validate_tx_invalid_descs(struct ifobject *ifobject) 1327 { 1328 struct xsk_socket *xsk = ifobject->xsk->xsk; 1329 int fd = xsk_socket__fd(xsk); 1330 struct xdp_statistics stats; 1331 socklen_t optlen; 1332 int err; 1333 1334 optlen = sizeof(stats); 1335 err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen); 1336 if (err) { 1337 ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n", 1338 __func__, -err, strerror(-err)); 1339 return TEST_FAILURE; 1340 } 1341 1342 if (stats.tx_invalid_descs != ifobject->pkt_stream->nb_pkts / 2) { 1343 ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%u] expected [%u]\n", 1344 __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts); 1345 return TEST_FAILURE; 1346 } 1347 1348 return TEST_PASS; 1349 } 1350 1351 static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject, 1352 struct xsk_umem_info *umem, bool tx) 1353 { 1354 int i, ret; 1355 1356 for (i = 0; i < test->nb_sockets; i++) { 1357 bool shared = (ifobject->shared_umem && tx) ? true : !!i; 1358 u32 ctr = 0; 1359 1360 while (ctr++ < SOCK_RECONF_CTR) { 1361 ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem, 1362 ifobject, shared); 1363 if (!ret) 1364 break; 1365 1366 /* Retry if it fails as xsk_socket__create() is asynchronous */ 1367 if (ctr >= SOCK_RECONF_CTR) 1368 exit_with_error(-ret); 1369 usleep(USLEEP_MAX); 1370 } 1371 if (ifobject->busy_poll) 1372 enable_busy_poll(&ifobject->xsk_arr[i]); 1373 } 1374 } 1375 1376 static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject) 1377 { 1378 xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true); 1379 ifobject->xsk = &ifobject->xsk_arr[0]; 1380 ifobject->xskmap = test->ifobj_rx->xskmap; 1381 memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info)); 1382 ifobject->umem->base_addr = 0; 1383 } 1384 1385 static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, 1386 bool fill_up) 1387 { 1388 u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM; 1389 u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts; 1390 int ret; 1391 1392 if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) 1393 buffers_to_fill = umem->num_frames; 1394 else 1395 buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS; 1396 1397 ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); 1398 if (ret != buffers_to_fill) 1399 exit_with_error(ENOSPC); 1400 1401 while (filled < buffers_to_fill) { 1402 struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts); 1403 u64 addr; 1404 u32 i; 1405 1406 for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt_stream, pkt); i++) { 1407 if (!pkt) { 1408 if (!fill_up) 1409 break; 1410 addr = filled * umem->frame_size + umem->base_addr; 1411 } else if (pkt->offset >= 0) { 1412 addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem); 1413 } else { 1414 addr = pkt->offset + umem_alloc_buffer(umem); 1415 } 1416 1417 *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; 1418 if (++filled >= buffers_to_fill) 1419 break; 1420 } 1421 } 1422 xsk_ring_prod__submit(&umem->fq, filled); 1423 xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled); 1424 1425 pkt_stream_reset(pkt_stream); 1426 umem_reset_alloc(umem); 1427 } 1428 1429 static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) 1430 { 1431 u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size; 1432 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; 1433 LIBBPF_OPTS(bpf_xdp_query_opts, opts); 1434 void *bufs; 1435 int ret; 1436 1437 if (ifobject->umem->unaligned_mode) 1438 mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB; 1439 1440 if (ifobject->shared_umem) 1441 umem_sz *= 2; 1442 1443 bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); 1444 if (bufs == MAP_FAILED) 1445 exit_with_error(errno); 1446 1447 ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz); 1448 if (ret) 1449 exit_with_error(-ret); 1450 1451 xsk_configure_socket(test, ifobject, ifobject->umem, false); 1452 1453 ifobject->xsk = &ifobject->xsk_arr[0]; 1454 1455 if (!ifobject->rx_on) 1456 return; 1457 1458 xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream, ifobject->use_fill_ring); 1459 1460 ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk); 1461 if (ret) 1462 exit_with_error(errno); 1463 } 1464 1465 static void *worker_testapp_validate_tx(void *arg) 1466 { 1467 struct test_spec *test = (struct test_spec *)arg; 1468 struct ifobject *ifobject = test->ifobj_tx; 1469 int err; 1470 1471 if (test->current_step == 1) { 1472 if (!ifobject->shared_umem) 1473 thread_common_ops(test, ifobject); 1474 else 1475 thread_common_ops_tx(test, ifobject); 1476 } 1477 1478 print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts, 1479 ifobject->ifname); 1480 err = send_pkts(test, ifobject); 1481 1482 if (!err && ifobject->validation_func) 1483 err = ifobject->validation_func(ifobject); 1484 if (err) 1485 report_failure(test); 1486 1487 pthread_exit(NULL); 1488 } 1489 1490 static void *worker_testapp_validate_rx(void *arg) 1491 { 1492 struct test_spec *test = (struct test_spec *)arg; 1493 struct ifobject *ifobject = test->ifobj_rx; 1494 struct pollfd fds = { }; 1495 int err; 1496 1497 if (test->current_step == 1) { 1498 thread_common_ops(test, ifobject); 1499 } else { 1500 xsk_clear_xskmap(ifobject->xskmap); 1501 err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk); 1502 if (err) { 1503 printf("Error: Failed to update xskmap, error %s\n", strerror(-err)); 1504 exit_with_error(-err); 1505 } 1506 } 1507 1508 fds.fd = xsk_socket__fd(ifobject->xsk->xsk); 1509 fds.events = POLLIN; 1510 1511 pthread_barrier_wait(&barr); 1512 1513 err = receive_pkts(test, &fds); 1514 1515 if (!err && ifobject->validation_func) 1516 err = ifobject->validation_func(ifobject); 1517 if (err) 1518 report_failure(test); 1519 1520 pthread_exit(NULL); 1521 } 1522 1523 static u64 ceil_u64(u64 a, u64 b) 1524 { 1525 return (a + b - 1) / b; 1526 } 1527 1528 static void testapp_clean_xsk_umem(struct ifobject *ifobj) 1529 { 1530 u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size; 1531 1532 if (ifobj->shared_umem) 1533 umem_sz *= 2; 1534 1535 umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE; 1536 xsk_umem__delete(ifobj->umem->umem); 1537 munmap(ifobj->umem->buffer, umem_sz); 1538 } 1539 1540 static void handler(int signum) 1541 { 1542 pthread_exit(NULL); 1543 } 1544 1545 static bool xdp_prog_changed_rx(struct test_spec *test) 1546 { 1547 struct ifobject *ifobj = test->ifobj_rx; 1548 1549 return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode; 1550 } 1551 1552 static bool xdp_prog_changed_tx(struct test_spec *test) 1553 { 1554 struct ifobject *ifobj = test->ifobj_tx; 1555 1556 return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode; 1557 } 1558 1559 static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog, 1560 struct bpf_map *xskmap, enum test_mode mode) 1561 { 1562 int err; 1563 1564 xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode)); 1565 err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode)); 1566 if (err) { 1567 printf("Error attaching XDP program\n"); 1568 exit_with_error(-err); 1569 } 1570 1571 if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC)) 1572 if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) { 1573 ksft_print_msg("ERROR: XDP prog not in DRV mode\n"); 1574 exit_with_error(EINVAL); 1575 } 1576 1577 ifobj->xdp_prog = xdp_prog; 1578 ifobj->xskmap = xskmap; 1579 ifobj->mode = mode; 1580 } 1581 1582 static void xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx, 1583 struct ifobject *ifobj_tx) 1584 { 1585 if (xdp_prog_changed_rx(test)) 1586 xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode); 1587 1588 if (!ifobj_tx || ifobj_tx->shared_umem) 1589 return; 1590 1591 if (xdp_prog_changed_tx(test)) 1592 xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode); 1593 } 1594 1595 static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1, 1596 struct ifobject *ifobj2) 1597 { 1598 pthread_t t0, t1; 1599 int err; 1600 1601 if (test->mtu > MAX_ETH_PKT_SIZE) { 1602 if (test->mode == TEST_MODE_ZC && (!ifobj1->multi_buff_zc_supp || 1603 (ifobj2 && !ifobj2->multi_buff_zc_supp))) { 1604 ksft_test_result_skip("Multi buffer for zero-copy not supported.\n"); 1605 return TEST_SKIP; 1606 } 1607 if (test->mode != TEST_MODE_ZC && (!ifobj1->multi_buff_supp || 1608 (ifobj2 && !ifobj2->multi_buff_supp))) { 1609 ksft_test_result_skip("Multi buffer not supported.\n"); 1610 return TEST_SKIP; 1611 } 1612 } 1613 err = test_spec_set_mtu(test, test->mtu); 1614 if (err) { 1615 ksft_print_msg("Error, could not set mtu.\n"); 1616 exit_with_error(err); 1617 } 1618 1619 if (ifobj2) { 1620 if (pthread_barrier_init(&barr, NULL, 2)) 1621 exit_with_error(errno); 1622 pkt_stream_reset(ifobj2->pkt_stream); 1623 } 1624 1625 test->current_step++; 1626 pkt_stream_reset(ifobj1->pkt_stream); 1627 pkts_in_flight = 0; 1628 1629 signal(SIGUSR1, handler); 1630 /*Spawn RX thread */ 1631 pthread_create(&t0, NULL, ifobj1->func_ptr, test); 1632 1633 if (ifobj2) { 1634 pthread_barrier_wait(&barr); 1635 if (pthread_barrier_destroy(&barr)) 1636 exit_with_error(errno); 1637 1638 /*Spawn TX thread */ 1639 pthread_create(&t1, NULL, ifobj2->func_ptr, test); 1640 1641 pthread_join(t1, NULL); 1642 } 1643 1644 if (!ifobj2) 1645 pthread_kill(t0, SIGUSR1); 1646 else 1647 pthread_join(t0, NULL); 1648 1649 if (test->total_steps == test->current_step || test->fail) { 1650 if (ifobj2) 1651 xsk_socket__delete(ifobj2->xsk->xsk); 1652 xsk_socket__delete(ifobj1->xsk->xsk); 1653 testapp_clean_xsk_umem(ifobj1); 1654 if (ifobj2 && !ifobj2->shared_umem) 1655 testapp_clean_xsk_umem(ifobj2); 1656 } 1657 1658 return !!test->fail; 1659 } 1660 1661 static int testapp_validate_traffic(struct test_spec *test) 1662 { 1663 struct ifobject *ifobj_rx = test->ifobj_rx; 1664 struct ifobject *ifobj_tx = test->ifobj_tx; 1665 1666 if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) || 1667 (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) { 1668 ksft_test_result_skip("No huge pages present.\n"); 1669 return TEST_SKIP; 1670 } 1671 1672 xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx); 1673 return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx); 1674 } 1675 1676 static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj) 1677 { 1678 return __testapp_validate_traffic(test, ifobj, NULL); 1679 } 1680 1681 static int testapp_teardown(struct test_spec *test) 1682 { 1683 int i; 1684 1685 test_spec_set_name(test, "TEARDOWN"); 1686 for (i = 0; i < MAX_TEARDOWN_ITER; i++) { 1687 if (testapp_validate_traffic(test)) 1688 return TEST_FAILURE; 1689 test_spec_reset(test); 1690 } 1691 1692 return TEST_PASS; 1693 } 1694 1695 static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2) 1696 { 1697 thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr; 1698 struct ifobject *tmp_ifobj = (*ifobj1); 1699 1700 (*ifobj1)->func_ptr = (*ifobj2)->func_ptr; 1701 (*ifobj2)->func_ptr = tmp_func_ptr; 1702 1703 *ifobj1 = *ifobj2; 1704 *ifobj2 = tmp_ifobj; 1705 } 1706 1707 static int testapp_bidi(struct test_spec *test) 1708 { 1709 int res; 1710 1711 test_spec_set_name(test, "BIDIRECTIONAL"); 1712 test->ifobj_tx->rx_on = true; 1713 test->ifobj_rx->tx_on = true; 1714 test->total_steps = 2; 1715 if (testapp_validate_traffic(test)) 1716 return TEST_FAILURE; 1717 1718 print_verbose("Switching Tx/Rx vectors\n"); 1719 swap_directions(&test->ifobj_rx, &test->ifobj_tx); 1720 res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx); 1721 1722 swap_directions(&test->ifobj_rx, &test->ifobj_tx); 1723 return res; 1724 } 1725 1726 static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx) 1727 { 1728 int ret; 1729 1730 xsk_socket__delete(ifobj_tx->xsk->xsk); 1731 xsk_socket__delete(ifobj_rx->xsk->xsk); 1732 ifobj_tx->xsk = &ifobj_tx->xsk_arr[1]; 1733 ifobj_rx->xsk = &ifobj_rx->xsk_arr[1]; 1734 1735 ret = xsk_update_xskmap(ifobj_rx->xskmap, ifobj_rx->xsk->xsk); 1736 if (ret) 1737 exit_with_error(errno); 1738 } 1739 1740 static int testapp_bpf_res(struct test_spec *test) 1741 { 1742 test_spec_set_name(test, "BPF_RES"); 1743 test->total_steps = 2; 1744 test->nb_sockets = 2; 1745 if (testapp_validate_traffic(test)) 1746 return TEST_FAILURE; 1747 1748 swap_xsk_resources(test->ifobj_tx, test->ifobj_rx); 1749 return testapp_validate_traffic(test); 1750 } 1751 1752 static int testapp_headroom(struct test_spec *test) 1753 { 1754 test_spec_set_name(test, "UMEM_HEADROOM"); 1755 test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE; 1756 return testapp_validate_traffic(test); 1757 } 1758 1759 static int testapp_stats_rx_dropped(struct test_spec *test) 1760 { 1761 test_spec_set_name(test, "STAT_RX_DROPPED"); 1762 if (test->mode == TEST_MODE_ZC) { 1763 ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n"); 1764 return TEST_SKIP; 1765 } 1766 1767 pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0); 1768 test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size - 1769 XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3; 1770 pkt_stream_receive_half(test); 1771 test->ifobj_rx->validation_func = validate_rx_dropped; 1772 return testapp_validate_traffic(test); 1773 } 1774 1775 static int testapp_stats_tx_invalid_descs(struct test_spec *test) 1776 { 1777 test_spec_set_name(test, "STAT_TX_INVALID"); 1778 pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0); 1779 test->ifobj_tx->validation_func = validate_tx_invalid_descs; 1780 return testapp_validate_traffic(test); 1781 } 1782 1783 static int testapp_stats_rx_full(struct test_spec *test) 1784 { 1785 test_spec_set_name(test, "STAT_RX_FULL"); 1786 pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE); 1787 test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, 1788 DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); 1789 1790 test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS; 1791 test->ifobj_rx->release_rx = false; 1792 test->ifobj_rx->validation_func = validate_rx_full; 1793 return testapp_validate_traffic(test); 1794 } 1795 1796 static int testapp_stats_fill_empty(struct test_spec *test) 1797 { 1798 test_spec_set_name(test, "STAT_RX_FILL_EMPTY"); 1799 pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE); 1800 test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, 1801 DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); 1802 1803 test->ifobj_rx->use_fill_ring = false; 1804 test->ifobj_rx->validation_func = validate_fill_empty; 1805 return testapp_validate_traffic(test); 1806 } 1807 1808 static int testapp_unaligned(struct test_spec *test) 1809 { 1810 test_spec_set_name(test, "UNALIGNED_MODE"); 1811 test->ifobj_tx->umem->unaligned_mode = true; 1812 test->ifobj_rx->umem->unaligned_mode = true; 1813 /* Let half of the packets straddle a 4K buffer boundary */ 1814 pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2); 1815 1816 return testapp_validate_traffic(test); 1817 } 1818 1819 static int testapp_unaligned_mb(struct test_spec *test) 1820 { 1821 test_spec_set_name(test, "UNALIGNED_MODE_9K"); 1822 test->mtu = MAX_ETH_JUMBO_SIZE; 1823 test->ifobj_tx->umem->unaligned_mode = true; 1824 test->ifobj_rx->umem->unaligned_mode = true; 1825 pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE); 1826 return testapp_validate_traffic(test); 1827 } 1828 1829 static int testapp_single_pkt(struct test_spec *test) 1830 { 1831 struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}}; 1832 1833 pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); 1834 return testapp_validate_traffic(test); 1835 } 1836 1837 static int testapp_multi_buffer(struct test_spec *test) 1838 { 1839 test_spec_set_name(test, "RUN_TO_COMPLETION_9K_PACKETS"); 1840 test->mtu = MAX_ETH_JUMBO_SIZE; 1841 pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE); 1842 1843 return testapp_validate_traffic(test); 1844 } 1845 1846 static int testapp_invalid_desc_mb(struct test_spec *test) 1847 { 1848 struct xsk_umem_info *umem = test->ifobj_tx->umem; 1849 u64 umem_size = umem->num_frames * umem->frame_size; 1850 struct pkt pkts[] = { 1851 /* Valid packet for synch to start with */ 1852 {0, MIN_PKT_SIZE, 0, true, 0}, 1853 /* Zero frame len is not legal */ 1854 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD}, 1855 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD}, 1856 {0, 0, 0, false, 0}, 1857 /* Invalid address in the second frame */ 1858 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD}, 1859 {umem_size, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD}, 1860 /* Invalid len in the middle */ 1861 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD}, 1862 {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false, XDP_PKT_CONTD}, 1863 /* Invalid options in the middle */ 1864 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD}, 1865 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XSK_DESC__INVALID_OPTION}, 1866 /* Transmit 2 frags, receive 3 */ 1867 {0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, XDP_PKT_CONTD}, 1868 {0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, 0}, 1869 /* Middle frame crosses chunk boundary with small length */ 1870 {0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD}, 1871 {-MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false, 0}, 1872 /* Valid packet for synch so that something is received */ 1873 {0, MIN_PKT_SIZE, 0, true, 0}}; 1874 1875 if (umem->unaligned_mode) { 1876 /* Crossing a chunk boundary allowed */ 1877 pkts[12].valid = true; 1878 pkts[13].valid = true; 1879 } 1880 1881 test->mtu = MAX_ETH_JUMBO_SIZE; 1882 pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); 1883 return testapp_validate_traffic(test); 1884 } 1885 1886 static int testapp_invalid_desc(struct test_spec *test) 1887 { 1888 struct xsk_umem_info *umem = test->ifobj_tx->umem; 1889 u64 umem_size = umem->num_frames * umem->frame_size; 1890 struct pkt pkts[] = { 1891 /* Zero packet address allowed */ 1892 {0, MIN_PKT_SIZE, 0, true}, 1893 /* Allowed packet */ 1894 {0, MIN_PKT_SIZE, 0, true}, 1895 /* Straddling the start of umem */ 1896 {-2, MIN_PKT_SIZE, 0, false}, 1897 /* Packet too large */ 1898 {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false}, 1899 /* Up to end of umem allowed */ 1900 {umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true}, 1901 /* After umem ends */ 1902 {umem_size, MIN_PKT_SIZE, 0, false}, 1903 /* Straddle the end of umem */ 1904 {umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false}, 1905 /* Straddle a 4K boundary */ 1906 {0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false}, 1907 /* Straddle a 2K boundary */ 1908 {0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true}, 1909 /* Valid packet for synch so that something is received */ 1910 {0, MIN_PKT_SIZE, 0, true}}; 1911 1912 if (umem->unaligned_mode) { 1913 /* Crossing a page boundary allowed */ 1914 pkts[7].valid = true; 1915 } 1916 if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) { 1917 /* Crossing a 2K frame size boundary not allowed */ 1918 pkts[8].valid = false; 1919 } 1920 1921 if (test->ifobj_tx->shared_umem) { 1922 pkts[4].offset += umem_size; 1923 pkts[5].offset += umem_size; 1924 pkts[6].offset += umem_size; 1925 } 1926 1927 pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); 1928 return testapp_validate_traffic(test); 1929 } 1930 1931 static int testapp_xdp_drop(struct test_spec *test) 1932 { 1933 struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs; 1934 struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs; 1935 1936 test_spec_set_name(test, "XDP_DROP_HALF"); 1937 test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop, 1938 skel_rx->maps.xsk, skel_tx->maps.xsk); 1939 1940 pkt_stream_receive_half(test); 1941 return testapp_validate_traffic(test); 1942 } 1943 1944 static int testapp_xdp_metadata_count(struct test_spec *test) 1945 { 1946 struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs; 1947 struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs; 1948 struct bpf_map *data_map; 1949 int count = 0; 1950 int key = 0; 1951 1952 test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata, 1953 skel_tx->progs.xsk_xdp_populate_metadata, 1954 skel_rx->maps.xsk, skel_tx->maps.xsk); 1955 test->ifobj_rx->use_metadata = true; 1956 1957 data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss"); 1958 if (!data_map || !bpf_map__is_internal(data_map)) 1959 exit_with_error(ENOMEM); 1960 1961 if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY)) 1962 exit_with_error(errno); 1963 1964 return testapp_validate_traffic(test); 1965 } 1966 1967 static int testapp_poll_txq_tmout(struct test_spec *test) 1968 { 1969 test_spec_set_name(test, "POLL_TXQ_FULL"); 1970 1971 test->ifobj_tx->use_poll = true; 1972 /* create invalid frame by set umem frame_size and pkt length equal to 2048 */ 1973 test->ifobj_tx->umem->frame_size = 2048; 1974 pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048); 1975 return testapp_validate_traffic_single_thread(test, test->ifobj_tx); 1976 } 1977 1978 static int testapp_poll_rxq_tmout(struct test_spec *test) 1979 { 1980 test_spec_set_name(test, "POLL_RXQ_EMPTY"); 1981 test->ifobj_rx->use_poll = true; 1982 return testapp_validate_traffic_single_thread(test, test->ifobj_rx); 1983 } 1984 1985 static int testapp_too_many_frags(struct test_spec *test) 1986 { 1987 struct pkt pkts[2 * XSK_DESC__MAX_SKB_FRAGS + 2] = {}; 1988 u32 max_frags, i; 1989 1990 test_spec_set_name(test, "TOO_MANY_FRAGS"); 1991 if (test->mode == TEST_MODE_ZC) 1992 max_frags = test->ifobj_tx->xdp_zc_max_segs; 1993 else 1994 max_frags = XSK_DESC__MAX_SKB_FRAGS; 1995 1996 test->mtu = MAX_ETH_JUMBO_SIZE; 1997 1998 /* Valid packet for synch */ 1999 pkts[0].len = MIN_PKT_SIZE; 2000 pkts[0].valid = true; 2001 2002 /* One valid packet with the max amount of frags */ 2003 for (i = 1; i < max_frags + 1; i++) { 2004 pkts[i].len = MIN_PKT_SIZE; 2005 pkts[i].options = XDP_PKT_CONTD; 2006 pkts[i].valid = true; 2007 } 2008 pkts[max_frags].options = 0; 2009 2010 /* An invalid packet with the max amount of frags but signals packet 2011 * continues on the last frag 2012 */ 2013 for (i = max_frags + 1; i < 2 * max_frags + 1; i++) { 2014 pkts[i].len = MIN_PKT_SIZE; 2015 pkts[i].options = XDP_PKT_CONTD; 2016 pkts[i].valid = false; 2017 } 2018 2019 /* Valid packet for synch */ 2020 pkts[2 * max_frags + 1].len = MIN_PKT_SIZE; 2021 pkts[2 * max_frags + 1].valid = true; 2022 2023 pkt_stream_generate_custom(test, pkts, 2 * max_frags + 2); 2024 return testapp_validate_traffic(test); 2025 } 2026 2027 static int xsk_load_xdp_programs(struct ifobject *ifobj) 2028 { 2029 ifobj->xdp_progs = xsk_xdp_progs__open_and_load(); 2030 if (libbpf_get_error(ifobj->xdp_progs)) 2031 return libbpf_get_error(ifobj->xdp_progs); 2032 2033 return 0; 2034 } 2035 2036 static void xsk_unload_xdp_programs(struct ifobject *ifobj) 2037 { 2038 xsk_xdp_progs__destroy(ifobj->xdp_progs); 2039 } 2040 2041 /* Simple test */ 2042 static bool hugepages_present(void) 2043 { 2044 size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE; 2045 void *bufs; 2046 2047 bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, 2048 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, MAP_HUGE_2MB); 2049 if (bufs == MAP_FAILED) 2050 return false; 2051 2052 mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE; 2053 munmap(bufs, mmap_sz); 2054 return true; 2055 } 2056 2057 static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac, 2058 thread_func_t func_ptr) 2059 { 2060 LIBBPF_OPTS(bpf_xdp_query_opts, query_opts); 2061 int err; 2062 2063 memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN); 2064 memcpy(ifobj->src_mac, src_mac, ETH_ALEN); 2065 2066 ifobj->func_ptr = func_ptr; 2067 2068 err = xsk_load_xdp_programs(ifobj); 2069 if (err) { 2070 printf("Error loading XDP program\n"); 2071 exit_with_error(err); 2072 } 2073 2074 if (hugepages_present()) 2075 ifobj->unaligned_supp = true; 2076 2077 err = bpf_xdp_query(ifobj->ifindex, XDP_FLAGS_DRV_MODE, &query_opts); 2078 if (err) { 2079 ksft_print_msg("Error querying XDP capabilities\n"); 2080 exit_with_error(-err); 2081 } 2082 if (query_opts.feature_flags & NETDEV_XDP_ACT_RX_SG) 2083 ifobj->multi_buff_supp = true; 2084 if (query_opts.feature_flags & NETDEV_XDP_ACT_XSK_ZEROCOPY) { 2085 if (query_opts.xdp_zc_max_segs > 1) { 2086 ifobj->multi_buff_zc_supp = true; 2087 ifobj->xdp_zc_max_segs = query_opts.xdp_zc_max_segs; 2088 } else { 2089 ifobj->xdp_zc_max_segs = 0; 2090 } 2091 } 2092 } 2093 2094 static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type) 2095 { 2096 int ret = TEST_SKIP; 2097 2098 switch (type) { 2099 case TEST_TYPE_STATS_RX_DROPPED: 2100 ret = testapp_stats_rx_dropped(test); 2101 break; 2102 case TEST_TYPE_STATS_TX_INVALID_DESCS: 2103 ret = testapp_stats_tx_invalid_descs(test); 2104 break; 2105 case TEST_TYPE_STATS_RX_FULL: 2106 ret = testapp_stats_rx_full(test); 2107 break; 2108 case TEST_TYPE_STATS_FILL_EMPTY: 2109 ret = testapp_stats_fill_empty(test); 2110 break; 2111 case TEST_TYPE_TEARDOWN: 2112 ret = testapp_teardown(test); 2113 break; 2114 case TEST_TYPE_BIDI: 2115 ret = testapp_bidi(test); 2116 break; 2117 case TEST_TYPE_BPF_RES: 2118 ret = testapp_bpf_res(test); 2119 break; 2120 case TEST_TYPE_RUN_TO_COMPLETION: 2121 test_spec_set_name(test, "RUN_TO_COMPLETION"); 2122 ret = testapp_validate_traffic(test); 2123 break; 2124 case TEST_TYPE_RUN_TO_COMPLETION_MB: 2125 ret = testapp_multi_buffer(test); 2126 break; 2127 case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT: 2128 test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT"); 2129 ret = testapp_single_pkt(test); 2130 break; 2131 case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME: 2132 test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE"); 2133 test->ifobj_tx->umem->frame_size = 2048; 2134 test->ifobj_rx->umem->frame_size = 2048; 2135 pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE); 2136 ret = testapp_validate_traffic(test); 2137 break; 2138 case TEST_TYPE_RX_POLL: 2139 test->ifobj_rx->use_poll = true; 2140 test_spec_set_name(test, "POLL_RX"); 2141 ret = testapp_validate_traffic(test); 2142 break; 2143 case TEST_TYPE_TX_POLL: 2144 test->ifobj_tx->use_poll = true; 2145 test_spec_set_name(test, "POLL_TX"); 2146 ret = testapp_validate_traffic(test); 2147 break; 2148 case TEST_TYPE_POLL_TXQ_TMOUT: 2149 ret = testapp_poll_txq_tmout(test); 2150 break; 2151 case TEST_TYPE_POLL_RXQ_TMOUT: 2152 ret = testapp_poll_rxq_tmout(test); 2153 break; 2154 case TEST_TYPE_ALIGNED_INV_DESC: 2155 test_spec_set_name(test, "ALIGNED_INV_DESC"); 2156 ret = testapp_invalid_desc(test); 2157 break; 2158 case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME: 2159 test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE"); 2160 test->ifobj_tx->umem->frame_size = 2048; 2161 test->ifobj_rx->umem->frame_size = 2048; 2162 ret = testapp_invalid_desc(test); 2163 break; 2164 case TEST_TYPE_UNALIGNED_INV_DESC: 2165 test_spec_set_name(test, "UNALIGNED_INV_DESC"); 2166 test->ifobj_tx->umem->unaligned_mode = true; 2167 test->ifobj_rx->umem->unaligned_mode = true; 2168 ret = testapp_invalid_desc(test); 2169 break; 2170 case TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME: { 2171 u64 page_size, umem_size; 2172 2173 test_spec_set_name(test, "UNALIGNED_INV_DESC_4K1_FRAME_SIZE"); 2174 /* Odd frame size so the UMEM doesn't end near a page boundary. */ 2175 test->ifobj_tx->umem->frame_size = 4001; 2176 test->ifobj_rx->umem->frame_size = 4001; 2177 test->ifobj_tx->umem->unaligned_mode = true; 2178 test->ifobj_rx->umem->unaligned_mode = true; 2179 /* This test exists to test descriptors that staddle the end of 2180 * the UMEM but not a page. 2181 */ 2182 page_size = sysconf(_SC_PAGESIZE); 2183 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size; 2184 assert(umem_size % page_size > MIN_PKT_SIZE); 2185 assert(umem_size % page_size < page_size - MIN_PKT_SIZE); 2186 ret = testapp_invalid_desc(test); 2187 break; 2188 } 2189 case TEST_TYPE_ALIGNED_INV_DESC_MB: 2190 test_spec_set_name(test, "ALIGNED_INV_DESC_MULTI_BUFF"); 2191 ret = testapp_invalid_desc_mb(test); 2192 break; 2193 case TEST_TYPE_UNALIGNED_INV_DESC_MB: 2194 test_spec_set_name(test, "UNALIGNED_INV_DESC_MULTI_BUFF"); 2195 test->ifobj_tx->umem->unaligned_mode = true; 2196 test->ifobj_rx->umem->unaligned_mode = true; 2197 ret = testapp_invalid_desc_mb(test); 2198 break; 2199 case TEST_TYPE_UNALIGNED: 2200 ret = testapp_unaligned(test); 2201 break; 2202 case TEST_TYPE_UNALIGNED_MB: 2203 ret = testapp_unaligned_mb(test); 2204 break; 2205 case TEST_TYPE_HEADROOM: 2206 ret = testapp_headroom(test); 2207 break; 2208 case TEST_TYPE_XDP_DROP_HALF: 2209 ret = testapp_xdp_drop(test); 2210 break; 2211 case TEST_TYPE_XDP_METADATA_COUNT: 2212 test_spec_set_name(test, "XDP_METADATA_COUNT"); 2213 ret = testapp_xdp_metadata_count(test); 2214 break; 2215 case TEST_TYPE_XDP_METADATA_COUNT_MB: 2216 test_spec_set_name(test, "XDP_METADATA_COUNT_MULTI_BUFF"); 2217 test->mtu = MAX_ETH_JUMBO_SIZE; 2218 ret = testapp_xdp_metadata_count(test); 2219 break; 2220 case TEST_TYPE_TOO_MANY_FRAGS: 2221 ret = testapp_too_many_frags(test); 2222 break; 2223 default: 2224 break; 2225 } 2226 2227 if (ret == TEST_PASS) 2228 ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test), 2229 test->name); 2230 pkt_stream_restore_default(test); 2231 } 2232 2233 static struct ifobject *ifobject_create(void) 2234 { 2235 struct ifobject *ifobj; 2236 2237 ifobj = calloc(1, sizeof(struct ifobject)); 2238 if (!ifobj) 2239 return NULL; 2240 2241 ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr)); 2242 if (!ifobj->xsk_arr) 2243 goto out_xsk_arr; 2244 2245 ifobj->umem = calloc(1, sizeof(*ifobj->umem)); 2246 if (!ifobj->umem) 2247 goto out_umem; 2248 2249 return ifobj; 2250 2251 out_umem: 2252 free(ifobj->xsk_arr); 2253 out_xsk_arr: 2254 free(ifobj); 2255 return NULL; 2256 } 2257 2258 static void ifobject_delete(struct ifobject *ifobj) 2259 { 2260 free(ifobj->umem); 2261 free(ifobj->xsk_arr); 2262 free(ifobj); 2263 } 2264 2265 static bool is_xdp_supported(int ifindex) 2266 { 2267 int flags = XDP_FLAGS_DRV_MODE; 2268 2269 LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = flags); 2270 struct bpf_insn insns[2] = { 2271 BPF_MOV64_IMM(BPF_REG_0, XDP_PASS), 2272 BPF_EXIT_INSN() 2273 }; 2274 int prog_fd, insn_cnt = ARRAY_SIZE(insns); 2275 int err; 2276 2277 prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL); 2278 if (prog_fd < 0) 2279 return false; 2280 2281 err = bpf_xdp_attach(ifindex, prog_fd, flags, NULL); 2282 if (err) { 2283 close(prog_fd); 2284 return false; 2285 } 2286 2287 bpf_xdp_detach(ifindex, flags, NULL); 2288 close(prog_fd); 2289 2290 return true; 2291 } 2292 2293 int main(int argc, char **argv) 2294 { 2295 struct pkt_stream *rx_pkt_stream_default; 2296 struct pkt_stream *tx_pkt_stream_default; 2297 struct ifobject *ifobj_tx, *ifobj_rx; 2298 int modes = TEST_MODE_SKB + 1; 2299 u32 i, j, failed_tests = 0; 2300 struct test_spec test; 2301 bool shared_netdev; 2302 2303 /* Use libbpf 1.0 API mode */ 2304 libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 2305 2306 ifobj_tx = ifobject_create(); 2307 if (!ifobj_tx) 2308 exit_with_error(ENOMEM); 2309 ifobj_rx = ifobject_create(); 2310 if (!ifobj_rx) 2311 exit_with_error(ENOMEM); 2312 2313 setlocale(LC_ALL, ""); 2314 2315 parse_command_line(ifobj_tx, ifobj_rx, argc, argv); 2316 2317 shared_netdev = (ifobj_tx->ifindex == ifobj_rx->ifindex); 2318 ifobj_tx->shared_umem = shared_netdev; 2319 ifobj_rx->shared_umem = shared_netdev; 2320 2321 if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) { 2322 usage(basename(argv[0])); 2323 ksft_exit_xfail(); 2324 } 2325 2326 if (is_xdp_supported(ifobj_tx->ifindex)) { 2327 modes++; 2328 if (ifobj_zc_avail(ifobj_tx)) 2329 modes++; 2330 } 2331 2332 init_iface(ifobj_rx, MAC1, MAC2, worker_testapp_validate_rx); 2333 init_iface(ifobj_tx, MAC2, MAC1, worker_testapp_validate_tx); 2334 2335 test_spec_init(&test, ifobj_tx, ifobj_rx, 0); 2336 tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE); 2337 rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE); 2338 if (!tx_pkt_stream_default || !rx_pkt_stream_default) 2339 exit_with_error(ENOMEM); 2340 test.tx_pkt_stream_default = tx_pkt_stream_default; 2341 test.rx_pkt_stream_default = rx_pkt_stream_default; 2342 2343 ksft_set_plan(modes * TEST_TYPE_MAX); 2344 2345 for (i = 0; i < modes; i++) { 2346 for (j = 0; j < TEST_TYPE_MAX; j++) { 2347 test_spec_init(&test, ifobj_tx, ifobj_rx, i); 2348 run_pkt_test(&test, i, j); 2349 usleep(USLEEP_MAX); 2350 2351 if (test.fail) 2352 failed_tests++; 2353 } 2354 } 2355 2356 pkt_stream_delete(tx_pkt_stream_default); 2357 pkt_stream_delete(rx_pkt_stream_default); 2358 xsk_unload_xdp_programs(ifobj_tx); 2359 xsk_unload_xdp_programs(ifobj_rx); 2360 ifobject_delete(ifobj_tx); 2361 ifobject_delete(ifobj_rx); 2362 2363 if (failed_tests) 2364 ksft_exit_fail(); 2365 else 2366 ksft_exit_pass(); 2367 } 2368