1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2020 Intel Corporation. */ 3 4 /* 5 * Some functions in this program are taken from 6 * Linux kernel samples/bpf/xdpsock* and modified 7 * for use. 8 * 9 * See test_xsk.sh for detailed information on test topology 10 * and prerequisite network setup. 11 * 12 * This test program contains two threads, each thread is single socket with 13 * a unique UMEM. It validates in-order packet delivery and packet content 14 * by sending packets to each other. 15 * 16 * Tests Information: 17 * ------------------ 18 * These selftests test AF_XDP SKB and Native/DRV modes using veth 19 * Virtual Ethernet interfaces. 20 * 21 * For each mode, the following tests are run: 22 * a. nopoll - soft-irq processing in run-to-completion mode 23 * b. poll - using poll() syscall 24 * c. Socket Teardown 25 * Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy 26 * both sockets, then repeat multiple times. Only nopoll mode is used 27 * d. Bi-directional sockets 28 * Configure sockets as bi-directional tx/rx sockets, sets up fill and 29 * completion rings on each socket, tx/rx in both directions. Only nopoll 30 * mode is used 31 * e. Statistics 32 * Trigger some error conditions and ensure that the appropriate statistics 33 * are incremented. Within this test, the following statistics are tested: 34 * i. rx dropped 35 * Increase the UMEM frame headroom to a value which results in 36 * insufficient space in the rx buffer for both the packet and the headroom. 37 * ii. tx invalid 38 * Set the 'len' field of tx descriptors to an invalid value (umem frame 39 * size + 1). 40 * iii. rx ring full 41 * Reduce the size of the RX ring to a fraction of the fill ring size. 42 * iv. fill queue empty 43 * Do not populate the fill queue and then try to receive pkts. 44 * f. bpf_link resource persistence 45 * Configure sockets at indexes 0 and 1, run a traffic on queue ids 0, 46 * then remove xsk sockets from queue 0 on both veth interfaces and 47 * finally run a traffic on queues ids 1 48 * g. unaligned mode 49 * h. tests for invalid and corner case Tx descriptors so that the correct ones 50 * are discarded and let through, respectively. 51 * i. 2K frame size tests 52 * 53 * Total tests: 12 54 * 55 * Flow: 56 * ----- 57 * - Single process spawns two threads: Tx and Rx 58 * - Each of these two threads attach to a veth interface 59 * - Each thread creates one AF_XDP socket connected to a unique umem for each 60 * veth interface 61 * - Tx thread Transmits a number of packets from veth<xxxx> to veth<yyyy> 62 * - Rx thread verifies if all packets were received and delivered in-order, 63 * and have the right content 64 * 65 * Enable/disable packet dump mode: 66 * -------------------------- 67 * To enable L2 - L4 headers and payload dump of each packet on STDOUT, add 68 * parameter -D to params array in test_xsk.sh, i.e. params=("-S" "-D") 69 */ 70 71 #define _GNU_SOURCE 72 #include <assert.h> 73 #include <fcntl.h> 74 #include <errno.h> 75 #include <getopt.h> 76 #include <asm/barrier.h> 77 #include <linux/if_link.h> 78 #include <linux/if_ether.h> 79 #include <linux/mman.h> 80 #include <arpa/inet.h> 81 #include <net/if.h> 82 #include <locale.h> 83 #include <poll.h> 84 #include <pthread.h> 85 #include <signal.h> 86 #include <stdio.h> 87 #include <stdlib.h> 88 #include <string.h> 89 #include <stddef.h> 90 #include <sys/mman.h> 91 #include <sys/socket.h> 92 #include <sys/time.h> 93 #include <sys/types.h> 94 #include <time.h> 95 #include <unistd.h> 96 97 #include "xsk_xdp_progs.skel.h" 98 #include "xsk.h" 99 #include "xskxceiver.h" 100 #include <bpf/bpf.h> 101 #include <linux/filter.h> 102 #include "../kselftest.h" 103 #include "xsk_xdp_metadata.h" 104 105 static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62"; 106 static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61"; 107 108 static void __exit_with_error(int error, const char *file, const char *func, int line) 109 { 110 ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error, 111 strerror(error)); 112 ksft_exit_xfail(); 113 } 114 115 #define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__) 116 #define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? "BUSY-POLL " : "" 117 static char *mode_string(struct test_spec *test) 118 { 119 switch (test->mode) { 120 case TEST_MODE_SKB: 121 return "SKB"; 122 case TEST_MODE_DRV: 123 return "DRV"; 124 case TEST_MODE_ZC: 125 return "ZC"; 126 default: 127 return "BOGUS"; 128 } 129 } 130 131 static void report_failure(struct test_spec *test) 132 { 133 if (test->fail) 134 return; 135 136 ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test), 137 test->name); 138 test->fail = true; 139 } 140 141 /* The payload is a word consisting of a packet sequence number in the upper 142 * 16-bits and a intra packet data sequence number in the lower 16 bits. So the 3rd packet's 143 * 5th word of data will contain the number (2<<16) | 4 as they are numbered from 0. 144 */ 145 static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size) 146 { 147 u32 *ptr = (u32 *)dest, i; 148 149 start /= sizeof(*ptr); 150 size /= sizeof(*ptr); 151 for (i = 0; i < size; i++) 152 ptr[i] = htonl(pkt_nb << 16 | (i + start)); 153 } 154 155 static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr) 156 { 157 memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN); 158 memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN); 159 eth_hdr->h_proto = htons(ETH_P_LOOPBACK); 160 } 161 162 static bool is_umem_valid(struct ifobject *ifobj) 163 { 164 return !!ifobj->umem->umem; 165 } 166 167 static u32 mode_to_xdp_flags(enum test_mode mode) 168 { 169 return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE; 170 } 171 172 static u64 umem_size(struct xsk_umem_info *umem) 173 { 174 return umem->num_frames * umem->frame_size; 175 } 176 177 static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer, 178 u64 size) 179 { 180 struct xsk_umem_config cfg = { 181 .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, 182 .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, 183 .frame_size = umem->frame_size, 184 .frame_headroom = umem->frame_headroom, 185 .flags = XSK_UMEM__DEFAULT_FLAGS 186 }; 187 int ret; 188 189 if (umem->unaligned_mode) 190 cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG; 191 192 ret = xsk_umem__create(&umem->umem, buffer, size, 193 &umem->fq, &umem->cq, &cfg); 194 if (ret) 195 return ret; 196 197 umem->buffer = buffer; 198 if (ifobj->shared_umem && ifobj->rx_on) { 199 umem->base_addr = umem_size(umem); 200 umem->next_buffer = umem_size(umem); 201 } 202 203 return 0; 204 } 205 206 static u64 umem_alloc_buffer(struct xsk_umem_info *umem) 207 { 208 u64 addr; 209 210 addr = umem->next_buffer; 211 umem->next_buffer += umem->frame_size; 212 if (umem->next_buffer >= umem->base_addr + umem_size(umem)) 213 umem->next_buffer = umem->base_addr; 214 215 return addr; 216 } 217 218 static void umem_reset_alloc(struct xsk_umem_info *umem) 219 { 220 umem->next_buffer = 0; 221 } 222 223 static void enable_busy_poll(struct xsk_socket_info *xsk) 224 { 225 int sock_opt; 226 227 sock_opt = 1; 228 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL, 229 (void *)&sock_opt, sizeof(sock_opt)) < 0) 230 exit_with_error(errno); 231 232 sock_opt = 20; 233 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL, 234 (void *)&sock_opt, sizeof(sock_opt)) < 0) 235 exit_with_error(errno); 236 237 sock_opt = BATCH_SIZE; 238 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET, 239 (void *)&sock_opt, sizeof(sock_opt)) < 0) 240 exit_with_error(errno); 241 } 242 243 static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, 244 struct ifobject *ifobject, bool shared) 245 { 246 struct xsk_socket_config cfg = {}; 247 struct xsk_ring_cons *rxr; 248 struct xsk_ring_prod *txr; 249 250 xsk->umem = umem; 251 cfg.rx_size = xsk->rxqsize; 252 cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; 253 cfg.bind_flags = ifobject->bind_flags; 254 if (shared) 255 cfg.bind_flags |= XDP_SHARED_UMEM; 256 257 txr = ifobject->tx_on ? &xsk->tx : NULL; 258 rxr = ifobject->rx_on ? &xsk->rx : NULL; 259 return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg); 260 } 261 262 static bool ifobj_zc_avail(struct ifobject *ifobject) 263 { 264 size_t umem_sz = DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE; 265 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; 266 struct xsk_socket_info *xsk; 267 struct xsk_umem_info *umem; 268 bool zc_avail = false; 269 void *bufs; 270 int ret; 271 272 bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); 273 if (bufs == MAP_FAILED) 274 exit_with_error(errno); 275 276 umem = calloc(1, sizeof(struct xsk_umem_info)); 277 if (!umem) { 278 munmap(bufs, umem_sz); 279 exit_with_error(ENOMEM); 280 } 281 umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 282 ret = xsk_configure_umem(ifobject, umem, bufs, umem_sz); 283 if (ret) 284 exit_with_error(-ret); 285 286 xsk = calloc(1, sizeof(struct xsk_socket_info)); 287 if (!xsk) 288 goto out; 289 ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY; 290 ifobject->rx_on = true; 291 xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; 292 ret = __xsk_configure_socket(xsk, umem, ifobject, false); 293 if (!ret) 294 zc_avail = true; 295 296 xsk_socket__delete(xsk->xsk); 297 free(xsk); 298 out: 299 munmap(umem->buffer, umem_sz); 300 xsk_umem__delete(umem->umem); 301 free(umem); 302 return zc_avail; 303 } 304 305 static struct option long_options[] = { 306 {"interface", required_argument, 0, 'i'}, 307 {"busy-poll", no_argument, 0, 'b'}, 308 {"verbose", no_argument, 0, 'v'}, 309 {0, 0, 0, 0} 310 }; 311 312 static void usage(const char *prog) 313 { 314 const char *str = 315 " Usage: %s [OPTIONS]\n" 316 " Options:\n" 317 " -i, --interface Use interface\n" 318 " -v, --verbose Verbose output\n" 319 " -b, --busy-poll Enable busy poll\n"; 320 321 ksft_print_msg(str, prog); 322 } 323 324 static bool validate_interface(struct ifobject *ifobj) 325 { 326 if (!strcmp(ifobj->ifname, "")) 327 return false; 328 return true; 329 } 330 331 static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, int argc, 332 char **argv) 333 { 334 struct ifobject *ifobj; 335 u32 interface_nb = 0; 336 int option_index, c; 337 338 opterr = 0; 339 340 for (;;) { 341 c = getopt_long(argc, argv, "i:vb", long_options, &option_index); 342 if (c == -1) 343 break; 344 345 switch (c) { 346 case 'i': 347 if (interface_nb == 0) 348 ifobj = ifobj_tx; 349 else if (interface_nb == 1) 350 ifobj = ifobj_rx; 351 else 352 break; 353 354 memcpy(ifobj->ifname, optarg, 355 min_t(size_t, MAX_INTERFACE_NAME_CHARS, strlen(optarg))); 356 357 ifobj->ifindex = if_nametoindex(ifobj->ifname); 358 if (!ifobj->ifindex) 359 exit_with_error(errno); 360 361 interface_nb++; 362 break; 363 case 'v': 364 opt_verbose = true; 365 break; 366 case 'b': 367 ifobj_tx->busy_poll = true; 368 ifobj_rx->busy_poll = true; 369 break; 370 default: 371 usage(basename(argv[0])); 372 ksft_exit_xfail(); 373 } 374 } 375 } 376 377 static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, 378 struct ifobject *ifobj_rx) 379 { 380 u32 i, j; 381 382 for (i = 0; i < MAX_INTERFACES; i++) { 383 struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx; 384 385 ifobj->xsk = &ifobj->xsk_arr[0]; 386 ifobj->use_poll = false; 387 ifobj->use_fill_ring = true; 388 ifobj->release_rx = true; 389 ifobj->validation_func = NULL; 390 ifobj->use_metadata = false; 391 392 if (i == 0) { 393 ifobj->rx_on = false; 394 ifobj->tx_on = true; 395 ifobj->pkt_stream = test->tx_pkt_stream_default; 396 } else { 397 ifobj->rx_on = true; 398 ifobj->tx_on = false; 399 ifobj->pkt_stream = test->rx_pkt_stream_default; 400 } 401 402 memset(ifobj->umem, 0, sizeof(*ifobj->umem)); 403 ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS; 404 ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 405 406 for (j = 0; j < MAX_SOCKETS; j++) { 407 memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j])); 408 ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; 409 } 410 } 411 412 test->ifobj_tx = ifobj_tx; 413 test->ifobj_rx = ifobj_rx; 414 test->current_step = 0; 415 test->total_steps = 1; 416 test->nb_sockets = 1; 417 test->fail = false; 418 test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog; 419 test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk; 420 test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog; 421 test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk; 422 } 423 424 static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, 425 struct ifobject *ifobj_rx, enum test_mode mode) 426 { 427 struct pkt_stream *tx_pkt_stream; 428 struct pkt_stream *rx_pkt_stream; 429 u32 i; 430 431 tx_pkt_stream = test->tx_pkt_stream_default; 432 rx_pkt_stream = test->rx_pkt_stream_default; 433 memset(test, 0, sizeof(*test)); 434 test->tx_pkt_stream_default = tx_pkt_stream; 435 test->rx_pkt_stream_default = rx_pkt_stream; 436 437 for (i = 0; i < MAX_INTERFACES; i++) { 438 struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx; 439 440 ifobj->bind_flags = XDP_USE_NEED_WAKEUP; 441 if (mode == TEST_MODE_ZC) 442 ifobj->bind_flags |= XDP_ZEROCOPY; 443 else 444 ifobj->bind_flags |= XDP_COPY; 445 } 446 447 test->mode = mode; 448 __test_spec_init(test, ifobj_tx, ifobj_rx); 449 } 450 451 static void test_spec_reset(struct test_spec *test) 452 { 453 __test_spec_init(test, test->ifobj_tx, test->ifobj_rx); 454 } 455 456 static void test_spec_set_name(struct test_spec *test, const char *name) 457 { 458 strncpy(test->name, name, MAX_TEST_NAME_SIZE); 459 } 460 461 static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx, 462 struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx, 463 struct bpf_map *xskmap_tx) 464 { 465 test->xdp_prog_rx = xdp_prog_rx; 466 test->xdp_prog_tx = xdp_prog_tx; 467 test->xskmap_rx = xskmap_rx; 468 test->xskmap_tx = xskmap_tx; 469 } 470 471 static void pkt_stream_reset(struct pkt_stream *pkt_stream) 472 { 473 if (pkt_stream) 474 pkt_stream->current_pkt_nb = 0; 475 } 476 477 static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream) 478 { 479 if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) 480 return NULL; 481 482 return &pkt_stream->pkts[pkt_stream->current_pkt_nb++]; 483 } 484 485 static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent) 486 { 487 while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) { 488 (*pkts_sent)++; 489 if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid) 490 return &pkt_stream->pkts[pkt_stream->current_pkt_nb++]; 491 pkt_stream->current_pkt_nb++; 492 } 493 return NULL; 494 } 495 496 static void pkt_stream_delete(struct pkt_stream *pkt_stream) 497 { 498 free(pkt_stream->pkts); 499 free(pkt_stream); 500 } 501 502 static void pkt_stream_restore_default(struct test_spec *test) 503 { 504 struct pkt_stream *tx_pkt_stream = test->ifobj_tx->pkt_stream; 505 struct pkt_stream *rx_pkt_stream = test->ifobj_rx->pkt_stream; 506 507 if (tx_pkt_stream != test->tx_pkt_stream_default) { 508 pkt_stream_delete(test->ifobj_tx->pkt_stream); 509 test->ifobj_tx->pkt_stream = test->tx_pkt_stream_default; 510 } 511 512 if (rx_pkt_stream != test->rx_pkt_stream_default) { 513 pkt_stream_delete(test->ifobj_rx->pkt_stream); 514 test->ifobj_rx->pkt_stream = test->rx_pkt_stream_default; 515 } 516 } 517 518 static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts) 519 { 520 struct pkt_stream *pkt_stream; 521 522 pkt_stream = calloc(1, sizeof(*pkt_stream)); 523 if (!pkt_stream) 524 return NULL; 525 526 pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts)); 527 if (!pkt_stream->pkts) { 528 free(pkt_stream); 529 return NULL; 530 } 531 532 pkt_stream->nb_pkts = nb_pkts; 533 return pkt_stream; 534 } 535 536 static u32 ceil_u32(u32 a, u32 b) 537 { 538 return (a + b - 1) / b; 539 } 540 541 static u32 pkt_nb_frags(u32 frame_size, struct pkt *pkt) 542 { 543 if (!pkt || !pkt->valid) 544 return 1; 545 return ceil_u32(pkt->len, frame_size); 546 } 547 548 static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, int offset, u32 len) 549 { 550 pkt->offset = offset; 551 pkt->len = len; 552 if (len > umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 2 - umem->frame_headroom) 553 pkt->valid = false; 554 else 555 pkt->valid = true; 556 } 557 558 static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len) 559 { 560 return ceil_u32(len, umem->frame_size) * umem->frame_size; 561 } 562 563 static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len) 564 { 565 struct pkt_stream *pkt_stream; 566 u32 i; 567 568 pkt_stream = __pkt_stream_alloc(nb_pkts); 569 if (!pkt_stream) 570 exit_with_error(ENOMEM); 571 572 pkt_stream->nb_pkts = nb_pkts; 573 pkt_stream->max_pkt_len = pkt_len; 574 for (i = 0; i < nb_pkts; i++) { 575 struct pkt *pkt = &pkt_stream->pkts[i]; 576 577 pkt_set(umem, pkt, 0, pkt_len); 578 pkt->pkt_nb = i; 579 } 580 581 return pkt_stream; 582 } 583 584 static struct pkt_stream *pkt_stream_clone(struct xsk_umem_info *umem, 585 struct pkt_stream *pkt_stream) 586 { 587 return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len); 588 } 589 590 static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len) 591 { 592 struct pkt_stream *pkt_stream; 593 594 pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len); 595 test->ifobj_tx->pkt_stream = pkt_stream; 596 pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len); 597 test->ifobj_rx->pkt_stream = pkt_stream; 598 } 599 600 static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len, 601 int offset) 602 { 603 struct xsk_umem_info *umem = ifobj->umem; 604 struct pkt_stream *pkt_stream; 605 u32 i; 606 607 pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream); 608 for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2) 609 pkt_set(umem, &pkt_stream->pkts[i], offset, pkt_len); 610 611 ifobj->pkt_stream = pkt_stream; 612 } 613 614 static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset) 615 { 616 __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset); 617 __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset); 618 } 619 620 static void pkt_stream_receive_half(struct test_spec *test) 621 { 622 struct xsk_umem_info *umem = test->ifobj_rx->umem; 623 struct pkt_stream *pkt_stream = test->ifobj_tx->pkt_stream; 624 u32 i; 625 626 test->ifobj_rx->pkt_stream = pkt_stream_generate(umem, pkt_stream->nb_pkts, 627 pkt_stream->pkts[0].len); 628 pkt_stream = test->ifobj_rx->pkt_stream; 629 for (i = 1; i < pkt_stream->nb_pkts; i += 2) 630 pkt_stream->pkts[i].valid = false; 631 } 632 633 static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem) 634 { 635 if (!pkt->valid) 636 return pkt->offset; 637 return pkt->offset + umem_alloc_buffer(umem); 638 } 639 640 static void pkt_generate(struct ifobject *ifobject, u64 addr, u32 len, u32 pkt_nb, 641 u32 bytes_written) 642 { 643 void *data = xsk_umem__get_data(ifobject->umem->buffer, addr); 644 645 if (len < MIN_PKT_SIZE) 646 return; 647 648 if (!bytes_written) { 649 gen_eth_hdr(ifobject, data); 650 651 len -= PKT_HDR_SIZE; 652 data += PKT_HDR_SIZE; 653 } else { 654 bytes_written -= PKT_HDR_SIZE; 655 } 656 657 write_payload(data, pkt_nb, bytes_written, len); 658 } 659 660 static void __pkt_stream_generate_custom(struct ifobject *ifobj, 661 struct pkt *pkts, u32 nb_pkts) 662 { 663 struct pkt_stream *pkt_stream; 664 u32 i; 665 666 pkt_stream = __pkt_stream_alloc(nb_pkts); 667 if (!pkt_stream) 668 exit_with_error(ENOMEM); 669 670 for (i = 0; i < nb_pkts; i++) { 671 struct pkt *pkt = &pkt_stream->pkts[i]; 672 673 pkt->offset = pkts[i].offset; 674 pkt->len = pkts[i].len; 675 pkt->pkt_nb = i; 676 pkt->valid = pkts[i].valid; 677 if (pkt->len > pkt_stream->max_pkt_len) 678 pkt_stream->max_pkt_len = pkt->len; 679 } 680 681 ifobj->pkt_stream = pkt_stream; 682 } 683 684 static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts) 685 { 686 __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts); 687 __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts); 688 } 689 690 static void pkt_print_data(u32 *data, u32 cnt) 691 { 692 u32 i; 693 694 for (i = 0; i < cnt; i++) { 695 u32 seqnum, pkt_nb; 696 697 seqnum = ntohl(*data) & 0xffff; 698 pkt_nb = ntohl(*data) >> 16; 699 fprintf(stdout, "%u:%u ", pkt_nb, seqnum); 700 data++; 701 } 702 } 703 704 static void pkt_dump(void *pkt, u32 len, bool eth_header) 705 { 706 struct ethhdr *ethhdr = pkt; 707 u32 i, *data; 708 709 if (eth_header) { 710 /*extract L2 frame */ 711 fprintf(stdout, "DEBUG>> L2: dst mac: "); 712 for (i = 0; i < ETH_ALEN; i++) 713 fprintf(stdout, "%02X", ethhdr->h_dest[i]); 714 715 fprintf(stdout, "\nDEBUG>> L2: src mac: "); 716 for (i = 0; i < ETH_ALEN; i++) 717 fprintf(stdout, "%02X", ethhdr->h_source[i]); 718 719 data = pkt + PKT_HDR_SIZE; 720 } else { 721 data = pkt; 722 } 723 724 /*extract L5 frame */ 725 fprintf(stdout, "\nDEBUG>> L5: seqnum: "); 726 pkt_print_data(data, PKT_DUMP_NB_TO_PRINT); 727 fprintf(stdout, "...."); 728 if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) { 729 fprintf(stdout, "\n.... "); 730 pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT, 731 PKT_DUMP_NB_TO_PRINT); 732 } 733 fprintf(stdout, "\n---------------------------------------\n"); 734 } 735 736 static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr) 737 { 738 u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom; 739 u32 offset = addr % umem->frame_size, expected_offset; 740 int pkt_offset = pkt->valid ? pkt->offset : 0; 741 742 if (!umem->unaligned_mode) 743 pkt_offset = 0; 744 745 expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size; 746 747 if (offset == expected_offset) 748 return true; 749 750 ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset); 751 return false; 752 } 753 754 static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr) 755 { 756 void *data = xsk_umem__get_data(buffer, addr); 757 struct xdp_info *meta = data - sizeof(struct xdp_info); 758 759 if (meta->count != pkt->pkt_nb) { 760 ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%d]\n", 761 __func__, pkt->pkt_nb, meta->count); 762 return false; 763 } 764 765 return true; 766 } 767 768 static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) 769 { 770 void *data = xsk_umem__get_data(buffer, addr); 771 u32 seqnum, pkt_data; 772 773 if (!pkt) { 774 ksft_print_msg("[%s] too many packets received\n", __func__); 775 goto error; 776 } 777 778 if (len < MIN_PKT_SIZE || pkt->len < MIN_PKT_SIZE) { 779 /* Do not try to verify packets that are smaller than minimum size. */ 780 return true; 781 } 782 783 if (pkt->len != len) { 784 ksft_print_msg("[%s] expected length [%d], got length [%d]\n", 785 __func__, pkt->len, len); 786 goto error; 787 } 788 789 pkt_data = ntohl(*((u32 *)(data + PKT_HDR_SIZE))); 790 seqnum = pkt_data >> 16; 791 792 if (pkt->pkt_nb != seqnum) { 793 ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n", 794 __func__, pkt->pkt_nb, seqnum); 795 goto error; 796 } 797 798 return true; 799 800 error: 801 pkt_dump(data, len, true); 802 return false; 803 } 804 805 static void kick_tx(struct xsk_socket_info *xsk) 806 { 807 int ret; 808 809 ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0); 810 if (ret >= 0) 811 return; 812 if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) { 813 usleep(100); 814 return; 815 } 816 exit_with_error(errno); 817 } 818 819 static void kick_rx(struct xsk_socket_info *xsk) 820 { 821 int ret; 822 823 ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL); 824 if (ret < 0) 825 exit_with_error(errno); 826 } 827 828 static int complete_pkts(struct xsk_socket_info *xsk, int batch_size) 829 { 830 unsigned int rcvd; 831 u32 idx; 832 833 if (xsk_ring_prod__needs_wakeup(&xsk->tx)) 834 kick_tx(xsk); 835 836 rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx); 837 if (rcvd) { 838 if (rcvd > xsk->outstanding_tx) { 839 u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1); 840 841 ksft_print_msg("[%s] Too many packets completed\n", __func__); 842 ksft_print_msg("Last completion address: %llx\n", addr); 843 return TEST_FAILURE; 844 } 845 846 xsk_ring_cons__release(&xsk->umem->cq, rcvd); 847 xsk->outstanding_tx -= rcvd; 848 } 849 850 return TEST_PASS; 851 } 852 853 static int receive_pkts(struct test_spec *test, struct pollfd *fds) 854 { 855 struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0}; 856 struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream; 857 u32 idx_rx = 0, idx_fq = 0, rcvd, i, pkts_sent = 0; 858 struct xsk_socket_info *xsk = test->ifobj_rx->xsk; 859 struct ifobject *ifobj = test->ifobj_rx; 860 struct xsk_umem_info *umem = xsk->umem; 861 struct pkt *pkt; 862 int ret; 863 864 ret = gettimeofday(&tv_now, NULL); 865 if (ret) 866 exit_with_error(errno); 867 timeradd(&tv_now, &tv_timeout, &tv_end); 868 869 pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent); 870 while (pkt) { 871 ret = gettimeofday(&tv_now, NULL); 872 if (ret) 873 exit_with_error(errno); 874 if (timercmp(&tv_now, &tv_end, >)) { 875 ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__); 876 return TEST_FAILURE; 877 } 878 879 kick_rx(xsk); 880 if (ifobj->use_poll) { 881 ret = poll(fds, 1, POLL_TMOUT); 882 if (ret < 0) 883 exit_with_error(errno); 884 885 if (!ret) { 886 if (!is_umem_valid(test->ifobj_tx)) 887 return TEST_PASS; 888 889 ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__); 890 return TEST_FAILURE; 891 892 } 893 894 if (!(fds->revents & POLLIN)) 895 continue; 896 } 897 898 rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx); 899 if (!rcvd) 900 continue; 901 902 if (ifobj->use_fill_ring) { 903 ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); 904 while (ret != rcvd) { 905 if (ret < 0) 906 exit_with_error(-ret); 907 if (xsk_ring_prod__needs_wakeup(&umem->fq)) { 908 ret = poll(fds, 1, POLL_TMOUT); 909 if (ret < 0) 910 exit_with_error(errno); 911 } 912 ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); 913 } 914 } 915 916 for (i = 0; i < rcvd; i++) { 917 const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++); 918 u64 addr = desc->addr, orig; 919 920 orig = xsk_umem__extract_addr(addr); 921 addr = xsk_umem__add_offset_to_addr(addr); 922 923 if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len) || 924 !is_offset_correct(umem, pkt, addr) || 925 (ifobj->use_metadata && !is_metadata_correct(pkt, umem->buffer, addr))) 926 return TEST_FAILURE; 927 928 if (ifobj->use_fill_ring) 929 *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig; 930 pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent); 931 } 932 933 if (ifobj->use_fill_ring) 934 xsk_ring_prod__submit(&umem->fq, rcvd); 935 if (ifobj->release_rx) 936 xsk_ring_cons__release(&xsk->rx, rcvd); 937 938 pthread_mutex_lock(&pacing_mutex); 939 pkts_in_flight -= pkts_sent; 940 pthread_mutex_unlock(&pacing_mutex); 941 pkts_sent = 0; 942 } 943 944 return TEST_PASS; 945 } 946 947 static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeout) 948 { 949 struct xsk_socket_info *xsk = ifobject->xsk; 950 struct xsk_umem_info *umem = ifobject->umem; 951 u32 i, idx = 0, valid_pkts = 0, buffer_len; 952 bool use_poll = ifobject->use_poll; 953 int ret; 954 955 buffer_len = pkt_get_buffer_len(umem, ifobject->pkt_stream->max_pkt_len); 956 /* pkts_in_flight might be negative if many invalid packets are sent */ 957 if (pkts_in_flight >= (int)((umem_size(umem) - BATCH_SIZE * buffer_len) / buffer_len)) { 958 kick_tx(xsk); 959 return TEST_CONTINUE; 960 } 961 962 while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) { 963 if (use_poll) { 964 ret = poll(fds, 1, POLL_TMOUT); 965 if (timeout) { 966 if (ret < 0) { 967 ksft_print_msg("ERROR: [%s] Poll error %d\n", 968 __func__, errno); 969 return TEST_FAILURE; 970 } 971 if (ret == 0) 972 return TEST_PASS; 973 break; 974 } 975 if (ret <= 0) { 976 ksft_print_msg("ERROR: [%s] Poll error %d\n", 977 __func__, errno); 978 return TEST_FAILURE; 979 } 980 } 981 982 complete_pkts(xsk, BATCH_SIZE); 983 } 984 985 for (i = 0; i < BATCH_SIZE; i++) { 986 struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i); 987 struct pkt *pkt = pkt_stream_get_next_tx_pkt(ifobject->pkt_stream); 988 989 if (!pkt) 990 break; 991 992 tx_desc->addr = pkt_get_addr(pkt, umem); 993 tx_desc->len = pkt->len; 994 if (pkt->valid) { 995 valid_pkts++; 996 pkt_generate(ifobject, tx_desc->addr, tx_desc->len, pkt->pkt_nb, 0); 997 } 998 } 999 1000 pthread_mutex_lock(&pacing_mutex); 1001 pkts_in_flight += valid_pkts; 1002 pthread_mutex_unlock(&pacing_mutex); 1003 1004 xsk_ring_prod__submit(&xsk->tx, i); 1005 xsk->outstanding_tx += valid_pkts; 1006 1007 if (use_poll) { 1008 ret = poll(fds, 1, POLL_TMOUT); 1009 if (ret <= 0) { 1010 if (ret == 0 && timeout) 1011 return TEST_PASS; 1012 1013 ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret); 1014 return TEST_FAILURE; 1015 } 1016 } 1017 1018 if (!timeout) { 1019 if (complete_pkts(xsk, i)) 1020 return TEST_FAILURE; 1021 1022 usleep(10); 1023 return TEST_PASS; 1024 } 1025 1026 return TEST_CONTINUE; 1027 } 1028 1029 static void wait_for_tx_completion(struct xsk_socket_info *xsk) 1030 { 1031 while (xsk->outstanding_tx) 1032 complete_pkts(xsk, BATCH_SIZE); 1033 } 1034 1035 static int send_pkts(struct test_spec *test, struct ifobject *ifobject) 1036 { 1037 struct pkt_stream *pkt_stream = ifobject->pkt_stream; 1038 bool timeout = !is_umem_valid(test->ifobj_rx); 1039 struct pollfd fds = { }; 1040 u32 ret; 1041 1042 fds.fd = xsk_socket__fd(ifobject->xsk->xsk); 1043 fds.events = POLLOUT; 1044 1045 while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) { 1046 ret = __send_pkts(ifobject, &fds, timeout); 1047 if (ret == TEST_CONTINUE && !test->fail) 1048 continue; 1049 if ((ret || test->fail) && !timeout) 1050 return TEST_FAILURE; 1051 if (ret == TEST_PASS && timeout) 1052 return ret; 1053 } 1054 1055 wait_for_tx_completion(ifobject->xsk); 1056 return TEST_PASS; 1057 } 1058 1059 static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats) 1060 { 1061 int fd = xsk_socket__fd(xsk), err; 1062 socklen_t optlen, expected_len; 1063 1064 optlen = sizeof(*stats); 1065 err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen); 1066 if (err) { 1067 ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n", 1068 __func__, -err, strerror(-err)); 1069 return TEST_FAILURE; 1070 } 1071 1072 expected_len = sizeof(struct xdp_statistics); 1073 if (optlen != expected_len) { 1074 ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n", 1075 __func__, expected_len, optlen); 1076 return TEST_FAILURE; 1077 } 1078 1079 return TEST_PASS; 1080 } 1081 1082 static int validate_rx_dropped(struct ifobject *ifobject) 1083 { 1084 struct xsk_socket *xsk = ifobject->xsk->xsk; 1085 struct xdp_statistics stats; 1086 int err; 1087 1088 kick_rx(ifobject->xsk); 1089 1090 err = get_xsk_stats(xsk, &stats); 1091 if (err) 1092 return TEST_FAILURE; 1093 1094 /* The receiver calls getsockopt after receiving the last (valid) 1095 * packet which is not the final packet sent in this test (valid and 1096 * invalid packets are sent in alternating fashion with the final 1097 * packet being invalid). Since the last packet may or may not have 1098 * been dropped already, both outcomes must be allowed. 1099 */ 1100 if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 || 1101 stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 - 1) 1102 return TEST_PASS; 1103 1104 return TEST_FAILURE; 1105 } 1106 1107 static int validate_rx_full(struct ifobject *ifobject) 1108 { 1109 struct xsk_socket *xsk = ifobject->xsk->xsk; 1110 struct xdp_statistics stats; 1111 int err; 1112 1113 usleep(1000); 1114 kick_rx(ifobject->xsk); 1115 1116 err = get_xsk_stats(xsk, &stats); 1117 if (err) 1118 return TEST_FAILURE; 1119 1120 if (stats.rx_ring_full) 1121 return TEST_PASS; 1122 1123 return TEST_FAILURE; 1124 } 1125 1126 static int validate_fill_empty(struct ifobject *ifobject) 1127 { 1128 struct xsk_socket *xsk = ifobject->xsk->xsk; 1129 struct xdp_statistics stats; 1130 int err; 1131 1132 usleep(1000); 1133 kick_rx(ifobject->xsk); 1134 1135 err = get_xsk_stats(xsk, &stats); 1136 if (err) 1137 return TEST_FAILURE; 1138 1139 if (stats.rx_fill_ring_empty_descs) 1140 return TEST_PASS; 1141 1142 return TEST_FAILURE; 1143 } 1144 1145 static int validate_tx_invalid_descs(struct ifobject *ifobject) 1146 { 1147 struct xsk_socket *xsk = ifobject->xsk->xsk; 1148 int fd = xsk_socket__fd(xsk); 1149 struct xdp_statistics stats; 1150 socklen_t optlen; 1151 int err; 1152 1153 optlen = sizeof(stats); 1154 err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen); 1155 if (err) { 1156 ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n", 1157 __func__, -err, strerror(-err)); 1158 return TEST_FAILURE; 1159 } 1160 1161 if (stats.tx_invalid_descs != ifobject->pkt_stream->nb_pkts / 2) { 1162 ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%u] expected [%u]\n", 1163 __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts); 1164 return TEST_FAILURE; 1165 } 1166 1167 return TEST_PASS; 1168 } 1169 1170 static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject, 1171 struct xsk_umem_info *umem, bool tx) 1172 { 1173 int i, ret; 1174 1175 for (i = 0; i < test->nb_sockets; i++) { 1176 bool shared = (ifobject->shared_umem && tx) ? true : !!i; 1177 u32 ctr = 0; 1178 1179 while (ctr++ < SOCK_RECONF_CTR) { 1180 ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem, 1181 ifobject, shared); 1182 if (!ret) 1183 break; 1184 1185 /* Retry if it fails as xsk_socket__create() is asynchronous */ 1186 if (ctr >= SOCK_RECONF_CTR) 1187 exit_with_error(-ret); 1188 usleep(USLEEP_MAX); 1189 } 1190 if (ifobject->busy_poll) 1191 enable_busy_poll(&ifobject->xsk_arr[i]); 1192 } 1193 } 1194 1195 static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject) 1196 { 1197 xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true); 1198 ifobject->xsk = &ifobject->xsk_arr[0]; 1199 ifobject->xskmap = test->ifobj_rx->xskmap; 1200 memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info)); 1201 ifobject->umem->base_addr = 0; 1202 } 1203 1204 static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, 1205 bool fill_up) 1206 { 1207 u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM; 1208 u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts; 1209 int ret; 1210 1211 if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) 1212 buffers_to_fill = umem->num_frames; 1213 else 1214 buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS; 1215 1216 ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); 1217 if (ret != buffers_to_fill) 1218 exit_with_error(ENOSPC); 1219 1220 while (filled < buffers_to_fill) { 1221 struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts); 1222 u64 addr; 1223 u32 i; 1224 1225 for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt); i++) { 1226 if (!pkt) { 1227 if (!fill_up) 1228 break; 1229 addr = filled * umem->frame_size + umem->base_addr; 1230 } else if (pkt->offset >= 0) { 1231 addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem); 1232 } else { 1233 addr = pkt->offset + umem_alloc_buffer(umem); 1234 } 1235 1236 *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; 1237 if (++filled >= buffers_to_fill) 1238 break; 1239 } 1240 } 1241 xsk_ring_prod__submit(&umem->fq, filled); 1242 xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled); 1243 1244 pkt_stream_reset(pkt_stream); 1245 umem_reset_alloc(umem); 1246 } 1247 1248 static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) 1249 { 1250 u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size; 1251 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; 1252 LIBBPF_OPTS(bpf_xdp_query_opts, opts); 1253 void *bufs; 1254 int ret; 1255 1256 if (ifobject->umem->unaligned_mode) 1257 mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB; 1258 1259 if (ifobject->shared_umem) 1260 umem_sz *= 2; 1261 1262 bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); 1263 if (bufs == MAP_FAILED) 1264 exit_with_error(errno); 1265 1266 ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz); 1267 if (ret) 1268 exit_with_error(-ret); 1269 1270 xsk_configure_socket(test, ifobject, ifobject->umem, false); 1271 1272 ifobject->xsk = &ifobject->xsk_arr[0]; 1273 1274 if (!ifobject->rx_on) 1275 return; 1276 1277 xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream, ifobject->use_fill_ring); 1278 1279 ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk); 1280 if (ret) 1281 exit_with_error(errno); 1282 } 1283 1284 static void *worker_testapp_validate_tx(void *arg) 1285 { 1286 struct test_spec *test = (struct test_spec *)arg; 1287 struct ifobject *ifobject = test->ifobj_tx; 1288 int err; 1289 1290 if (test->current_step == 1) { 1291 if (!ifobject->shared_umem) 1292 thread_common_ops(test, ifobject); 1293 else 1294 thread_common_ops_tx(test, ifobject); 1295 } 1296 1297 print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts, 1298 ifobject->ifname); 1299 err = send_pkts(test, ifobject); 1300 1301 if (!err && ifobject->validation_func) 1302 err = ifobject->validation_func(ifobject); 1303 if (err) 1304 report_failure(test); 1305 1306 pthread_exit(NULL); 1307 } 1308 1309 static void *worker_testapp_validate_rx(void *arg) 1310 { 1311 struct test_spec *test = (struct test_spec *)arg; 1312 struct ifobject *ifobject = test->ifobj_rx; 1313 struct pollfd fds = { }; 1314 int err; 1315 1316 if (test->current_step == 1) { 1317 thread_common_ops(test, ifobject); 1318 } else { 1319 xsk_clear_xskmap(ifobject->xskmap); 1320 err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk); 1321 if (err) { 1322 printf("Error: Failed to update xskmap, error %s\n", strerror(-err)); 1323 exit_with_error(-err); 1324 } 1325 } 1326 1327 fds.fd = xsk_socket__fd(ifobject->xsk->xsk); 1328 fds.events = POLLIN; 1329 1330 pthread_barrier_wait(&barr); 1331 1332 err = receive_pkts(test, &fds); 1333 1334 if (!err && ifobject->validation_func) 1335 err = ifobject->validation_func(ifobject); 1336 if (err) 1337 report_failure(test); 1338 1339 pthread_exit(NULL); 1340 } 1341 1342 static u64 ceil_u64(u64 a, u64 b) 1343 { 1344 return (a + b - 1) / b; 1345 } 1346 1347 static void testapp_clean_xsk_umem(struct ifobject *ifobj) 1348 { 1349 u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size; 1350 1351 if (ifobj->shared_umem) 1352 umem_sz *= 2; 1353 1354 umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE; 1355 xsk_umem__delete(ifobj->umem->umem); 1356 munmap(ifobj->umem->buffer, umem_sz); 1357 } 1358 1359 static void handler(int signum) 1360 { 1361 pthread_exit(NULL); 1362 } 1363 1364 static bool xdp_prog_changed_rx(struct test_spec *test) 1365 { 1366 struct ifobject *ifobj = test->ifobj_rx; 1367 1368 return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode; 1369 } 1370 1371 static bool xdp_prog_changed_tx(struct test_spec *test) 1372 { 1373 struct ifobject *ifobj = test->ifobj_tx; 1374 1375 return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode; 1376 } 1377 1378 static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog, 1379 struct bpf_map *xskmap, enum test_mode mode) 1380 { 1381 int err; 1382 1383 xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode)); 1384 err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode)); 1385 if (err) { 1386 printf("Error attaching XDP program\n"); 1387 exit_with_error(-err); 1388 } 1389 1390 if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC)) 1391 if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) { 1392 ksft_print_msg("ERROR: XDP prog not in DRV mode\n"); 1393 exit_with_error(EINVAL); 1394 } 1395 1396 ifobj->xdp_prog = xdp_prog; 1397 ifobj->xskmap = xskmap; 1398 ifobj->mode = mode; 1399 } 1400 1401 static void xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx, 1402 struct ifobject *ifobj_tx) 1403 { 1404 if (xdp_prog_changed_rx(test)) 1405 xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode); 1406 1407 if (!ifobj_tx || ifobj_tx->shared_umem) 1408 return; 1409 1410 if (xdp_prog_changed_tx(test)) 1411 xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode); 1412 } 1413 1414 static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1, 1415 struct ifobject *ifobj2) 1416 { 1417 pthread_t t0, t1; 1418 1419 if (ifobj2) { 1420 if (pthread_barrier_init(&barr, NULL, 2)) 1421 exit_with_error(errno); 1422 pkt_stream_reset(ifobj2->pkt_stream); 1423 } 1424 1425 test->current_step++; 1426 pkt_stream_reset(ifobj1->pkt_stream); 1427 pkts_in_flight = 0; 1428 1429 signal(SIGUSR1, handler); 1430 /*Spawn RX thread */ 1431 pthread_create(&t0, NULL, ifobj1->func_ptr, test); 1432 1433 if (ifobj2) { 1434 pthread_barrier_wait(&barr); 1435 if (pthread_barrier_destroy(&barr)) 1436 exit_with_error(errno); 1437 1438 /*Spawn TX thread */ 1439 pthread_create(&t1, NULL, ifobj2->func_ptr, test); 1440 1441 pthread_join(t1, NULL); 1442 } 1443 1444 if (!ifobj2) 1445 pthread_kill(t0, SIGUSR1); 1446 else 1447 pthread_join(t0, NULL); 1448 1449 if (test->total_steps == test->current_step || test->fail) { 1450 if (ifobj2) 1451 xsk_socket__delete(ifobj2->xsk->xsk); 1452 xsk_socket__delete(ifobj1->xsk->xsk); 1453 testapp_clean_xsk_umem(ifobj1); 1454 if (ifobj2 && !ifobj2->shared_umem) 1455 testapp_clean_xsk_umem(ifobj2); 1456 } 1457 1458 return !!test->fail; 1459 } 1460 1461 static int testapp_validate_traffic(struct test_spec *test) 1462 { 1463 struct ifobject *ifobj_rx = test->ifobj_rx; 1464 struct ifobject *ifobj_tx = test->ifobj_tx; 1465 1466 if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) || 1467 (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) { 1468 ksft_test_result_skip("No huge pages present.\n"); 1469 return TEST_SKIP; 1470 } 1471 1472 xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx); 1473 return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx); 1474 } 1475 1476 static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj) 1477 { 1478 return __testapp_validate_traffic(test, ifobj, NULL); 1479 } 1480 1481 static int testapp_teardown(struct test_spec *test) 1482 { 1483 int i; 1484 1485 test_spec_set_name(test, "TEARDOWN"); 1486 for (i = 0; i < MAX_TEARDOWN_ITER; i++) { 1487 if (testapp_validate_traffic(test)) 1488 return TEST_FAILURE; 1489 test_spec_reset(test); 1490 } 1491 1492 return TEST_PASS; 1493 } 1494 1495 static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2) 1496 { 1497 thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr; 1498 struct ifobject *tmp_ifobj = (*ifobj1); 1499 1500 (*ifobj1)->func_ptr = (*ifobj2)->func_ptr; 1501 (*ifobj2)->func_ptr = tmp_func_ptr; 1502 1503 *ifobj1 = *ifobj2; 1504 *ifobj2 = tmp_ifobj; 1505 } 1506 1507 static int testapp_bidi(struct test_spec *test) 1508 { 1509 int res; 1510 1511 test_spec_set_name(test, "BIDIRECTIONAL"); 1512 test->ifobj_tx->rx_on = true; 1513 test->ifobj_rx->tx_on = true; 1514 test->total_steps = 2; 1515 if (testapp_validate_traffic(test)) 1516 return TEST_FAILURE; 1517 1518 print_verbose("Switching Tx/Rx vectors\n"); 1519 swap_directions(&test->ifobj_rx, &test->ifobj_tx); 1520 res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx); 1521 1522 swap_directions(&test->ifobj_rx, &test->ifobj_tx); 1523 return res; 1524 } 1525 1526 static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx) 1527 { 1528 int ret; 1529 1530 xsk_socket__delete(ifobj_tx->xsk->xsk); 1531 xsk_socket__delete(ifobj_rx->xsk->xsk); 1532 ifobj_tx->xsk = &ifobj_tx->xsk_arr[1]; 1533 ifobj_rx->xsk = &ifobj_rx->xsk_arr[1]; 1534 1535 ret = xsk_update_xskmap(ifobj_rx->xskmap, ifobj_rx->xsk->xsk); 1536 if (ret) 1537 exit_with_error(errno); 1538 } 1539 1540 static int testapp_bpf_res(struct test_spec *test) 1541 { 1542 test_spec_set_name(test, "BPF_RES"); 1543 test->total_steps = 2; 1544 test->nb_sockets = 2; 1545 if (testapp_validate_traffic(test)) 1546 return TEST_FAILURE; 1547 1548 swap_xsk_resources(test->ifobj_tx, test->ifobj_rx); 1549 return testapp_validate_traffic(test); 1550 } 1551 1552 static int testapp_headroom(struct test_spec *test) 1553 { 1554 test_spec_set_name(test, "UMEM_HEADROOM"); 1555 test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE; 1556 return testapp_validate_traffic(test); 1557 } 1558 1559 static int testapp_stats_rx_dropped(struct test_spec *test) 1560 { 1561 test_spec_set_name(test, "STAT_RX_DROPPED"); 1562 if (test->mode == TEST_MODE_ZC) { 1563 ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n"); 1564 return TEST_SKIP; 1565 } 1566 1567 pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0); 1568 test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size - 1569 XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3; 1570 pkt_stream_receive_half(test); 1571 test->ifobj_rx->validation_func = validate_rx_dropped; 1572 return testapp_validate_traffic(test); 1573 } 1574 1575 static int testapp_stats_tx_invalid_descs(struct test_spec *test) 1576 { 1577 test_spec_set_name(test, "STAT_TX_INVALID"); 1578 pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0); 1579 test->ifobj_tx->validation_func = validate_tx_invalid_descs; 1580 return testapp_validate_traffic(test); 1581 } 1582 1583 static int testapp_stats_rx_full(struct test_spec *test) 1584 { 1585 test_spec_set_name(test, "STAT_RX_FULL"); 1586 pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE); 1587 test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, 1588 DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); 1589 1590 test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS; 1591 test->ifobj_rx->release_rx = false; 1592 test->ifobj_rx->validation_func = validate_rx_full; 1593 return testapp_validate_traffic(test); 1594 } 1595 1596 static int testapp_stats_fill_empty(struct test_spec *test) 1597 { 1598 test_spec_set_name(test, "STAT_RX_FILL_EMPTY"); 1599 pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE); 1600 test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, 1601 DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); 1602 1603 test->ifobj_rx->use_fill_ring = false; 1604 test->ifobj_rx->validation_func = validate_fill_empty; 1605 return testapp_validate_traffic(test); 1606 } 1607 1608 static int testapp_unaligned(struct test_spec *test) 1609 { 1610 test_spec_set_name(test, "UNALIGNED_MODE"); 1611 test->ifobj_tx->umem->unaligned_mode = true; 1612 test->ifobj_rx->umem->unaligned_mode = true; 1613 /* Let half of the packets straddle a 4K buffer boundary */ 1614 pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2); 1615 1616 return testapp_validate_traffic(test); 1617 } 1618 1619 static int testapp_single_pkt(struct test_spec *test) 1620 { 1621 struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}}; 1622 1623 pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); 1624 return testapp_validate_traffic(test); 1625 } 1626 1627 static int testapp_invalid_desc(struct test_spec *test) 1628 { 1629 struct xsk_umem_info *umem = test->ifobj_tx->umem; 1630 u64 umem_size = umem->num_frames * umem->frame_size; 1631 struct pkt pkts[] = { 1632 /* Zero packet address allowed */ 1633 {0, MIN_PKT_SIZE, 0, true}, 1634 /* Allowed packet */ 1635 {0, MIN_PKT_SIZE, 0, true}, 1636 /* Straddling the start of umem */ 1637 {-2, MIN_PKT_SIZE, 0, false}, 1638 /* Packet too large */ 1639 {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false}, 1640 /* Up to end of umem allowed */ 1641 {umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true}, 1642 /* After umem ends */ 1643 {umem_size, MIN_PKT_SIZE, 0, false}, 1644 /* Straddle the end of umem */ 1645 {umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false}, 1646 /* Straddle a 4K boundary */ 1647 {0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false}, 1648 /* Straddle a 2K boundary */ 1649 {0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true}, 1650 /* Valid packet for synch so that something is received */ 1651 {0, MIN_PKT_SIZE, 0, true}}; 1652 1653 if (umem->unaligned_mode) { 1654 /* Crossing a page boundary allowed */ 1655 pkts[7].valid = true; 1656 } 1657 if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) { 1658 /* Crossing a 2K frame size boundary not allowed */ 1659 pkts[8].valid = false; 1660 } 1661 1662 if (test->ifobj_tx->shared_umem) { 1663 pkts[4].offset += umem_size; 1664 pkts[5].offset += umem_size; 1665 pkts[6].offset += umem_size; 1666 } 1667 1668 pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); 1669 return testapp_validate_traffic(test); 1670 } 1671 1672 static int testapp_xdp_drop(struct test_spec *test) 1673 { 1674 struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs; 1675 struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs; 1676 1677 test_spec_set_name(test, "XDP_DROP_HALF"); 1678 test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop, 1679 skel_rx->maps.xsk, skel_tx->maps.xsk); 1680 1681 pkt_stream_receive_half(test); 1682 return testapp_validate_traffic(test); 1683 } 1684 1685 static int testapp_xdp_metadata_count(struct test_spec *test) 1686 { 1687 struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs; 1688 struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs; 1689 struct bpf_map *data_map; 1690 int count = 0; 1691 int key = 0; 1692 1693 test_spec_set_name(test, "XDP_METADATA_COUNT"); 1694 test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata, 1695 skel_tx->progs.xsk_xdp_populate_metadata, 1696 skel_rx->maps.xsk, skel_tx->maps.xsk); 1697 test->ifobj_rx->use_metadata = true; 1698 1699 data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss"); 1700 if (!data_map || !bpf_map__is_internal(data_map)) 1701 exit_with_error(ENOMEM); 1702 1703 if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY)) 1704 exit_with_error(errno); 1705 1706 return testapp_validate_traffic(test); 1707 } 1708 1709 static int testapp_poll_txq_tmout(struct test_spec *test) 1710 { 1711 test_spec_set_name(test, "POLL_TXQ_FULL"); 1712 1713 test->ifobj_tx->use_poll = true; 1714 /* create invalid frame by set umem frame_size and pkt length equal to 2048 */ 1715 test->ifobj_tx->umem->frame_size = 2048; 1716 pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048); 1717 return testapp_validate_traffic_single_thread(test, test->ifobj_tx); 1718 } 1719 1720 static int testapp_poll_rxq_tmout(struct test_spec *test) 1721 { 1722 test_spec_set_name(test, "POLL_RXQ_EMPTY"); 1723 test->ifobj_rx->use_poll = true; 1724 return testapp_validate_traffic_single_thread(test, test->ifobj_rx); 1725 } 1726 1727 static int xsk_load_xdp_programs(struct ifobject *ifobj) 1728 { 1729 ifobj->xdp_progs = xsk_xdp_progs__open_and_load(); 1730 if (libbpf_get_error(ifobj->xdp_progs)) 1731 return libbpf_get_error(ifobj->xdp_progs); 1732 1733 return 0; 1734 } 1735 1736 static void xsk_unload_xdp_programs(struct ifobject *ifobj) 1737 { 1738 xsk_xdp_progs__destroy(ifobj->xdp_progs); 1739 } 1740 1741 /* Simple test */ 1742 static bool hugepages_present(void) 1743 { 1744 size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE; 1745 void *bufs; 1746 1747 bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, 1748 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, MAP_HUGE_2MB); 1749 if (bufs == MAP_FAILED) 1750 return false; 1751 1752 mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE; 1753 munmap(bufs, mmap_sz); 1754 return true; 1755 } 1756 1757 static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac, 1758 thread_func_t func_ptr) 1759 { 1760 int err; 1761 1762 memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN); 1763 memcpy(ifobj->src_mac, src_mac, ETH_ALEN); 1764 1765 ifobj->func_ptr = func_ptr; 1766 1767 err = xsk_load_xdp_programs(ifobj); 1768 if (err) { 1769 printf("Error loading XDP program\n"); 1770 exit_with_error(err); 1771 } 1772 1773 if (hugepages_present()) 1774 ifobj->unaligned_supp = true; 1775 } 1776 1777 static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type) 1778 { 1779 int ret = TEST_SKIP; 1780 1781 switch (type) { 1782 case TEST_TYPE_STATS_RX_DROPPED: 1783 ret = testapp_stats_rx_dropped(test); 1784 break; 1785 case TEST_TYPE_STATS_TX_INVALID_DESCS: 1786 ret = testapp_stats_tx_invalid_descs(test); 1787 break; 1788 case TEST_TYPE_STATS_RX_FULL: 1789 ret = testapp_stats_rx_full(test); 1790 break; 1791 case TEST_TYPE_STATS_FILL_EMPTY: 1792 ret = testapp_stats_fill_empty(test); 1793 break; 1794 case TEST_TYPE_TEARDOWN: 1795 ret = testapp_teardown(test); 1796 break; 1797 case TEST_TYPE_BIDI: 1798 ret = testapp_bidi(test); 1799 break; 1800 case TEST_TYPE_BPF_RES: 1801 ret = testapp_bpf_res(test); 1802 break; 1803 case TEST_TYPE_RUN_TO_COMPLETION: 1804 test_spec_set_name(test, "RUN_TO_COMPLETION"); 1805 ret = testapp_validate_traffic(test); 1806 break; 1807 case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT: 1808 test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT"); 1809 ret = testapp_single_pkt(test); 1810 break; 1811 case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME: 1812 test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE"); 1813 test->ifobj_tx->umem->frame_size = 2048; 1814 test->ifobj_rx->umem->frame_size = 2048; 1815 pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE); 1816 ret = testapp_validate_traffic(test); 1817 break; 1818 case TEST_TYPE_RX_POLL: 1819 test->ifobj_rx->use_poll = true; 1820 test_spec_set_name(test, "POLL_RX"); 1821 ret = testapp_validate_traffic(test); 1822 break; 1823 case TEST_TYPE_TX_POLL: 1824 test->ifobj_tx->use_poll = true; 1825 test_spec_set_name(test, "POLL_TX"); 1826 ret = testapp_validate_traffic(test); 1827 break; 1828 case TEST_TYPE_POLL_TXQ_TMOUT: 1829 ret = testapp_poll_txq_tmout(test); 1830 break; 1831 case TEST_TYPE_POLL_RXQ_TMOUT: 1832 ret = testapp_poll_rxq_tmout(test); 1833 break; 1834 case TEST_TYPE_ALIGNED_INV_DESC: 1835 test_spec_set_name(test, "ALIGNED_INV_DESC"); 1836 ret = testapp_invalid_desc(test); 1837 break; 1838 case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME: 1839 test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE"); 1840 test->ifobj_tx->umem->frame_size = 2048; 1841 test->ifobj_rx->umem->frame_size = 2048; 1842 ret = testapp_invalid_desc(test); 1843 break; 1844 case TEST_TYPE_UNALIGNED_INV_DESC: 1845 test_spec_set_name(test, "UNALIGNED_INV_DESC"); 1846 test->ifobj_tx->umem->unaligned_mode = true; 1847 test->ifobj_rx->umem->unaligned_mode = true; 1848 ret = testapp_invalid_desc(test); 1849 break; 1850 case TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME: { 1851 u64 page_size, umem_size; 1852 1853 test_spec_set_name(test, "UNALIGNED_INV_DESC_4K1_FRAME_SIZE"); 1854 /* Odd frame size so the UMEM doesn't end near a page boundary. */ 1855 test->ifobj_tx->umem->frame_size = 4001; 1856 test->ifobj_rx->umem->frame_size = 4001; 1857 test->ifobj_tx->umem->unaligned_mode = true; 1858 test->ifobj_rx->umem->unaligned_mode = true; 1859 /* This test exists to test descriptors that staddle the end of 1860 * the UMEM but not a page. 1861 */ 1862 page_size = sysconf(_SC_PAGESIZE); 1863 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size; 1864 assert(umem_size % page_size > MIN_PKT_SIZE); 1865 assert(umem_size % page_size < page_size - MIN_PKT_SIZE); 1866 ret = testapp_invalid_desc(test); 1867 break; 1868 } 1869 case TEST_TYPE_UNALIGNED: 1870 ret = testapp_unaligned(test); 1871 break; 1872 case TEST_TYPE_HEADROOM: 1873 ret = testapp_headroom(test); 1874 break; 1875 case TEST_TYPE_XDP_DROP_HALF: 1876 ret = testapp_xdp_drop(test); 1877 break; 1878 case TEST_TYPE_XDP_METADATA_COUNT: 1879 ret = testapp_xdp_metadata_count(test); 1880 break; 1881 default: 1882 break; 1883 } 1884 1885 if (ret == TEST_PASS) 1886 ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test), 1887 test->name); 1888 pkt_stream_restore_default(test); 1889 } 1890 1891 static struct ifobject *ifobject_create(void) 1892 { 1893 struct ifobject *ifobj; 1894 1895 ifobj = calloc(1, sizeof(struct ifobject)); 1896 if (!ifobj) 1897 return NULL; 1898 1899 ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr)); 1900 if (!ifobj->xsk_arr) 1901 goto out_xsk_arr; 1902 1903 ifobj->umem = calloc(1, sizeof(*ifobj->umem)); 1904 if (!ifobj->umem) 1905 goto out_umem; 1906 1907 return ifobj; 1908 1909 out_umem: 1910 free(ifobj->xsk_arr); 1911 out_xsk_arr: 1912 free(ifobj); 1913 return NULL; 1914 } 1915 1916 static void ifobject_delete(struct ifobject *ifobj) 1917 { 1918 free(ifobj->umem); 1919 free(ifobj->xsk_arr); 1920 free(ifobj); 1921 } 1922 1923 static bool is_xdp_supported(int ifindex) 1924 { 1925 int flags = XDP_FLAGS_DRV_MODE; 1926 1927 LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = flags); 1928 struct bpf_insn insns[2] = { 1929 BPF_MOV64_IMM(BPF_REG_0, XDP_PASS), 1930 BPF_EXIT_INSN() 1931 }; 1932 int prog_fd, insn_cnt = ARRAY_SIZE(insns); 1933 int err; 1934 1935 prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL); 1936 if (prog_fd < 0) 1937 return false; 1938 1939 err = bpf_xdp_attach(ifindex, prog_fd, flags, NULL); 1940 if (err) { 1941 close(prog_fd); 1942 return false; 1943 } 1944 1945 bpf_xdp_detach(ifindex, flags, NULL); 1946 close(prog_fd); 1947 1948 return true; 1949 } 1950 1951 int main(int argc, char **argv) 1952 { 1953 struct pkt_stream *rx_pkt_stream_default; 1954 struct pkt_stream *tx_pkt_stream_default; 1955 struct ifobject *ifobj_tx, *ifobj_rx; 1956 int modes = TEST_MODE_SKB + 1; 1957 u32 i, j, failed_tests = 0; 1958 struct test_spec test; 1959 bool shared_netdev; 1960 1961 /* Use libbpf 1.0 API mode */ 1962 libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 1963 1964 ifobj_tx = ifobject_create(); 1965 if (!ifobj_tx) 1966 exit_with_error(ENOMEM); 1967 ifobj_rx = ifobject_create(); 1968 if (!ifobj_rx) 1969 exit_with_error(ENOMEM); 1970 1971 setlocale(LC_ALL, ""); 1972 1973 parse_command_line(ifobj_tx, ifobj_rx, argc, argv); 1974 1975 shared_netdev = (ifobj_tx->ifindex == ifobj_rx->ifindex); 1976 ifobj_tx->shared_umem = shared_netdev; 1977 ifobj_rx->shared_umem = shared_netdev; 1978 1979 if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) { 1980 usage(basename(argv[0])); 1981 ksft_exit_xfail(); 1982 } 1983 1984 if (is_xdp_supported(ifobj_tx->ifindex)) { 1985 modes++; 1986 if (ifobj_zc_avail(ifobj_tx)) 1987 modes++; 1988 } 1989 1990 init_iface(ifobj_rx, MAC1, MAC2, worker_testapp_validate_rx); 1991 init_iface(ifobj_tx, MAC2, MAC1, worker_testapp_validate_tx); 1992 1993 test_spec_init(&test, ifobj_tx, ifobj_rx, 0); 1994 tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE); 1995 rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE); 1996 if (!tx_pkt_stream_default || !rx_pkt_stream_default) 1997 exit_with_error(ENOMEM); 1998 test.tx_pkt_stream_default = tx_pkt_stream_default; 1999 test.rx_pkt_stream_default = rx_pkt_stream_default; 2000 2001 ksft_set_plan(modes * TEST_TYPE_MAX); 2002 2003 for (i = 0; i < modes; i++) { 2004 for (j = 0; j < TEST_TYPE_MAX; j++) { 2005 test_spec_init(&test, ifobj_tx, ifobj_rx, i); 2006 run_pkt_test(&test, i, j); 2007 usleep(USLEEP_MAX); 2008 2009 if (test.fail) 2010 failed_tests++; 2011 } 2012 } 2013 2014 pkt_stream_delete(tx_pkt_stream_default); 2015 pkt_stream_delete(rx_pkt_stream_default); 2016 xsk_unload_xdp_programs(ifobj_tx); 2017 xsk_unload_xdp_programs(ifobj_rx); 2018 ifobject_delete(ifobj_tx); 2019 ifobject_delete(ifobj_rx); 2020 2021 if (failed_tests) 2022 ksft_exit_fail(); 2023 else 2024 ksft_exit_pass(); 2025 } 2026