1018a8e75SMaciej Fijalkowski // SPDX-License-Identifier: GPL-2.0 2018a8e75SMaciej Fijalkowski /* Copyright(c) 2020 Intel Corporation. */ 3018a8e75SMaciej Fijalkowski 4018a8e75SMaciej Fijalkowski /* 5018a8e75SMaciej Fijalkowski * Some functions in this program are taken from 6018a8e75SMaciej Fijalkowski * Linux kernel samples/bpf/xdpsock* and modified 7018a8e75SMaciej Fijalkowski * for use. 8018a8e75SMaciej Fijalkowski * 9018a8e75SMaciej Fijalkowski * See test_xsk.sh for detailed information on test topology 10018a8e75SMaciej Fijalkowski * and prerequisite network setup. 11018a8e75SMaciej Fijalkowski * 12018a8e75SMaciej Fijalkowski * This test program contains two threads, each thread is single socket with 13018a8e75SMaciej Fijalkowski * a unique UMEM. It validates in-order packet delivery and packet content 14018a8e75SMaciej Fijalkowski * by sending packets to each other. 15018a8e75SMaciej Fijalkowski * 16018a8e75SMaciej Fijalkowski * Tests Information: 17018a8e75SMaciej Fijalkowski * ------------------ 18018a8e75SMaciej Fijalkowski * These selftests test AF_XDP SKB and Native/DRV modes using veth 19018a8e75SMaciej Fijalkowski * Virtual Ethernet interfaces. 20018a8e75SMaciej Fijalkowski * 21018a8e75SMaciej Fijalkowski * For each mode, the following tests are run: 22018a8e75SMaciej Fijalkowski * a. nopoll - soft-irq processing in run-to-completion mode 23018a8e75SMaciej Fijalkowski * b. poll - using poll() syscall 24018a8e75SMaciej Fijalkowski * c. Socket Teardown 25018a8e75SMaciej Fijalkowski * Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy 26018a8e75SMaciej Fijalkowski * both sockets, then repeat multiple times. Only nopoll mode is used 27018a8e75SMaciej Fijalkowski * d. Bi-directional sockets 28018a8e75SMaciej Fijalkowski * Configure sockets as bi-directional tx/rx sockets, sets up fill and 29018a8e75SMaciej Fijalkowski * completion rings on each socket, tx/rx in both directions. 
Only nopoll 30018a8e75SMaciej Fijalkowski * mode is used 31018a8e75SMaciej Fijalkowski * e. Statistics 32018a8e75SMaciej Fijalkowski * Trigger some error conditions and ensure that the appropriate statistics 33018a8e75SMaciej Fijalkowski * are incremented. Within this test, the following statistics are tested: 34018a8e75SMaciej Fijalkowski * i. rx dropped 35018a8e75SMaciej Fijalkowski * Increase the UMEM frame headroom to a value which results in 36018a8e75SMaciej Fijalkowski * insufficient space in the rx buffer for both the packet and the headroom. 37018a8e75SMaciej Fijalkowski * ii. tx invalid 38018a8e75SMaciej Fijalkowski * Set the 'len' field of tx descriptors to an invalid value (umem frame 39018a8e75SMaciej Fijalkowski * size + 1). 40018a8e75SMaciej Fijalkowski * iii. rx ring full 41018a8e75SMaciej Fijalkowski * Reduce the size of the RX ring to a fraction of the fill ring size. 42018a8e75SMaciej Fijalkowski * iv. fill queue empty 43018a8e75SMaciej Fijalkowski * Do not populate the fill queue and then try to receive pkts. 44018a8e75SMaciej Fijalkowski * f. bpf_link resource persistence 45018a8e75SMaciej Fijalkowski * Configure sockets at indexes 0 and 1, run a traffic on queue ids 0, 46018a8e75SMaciej Fijalkowski * then remove xsk sockets from queue 0 on both veth interfaces and 47018a8e75SMaciej Fijalkowski * finally run a traffic on queues ids 1 48018a8e75SMaciej Fijalkowski * g. unaligned mode 49018a8e75SMaciej Fijalkowski * h. tests for invalid and corner case Tx descriptors so that the correct ones 50018a8e75SMaciej Fijalkowski * are discarded and let through, respectively. 51018a8e75SMaciej Fijalkowski * i. 
2K frame size tests 52018a8e75SMaciej Fijalkowski * 53018a8e75SMaciej Fijalkowski * Total tests: 12 54018a8e75SMaciej Fijalkowski * 55018a8e75SMaciej Fijalkowski * Flow: 56018a8e75SMaciej Fijalkowski * ----- 57018a8e75SMaciej Fijalkowski * - Single process spawns two threads: Tx and Rx 5864aef77dSMagnus Karlsson * - Each of these two threads attach to a veth interface 5964aef77dSMagnus Karlsson * - Each thread creates one AF_XDP socket connected to a unique umem for each 60018a8e75SMaciej Fijalkowski * veth interface 6164aef77dSMagnus Karlsson * - Tx thread Transmits a number of packets from veth<xxxx> to veth<yyyy> 6264aef77dSMagnus Karlsson * - Rx thread verifies if all packets were received and delivered in-order, 63018a8e75SMaciej Fijalkowski * and have the right content 64018a8e75SMaciej Fijalkowski * 65018a8e75SMaciej Fijalkowski * Enable/disable packet dump mode: 66018a8e75SMaciej Fijalkowski * -------------------------- 67018a8e75SMaciej Fijalkowski * To enable L2 - L4 headers and payload dump of each packet on STDOUT, add 68018a8e75SMaciej Fijalkowski * parameter -D to params array in test_xsk.sh, i.e. 
params=("-S" "-D") 69018a8e75SMaciej Fijalkowski */ 70018a8e75SMaciej Fijalkowski 71018a8e75SMaciej Fijalkowski #define _GNU_SOURCE 72c0801598SKal Conley #include <assert.h> 73018a8e75SMaciej Fijalkowski #include <fcntl.h> 74018a8e75SMaciej Fijalkowski #include <errno.h> 75018a8e75SMaciej Fijalkowski #include <getopt.h> 76018a8e75SMaciej Fijalkowski #include <asm/barrier.h> 77018a8e75SMaciej Fijalkowski #include <linux/if_link.h> 78018a8e75SMaciej Fijalkowski #include <linux/if_ether.h> 79018a8e75SMaciej Fijalkowski #include <linux/ip.h> 802ddade32SMagnus Karlsson #include <linux/mman.h> 81018a8e75SMaciej Fijalkowski #include <linux/udp.h> 82018a8e75SMaciej Fijalkowski #include <arpa/inet.h> 83018a8e75SMaciej Fijalkowski #include <net/if.h> 84018a8e75SMaciej Fijalkowski #include <locale.h> 85018a8e75SMaciej Fijalkowski #include <poll.h> 86018a8e75SMaciej Fijalkowski #include <pthread.h> 87018a8e75SMaciej Fijalkowski #include <signal.h> 88018a8e75SMaciej Fijalkowski #include <stdbool.h> 89018a8e75SMaciej Fijalkowski #include <stdio.h> 90018a8e75SMaciej Fijalkowski #include <stdlib.h> 91018a8e75SMaciej Fijalkowski #include <string.h> 92018a8e75SMaciej Fijalkowski #include <stddef.h> 93018a8e75SMaciej Fijalkowski #include <sys/mman.h> 94018a8e75SMaciej Fijalkowski #include <sys/socket.h> 95018a8e75SMaciej Fijalkowski #include <sys/time.h> 96018a8e75SMaciej Fijalkowski #include <sys/types.h> 97018a8e75SMaciej Fijalkowski #include <sys/queue.h> 98018a8e75SMaciej Fijalkowski #include <time.h> 99018a8e75SMaciej Fijalkowski #include <unistd.h> 100018a8e75SMaciej Fijalkowski #include <stdatomic.h> 1017d8319a7SMagnus Karlsson 1027d8319a7SMagnus Karlsson #include "xsk_xdp_progs.skel.h" 103018a8e75SMaciej Fijalkowski #include "xsk.h" 104018a8e75SMaciej Fijalkowski #include "xskxceiver.h" 1050d68e6feSMaciej Fijalkowski #include <bpf/bpf.h> 1060d68e6feSMaciej Fijalkowski #include <linux/filter.h> 107018a8e75SMaciej Fijalkowski #include "../kselftest.h" 1089a321fd3STushar 
Vyavahare #include "xsk_xdp_metadata.h" 109018a8e75SMaciej Fijalkowski 110018a8e75SMaciej Fijalkowski static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62"; 111018a8e75SMaciej Fijalkowski static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61"; 112018a8e75SMaciej Fijalkowski static const char *IP1 = "192.168.100.162"; 113018a8e75SMaciej Fijalkowski static const char *IP2 = "192.168.100.161"; 114018a8e75SMaciej Fijalkowski static const u16 UDP_PORT1 = 2020; 115018a8e75SMaciej Fijalkowski static const u16 UDP_PORT2 = 2121; 116018a8e75SMaciej Fijalkowski 117018a8e75SMaciej Fijalkowski static void __exit_with_error(int error, const char *file, const char *func, int line) 118018a8e75SMaciej Fijalkowski { 119018a8e75SMaciej Fijalkowski ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error, 120018a8e75SMaciej Fijalkowski strerror(error)); 121018a8e75SMaciej Fijalkowski ksft_exit_xfail(); 122018a8e75SMaciej Fijalkowski } 123018a8e75SMaciej Fijalkowski 124018a8e75SMaciej Fijalkowski #define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__) 125018a8e75SMaciej Fijalkowski #define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? 
"BUSY-POLL " : "" 126fe2ad08eSMaciej Fijalkowski static char *mode_string(struct test_spec *test) 127fe2ad08eSMaciej Fijalkowski { 128fe2ad08eSMaciej Fijalkowski switch (test->mode) { 129fe2ad08eSMaciej Fijalkowski case TEST_MODE_SKB: 130fe2ad08eSMaciej Fijalkowski return "SKB"; 131fe2ad08eSMaciej Fijalkowski case TEST_MODE_DRV: 132fe2ad08eSMaciej Fijalkowski return "DRV"; 133fe2ad08eSMaciej Fijalkowski case TEST_MODE_ZC: 134fe2ad08eSMaciej Fijalkowski return "ZC"; 135fe2ad08eSMaciej Fijalkowski default: 136fe2ad08eSMaciej Fijalkowski return "BOGUS"; 137fe2ad08eSMaciej Fijalkowski } 138fe2ad08eSMaciej Fijalkowski } 139018a8e75SMaciej Fijalkowski 140018a8e75SMaciej Fijalkowski static void report_failure(struct test_spec *test) 141018a8e75SMaciej Fijalkowski { 142018a8e75SMaciej Fijalkowski if (test->fail) 143018a8e75SMaciej Fijalkowski return; 144018a8e75SMaciej Fijalkowski 145018a8e75SMaciej Fijalkowski ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test), 146018a8e75SMaciej Fijalkowski test->name); 147018a8e75SMaciej Fijalkowski test->fail = true; 148018a8e75SMaciej Fijalkowski } 149018a8e75SMaciej Fijalkowski 150018a8e75SMaciej Fijalkowski static void memset32_htonl(void *dest, u32 val, u32 size) 151018a8e75SMaciej Fijalkowski { 152018a8e75SMaciej Fijalkowski u32 *ptr = (u32 *)dest; 153018a8e75SMaciej Fijalkowski int i; 154018a8e75SMaciej Fijalkowski 155018a8e75SMaciej Fijalkowski val = htonl(val); 156018a8e75SMaciej Fijalkowski 157018a8e75SMaciej Fijalkowski for (i = 0; i < (size & (~0x3)); i += 4) 158018a8e75SMaciej Fijalkowski ptr[i >> 2] = val; 159018a8e75SMaciej Fijalkowski } 160018a8e75SMaciej Fijalkowski 161018a8e75SMaciej Fijalkowski /* 162018a8e75SMaciej Fijalkowski * Fold a partial checksum 163018a8e75SMaciej Fijalkowski * This function code has been taken from 164018a8e75SMaciej Fijalkowski * Linux kernel include/asm-generic/checksum.h 165018a8e75SMaciej Fijalkowski */ 166018a8e75SMaciej Fijalkowski static __u16 
csum_fold(__u32 csum) 167018a8e75SMaciej Fijalkowski { 168018a8e75SMaciej Fijalkowski u32 sum = (__force u32)csum; 169018a8e75SMaciej Fijalkowski 170018a8e75SMaciej Fijalkowski sum = (sum & 0xffff) + (sum >> 16); 171018a8e75SMaciej Fijalkowski sum = (sum & 0xffff) + (sum >> 16); 172018a8e75SMaciej Fijalkowski return (__force __u16)~sum; 173018a8e75SMaciej Fijalkowski } 174018a8e75SMaciej Fijalkowski 175018a8e75SMaciej Fijalkowski /* 176018a8e75SMaciej Fijalkowski * This function code has been taken from 177018a8e75SMaciej Fijalkowski * Linux kernel lib/checksum.c 178018a8e75SMaciej Fijalkowski */ 179018a8e75SMaciej Fijalkowski static u32 from64to32(u64 x) 180018a8e75SMaciej Fijalkowski { 181018a8e75SMaciej Fijalkowski /* add up 32-bit and 32-bit for 32+c bit */ 182018a8e75SMaciej Fijalkowski x = (x & 0xffffffff) + (x >> 32); 183018a8e75SMaciej Fijalkowski /* add up carry.. */ 184018a8e75SMaciej Fijalkowski x = (x & 0xffffffff) + (x >> 32); 185018a8e75SMaciej Fijalkowski return (u32)x; 186018a8e75SMaciej Fijalkowski } 187018a8e75SMaciej Fijalkowski 188018a8e75SMaciej Fijalkowski /* 189018a8e75SMaciej Fijalkowski * This function code has been taken from 190018a8e75SMaciej Fijalkowski * Linux kernel lib/checksum.c 191018a8e75SMaciej Fijalkowski */ 192018a8e75SMaciej Fijalkowski static __u32 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum) 193018a8e75SMaciej Fijalkowski { 194018a8e75SMaciej Fijalkowski unsigned long long s = (__force u32)sum; 195018a8e75SMaciej Fijalkowski 196018a8e75SMaciej Fijalkowski s += (__force u32)saddr; 197018a8e75SMaciej Fijalkowski s += (__force u32)daddr; 198018a8e75SMaciej Fijalkowski #ifdef __BIG_ENDIAN__ 199018a8e75SMaciej Fijalkowski s += proto + len; 200018a8e75SMaciej Fijalkowski #else 201018a8e75SMaciej Fijalkowski s += (proto + len) << 8; 202018a8e75SMaciej Fijalkowski #endif 203018a8e75SMaciej Fijalkowski return (__force __u32)from64to32(s); 204018a8e75SMaciej Fijalkowski } 205018a8e75SMaciej 
Fijalkowski 206018a8e75SMaciej Fijalkowski /* 207018a8e75SMaciej Fijalkowski * This function has been taken from 208018a8e75SMaciej Fijalkowski * Linux kernel include/asm-generic/checksum.h 209018a8e75SMaciej Fijalkowski */ 210018a8e75SMaciej Fijalkowski static __u16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum) 211018a8e75SMaciej Fijalkowski { 212018a8e75SMaciej Fijalkowski return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); 213018a8e75SMaciej Fijalkowski } 214018a8e75SMaciej Fijalkowski 215018a8e75SMaciej Fijalkowski static u16 udp_csum(u32 saddr, u32 daddr, u32 len, u8 proto, u16 *udp_pkt) 216018a8e75SMaciej Fijalkowski { 217018a8e75SMaciej Fijalkowski u32 csum = 0; 218018a8e75SMaciej Fijalkowski u32 cnt = 0; 219018a8e75SMaciej Fijalkowski 220018a8e75SMaciej Fijalkowski /* udp hdr and data */ 221018a8e75SMaciej Fijalkowski for (; cnt < len; cnt += 2) 222018a8e75SMaciej Fijalkowski csum += udp_pkt[cnt >> 1]; 223018a8e75SMaciej Fijalkowski 224018a8e75SMaciej Fijalkowski return csum_tcpudp_magic(saddr, daddr, len, proto, csum); 225018a8e75SMaciej Fijalkowski } 226018a8e75SMaciej Fijalkowski 227018a8e75SMaciej Fijalkowski static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr) 228018a8e75SMaciej Fijalkowski { 229018a8e75SMaciej Fijalkowski memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN); 230018a8e75SMaciej Fijalkowski memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN); 231018a8e75SMaciej Fijalkowski eth_hdr->h_proto = htons(ETH_P_IP); 232018a8e75SMaciej Fijalkowski } 233018a8e75SMaciej Fijalkowski 234018a8e75SMaciej Fijalkowski static void gen_ip_hdr(struct ifobject *ifobject, struct iphdr *ip_hdr) 235018a8e75SMaciej Fijalkowski { 236018a8e75SMaciej Fijalkowski ip_hdr->version = IP_PKT_VER; 237018a8e75SMaciej Fijalkowski ip_hdr->ihl = 0x5; 238018a8e75SMaciej Fijalkowski ip_hdr->tos = IP_PKT_TOS; 239018a8e75SMaciej Fijalkowski ip_hdr->tot_len = htons(IP_PKT_SIZE); 240018a8e75SMaciej 
Fijalkowski ip_hdr->id = 0; 241018a8e75SMaciej Fijalkowski ip_hdr->frag_off = 0; 242018a8e75SMaciej Fijalkowski ip_hdr->ttl = IPDEFTTL; 243018a8e75SMaciej Fijalkowski ip_hdr->protocol = IPPROTO_UDP; 244018a8e75SMaciej Fijalkowski ip_hdr->saddr = ifobject->src_ip; 245018a8e75SMaciej Fijalkowski ip_hdr->daddr = ifobject->dst_ip; 246018a8e75SMaciej Fijalkowski ip_hdr->check = 0; 247018a8e75SMaciej Fijalkowski } 248018a8e75SMaciej Fijalkowski 249018a8e75SMaciej Fijalkowski static void gen_udp_hdr(u32 payload, void *pkt, struct ifobject *ifobject, 250018a8e75SMaciej Fijalkowski struct udphdr *udp_hdr) 251018a8e75SMaciej Fijalkowski { 252018a8e75SMaciej Fijalkowski udp_hdr->source = htons(ifobject->src_port); 253018a8e75SMaciej Fijalkowski udp_hdr->dest = htons(ifobject->dst_port); 254018a8e75SMaciej Fijalkowski udp_hdr->len = htons(UDP_PKT_SIZE); 255018a8e75SMaciej Fijalkowski memset32_htonl(pkt + PKT_HDR_SIZE, payload, UDP_PKT_DATA_SIZE); 256018a8e75SMaciej Fijalkowski } 257018a8e75SMaciej Fijalkowski 2583143d10bSShibin Koikkara Reeny static bool is_umem_valid(struct ifobject *ifobj) 2593143d10bSShibin Koikkara Reeny { 2603143d10bSShibin Koikkara Reeny return !!ifobj->umem->umem; 2613143d10bSShibin Koikkara Reeny } 2623143d10bSShibin Koikkara Reeny 263018a8e75SMaciej Fijalkowski static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr) 264018a8e75SMaciej Fijalkowski { 265018a8e75SMaciej Fijalkowski udp_hdr->check = 0; 266018a8e75SMaciej Fijalkowski udp_hdr->check = 267018a8e75SMaciej Fijalkowski udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE, IPPROTO_UDP, (u16 *)udp_hdr); 268018a8e75SMaciej Fijalkowski } 269018a8e75SMaciej Fijalkowski 270aa61d81fSMagnus Karlsson static u32 mode_to_xdp_flags(enum test_mode mode) 271aa61d81fSMagnus Karlsson { 272aa61d81fSMagnus Karlsson return (mode == TEST_MODE_SKB) ? 
XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE; 273aa61d81fSMagnus Karlsson } 274aa61d81fSMagnus Karlsson 275018a8e75SMaciej Fijalkowski static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size) 276018a8e75SMaciej Fijalkowski { 277018a8e75SMaciej Fijalkowski struct xsk_umem_config cfg = { 278018a8e75SMaciej Fijalkowski .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, 279018a8e75SMaciej Fijalkowski .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, 280018a8e75SMaciej Fijalkowski .frame_size = umem->frame_size, 281018a8e75SMaciej Fijalkowski .frame_headroom = umem->frame_headroom, 282018a8e75SMaciej Fijalkowski .flags = XSK_UMEM__DEFAULT_FLAGS 283018a8e75SMaciej Fijalkowski }; 284018a8e75SMaciej Fijalkowski int ret; 285018a8e75SMaciej Fijalkowski 286018a8e75SMaciej Fijalkowski if (umem->unaligned_mode) 287018a8e75SMaciej Fijalkowski cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG; 288018a8e75SMaciej Fijalkowski 289018a8e75SMaciej Fijalkowski ret = xsk_umem__create(&umem->umem, buffer, size, 290018a8e75SMaciej Fijalkowski &umem->fq, &umem->cq, &cfg); 291018a8e75SMaciej Fijalkowski if (ret) 292018a8e75SMaciej Fijalkowski return ret; 293018a8e75SMaciej Fijalkowski 294018a8e75SMaciej Fijalkowski umem->buffer = buffer; 295018a8e75SMaciej Fijalkowski return 0; 296018a8e75SMaciej Fijalkowski } 297018a8e75SMaciej Fijalkowski 298018a8e75SMaciej Fijalkowski static void enable_busy_poll(struct xsk_socket_info *xsk) 299018a8e75SMaciej Fijalkowski { 300018a8e75SMaciej Fijalkowski int sock_opt; 301018a8e75SMaciej Fijalkowski 302018a8e75SMaciej Fijalkowski sock_opt = 1; 303018a8e75SMaciej Fijalkowski if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL, 304018a8e75SMaciej Fijalkowski (void *)&sock_opt, sizeof(sock_opt)) < 0) 305018a8e75SMaciej Fijalkowski exit_with_error(errno); 306018a8e75SMaciej Fijalkowski 307018a8e75SMaciej Fijalkowski sock_opt = 20; 308018a8e75SMaciej Fijalkowski if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL, 
309018a8e75SMaciej Fijalkowski (void *)&sock_opt, sizeof(sock_opt)) < 0) 310018a8e75SMaciej Fijalkowski exit_with_error(errno); 311018a8e75SMaciej Fijalkowski 312018a8e75SMaciej Fijalkowski sock_opt = BATCH_SIZE; 313018a8e75SMaciej Fijalkowski if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET, 314018a8e75SMaciej Fijalkowski (void *)&sock_opt, sizeof(sock_opt)) < 0) 315018a8e75SMaciej Fijalkowski exit_with_error(errno); 316018a8e75SMaciej Fijalkowski } 317018a8e75SMaciej Fijalkowski 318a693ff3eSMaciej Fijalkowski static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, 319018a8e75SMaciej Fijalkowski struct ifobject *ifobject, bool shared) 320018a8e75SMaciej Fijalkowski { 321018a8e75SMaciej Fijalkowski struct xsk_socket_config cfg = {}; 322018a8e75SMaciej Fijalkowski struct xsk_ring_cons *rxr; 323018a8e75SMaciej Fijalkowski struct xsk_ring_prod *txr; 324018a8e75SMaciej Fijalkowski 325018a8e75SMaciej Fijalkowski xsk->umem = umem; 326018a8e75SMaciej Fijalkowski cfg.rx_size = xsk->rxqsize; 327018a8e75SMaciej Fijalkowski cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; 328018a8e75SMaciej Fijalkowski cfg.bind_flags = ifobject->bind_flags; 329018a8e75SMaciej Fijalkowski if (shared) 330018a8e75SMaciej Fijalkowski cfg.bind_flags |= XDP_SHARED_UMEM; 331018a8e75SMaciej Fijalkowski 332018a8e75SMaciej Fijalkowski txr = ifobject->tx_on ? &xsk->tx : NULL; 333018a8e75SMaciej Fijalkowski rxr = ifobject->rx_on ? 
&xsk->rx : NULL; 334aa61d81fSMagnus Karlsson return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg); 335018a8e75SMaciej Fijalkowski } 336018a8e75SMaciej Fijalkowski 337fe2ad08eSMaciej Fijalkowski static bool ifobj_zc_avail(struct ifobject *ifobject) 338fe2ad08eSMaciej Fijalkowski { 339fe2ad08eSMaciej Fijalkowski size_t umem_sz = DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE; 340fe2ad08eSMaciej Fijalkowski int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; 341fe2ad08eSMaciej Fijalkowski struct xsk_socket_info *xsk; 342fe2ad08eSMaciej Fijalkowski struct xsk_umem_info *umem; 343fe2ad08eSMaciej Fijalkowski bool zc_avail = false; 344fe2ad08eSMaciej Fijalkowski void *bufs; 345fe2ad08eSMaciej Fijalkowski int ret; 346fe2ad08eSMaciej Fijalkowski 347fe2ad08eSMaciej Fijalkowski bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); 348fe2ad08eSMaciej Fijalkowski if (bufs == MAP_FAILED) 349fe2ad08eSMaciej Fijalkowski exit_with_error(errno); 350fe2ad08eSMaciej Fijalkowski 351fe2ad08eSMaciej Fijalkowski umem = calloc(1, sizeof(struct xsk_umem_info)); 352fe2ad08eSMaciej Fijalkowski if (!umem) { 353fe2ad08eSMaciej Fijalkowski munmap(bufs, umem_sz); 354085dcccfSMagnus Karlsson exit_with_error(ENOMEM); 355fe2ad08eSMaciej Fijalkowski } 356fe2ad08eSMaciej Fijalkowski umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 357fe2ad08eSMaciej Fijalkowski ret = xsk_configure_umem(umem, bufs, umem_sz); 358fe2ad08eSMaciej Fijalkowski if (ret) 359fe2ad08eSMaciej Fijalkowski exit_with_error(-ret); 360fe2ad08eSMaciej Fijalkowski 361fe2ad08eSMaciej Fijalkowski xsk = calloc(1, sizeof(struct xsk_socket_info)); 362fe2ad08eSMaciej Fijalkowski if (!xsk) 363fe2ad08eSMaciej Fijalkowski goto out; 364fe2ad08eSMaciej Fijalkowski ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY; 365fe2ad08eSMaciej Fijalkowski ifobject->rx_on = true; 366fe2ad08eSMaciej Fijalkowski xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; 367fe2ad08eSMaciej 
Fijalkowski ret = __xsk_configure_socket(xsk, umem, ifobject, false); 368fe2ad08eSMaciej Fijalkowski if (!ret) 369fe2ad08eSMaciej Fijalkowski zc_avail = true; 370fe2ad08eSMaciej Fijalkowski 371fe2ad08eSMaciej Fijalkowski xsk_socket__delete(xsk->xsk); 372fe2ad08eSMaciej Fijalkowski free(xsk); 373fe2ad08eSMaciej Fijalkowski out: 374fe2ad08eSMaciej Fijalkowski munmap(umem->buffer, umem_sz); 375fe2ad08eSMaciej Fijalkowski xsk_umem__delete(umem->umem); 376fe2ad08eSMaciej Fijalkowski free(umem); 377fe2ad08eSMaciej Fijalkowski return zc_avail; 378fe2ad08eSMaciej Fijalkowski } 379fe2ad08eSMaciej Fijalkowski 380018a8e75SMaciej Fijalkowski static struct option long_options[] = { 381018a8e75SMaciej Fijalkowski {"interface", required_argument, 0, 'i'}, 382018a8e75SMaciej Fijalkowski {"busy-poll", no_argument, 0, 'b'}, 383018a8e75SMaciej Fijalkowski {"dump-pkts", no_argument, 0, 'D'}, 384018a8e75SMaciej Fijalkowski {"verbose", no_argument, 0, 'v'}, 385018a8e75SMaciej Fijalkowski {0, 0, 0, 0} 386018a8e75SMaciej Fijalkowski }; 387018a8e75SMaciej Fijalkowski 388018a8e75SMaciej Fijalkowski static void usage(const char *prog) 389018a8e75SMaciej Fijalkowski { 390018a8e75SMaciej Fijalkowski const char *str = 391018a8e75SMaciej Fijalkowski " Usage: %s [OPTIONS]\n" 392018a8e75SMaciej Fijalkowski " Options:\n" 393018a8e75SMaciej Fijalkowski " -i, --interface Use interface\n" 394018a8e75SMaciej Fijalkowski " -D, --dump-pkts Dump packets L2 - L5\n" 395018a8e75SMaciej Fijalkowski " -v, --verbose Verbose output\n" 396018a8e75SMaciej Fijalkowski " -b, --busy-poll Enable busy poll\n"; 397018a8e75SMaciej Fijalkowski 398018a8e75SMaciej Fijalkowski ksft_print_msg(str, prog); 399018a8e75SMaciej Fijalkowski } 400018a8e75SMaciej Fijalkowski 401018a8e75SMaciej Fijalkowski static bool validate_interface(struct ifobject *ifobj) 402018a8e75SMaciej Fijalkowski { 403018a8e75SMaciej Fijalkowski if (!strcmp(ifobj->ifname, "")) 404018a8e75SMaciej Fijalkowski return false; 405018a8e75SMaciej Fijalkowski 
return true; 406018a8e75SMaciej Fijalkowski } 407018a8e75SMaciej Fijalkowski 408018a8e75SMaciej Fijalkowski static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, int argc, 409018a8e75SMaciej Fijalkowski char **argv) 410018a8e75SMaciej Fijalkowski { 411018a8e75SMaciej Fijalkowski struct ifobject *ifobj; 412018a8e75SMaciej Fijalkowski u32 interface_nb = 0; 413018a8e75SMaciej Fijalkowski int option_index, c; 414018a8e75SMaciej Fijalkowski 415018a8e75SMaciej Fijalkowski opterr = 0; 416018a8e75SMaciej Fijalkowski 417018a8e75SMaciej Fijalkowski for (;;) { 418018a8e75SMaciej Fijalkowski c = getopt_long(argc, argv, "i:Dvb", long_options, &option_index); 419018a8e75SMaciej Fijalkowski if (c == -1) 420018a8e75SMaciej Fijalkowski break; 421018a8e75SMaciej Fijalkowski 422018a8e75SMaciej Fijalkowski switch (c) { 423018a8e75SMaciej Fijalkowski case 'i': 424018a8e75SMaciej Fijalkowski if (interface_nb == 0) 425018a8e75SMaciej Fijalkowski ifobj = ifobj_tx; 426018a8e75SMaciej Fijalkowski else if (interface_nb == 1) 427018a8e75SMaciej Fijalkowski ifobj = ifobj_rx; 428018a8e75SMaciej Fijalkowski else 429018a8e75SMaciej Fijalkowski break; 430018a8e75SMaciej Fijalkowski 43164aef77dSMagnus Karlsson memcpy(ifobj->ifname, optarg, 43264aef77dSMagnus Karlsson min_t(size_t, MAX_INTERFACE_NAME_CHARS, strlen(optarg))); 433aa61d81fSMagnus Karlsson 434aa61d81fSMagnus Karlsson ifobj->ifindex = if_nametoindex(ifobj->ifname); 435aa61d81fSMagnus Karlsson if (!ifobj->ifindex) 436aa61d81fSMagnus Karlsson exit_with_error(errno); 437aa61d81fSMagnus Karlsson 438018a8e75SMaciej Fijalkowski interface_nb++; 439018a8e75SMaciej Fijalkowski break; 440018a8e75SMaciej Fijalkowski case 'D': 441018a8e75SMaciej Fijalkowski opt_pkt_dump = true; 442018a8e75SMaciej Fijalkowski break; 443018a8e75SMaciej Fijalkowski case 'v': 444018a8e75SMaciej Fijalkowski opt_verbose = true; 445018a8e75SMaciej Fijalkowski break; 446018a8e75SMaciej Fijalkowski case 'b': 447018a8e75SMaciej Fijalkowski 
ifobj_tx->busy_poll = true; 448018a8e75SMaciej Fijalkowski ifobj_rx->busy_poll = true; 449018a8e75SMaciej Fijalkowski break; 450018a8e75SMaciej Fijalkowski default: 451018a8e75SMaciej Fijalkowski usage(basename(argv[0])); 452018a8e75SMaciej Fijalkowski ksft_exit_xfail(); 453018a8e75SMaciej Fijalkowski } 454018a8e75SMaciej Fijalkowski } 455018a8e75SMaciej Fijalkowski } 456018a8e75SMaciej Fijalkowski 457018a8e75SMaciej Fijalkowski static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, 458018a8e75SMaciej Fijalkowski struct ifobject *ifobj_rx) 459018a8e75SMaciej Fijalkowski { 460018a8e75SMaciej Fijalkowski u32 i, j; 461018a8e75SMaciej Fijalkowski 462018a8e75SMaciej Fijalkowski for (i = 0; i < MAX_INTERFACES; i++) { 463018a8e75SMaciej Fijalkowski struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx; 464018a8e75SMaciej Fijalkowski 465018a8e75SMaciej Fijalkowski ifobj->xsk = &ifobj->xsk_arr[0]; 466018a8e75SMaciej Fijalkowski ifobj->use_poll = false; 467018a8e75SMaciej Fijalkowski ifobj->use_fill_ring = true; 468018a8e75SMaciej Fijalkowski ifobj->release_rx = true; 469018a8e75SMaciej Fijalkowski ifobj->validation_func = NULL; 4709a321fd3STushar Vyavahare ifobj->use_metadata = false; 471018a8e75SMaciej Fijalkowski 472018a8e75SMaciej Fijalkowski if (i == 0) { 473018a8e75SMaciej Fijalkowski ifobj->rx_on = false; 474018a8e75SMaciej Fijalkowski ifobj->tx_on = true; 4751adef064SMaciej Fijalkowski ifobj->pkt_stream = test->tx_pkt_stream_default; 476018a8e75SMaciej Fijalkowski } else { 477018a8e75SMaciej Fijalkowski ifobj->rx_on = true; 478018a8e75SMaciej Fijalkowski ifobj->tx_on = false; 4791adef064SMaciej Fijalkowski ifobj->pkt_stream = test->rx_pkt_stream_default; 480018a8e75SMaciej Fijalkowski } 481018a8e75SMaciej Fijalkowski 482018a8e75SMaciej Fijalkowski memset(ifobj->umem, 0, sizeof(*ifobj->umem)); 483018a8e75SMaciej Fijalkowski ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS; 484018a8e75SMaciej Fijalkowski ifobj->umem->frame_size = 
XSK_UMEM__DEFAULT_FRAME_SIZE; 485a693ff3eSMaciej Fijalkowski if (ifobj->shared_umem && ifobj->rx_on) 486a693ff3eSMaciej Fijalkowski ifobj->umem->base_addr = DEFAULT_UMEM_BUFFERS * 487a693ff3eSMaciej Fijalkowski XSK_UMEM__DEFAULT_FRAME_SIZE; 488018a8e75SMaciej Fijalkowski 489018a8e75SMaciej Fijalkowski for (j = 0; j < MAX_SOCKETS; j++) { 490018a8e75SMaciej Fijalkowski memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j])); 491018a8e75SMaciej Fijalkowski ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; 492018a8e75SMaciej Fijalkowski } 493018a8e75SMaciej Fijalkowski } 494018a8e75SMaciej Fijalkowski 495018a8e75SMaciej Fijalkowski test->ifobj_tx = ifobj_tx; 496018a8e75SMaciej Fijalkowski test->ifobj_rx = ifobj_rx; 497018a8e75SMaciej Fijalkowski test->current_step = 0; 498018a8e75SMaciej Fijalkowski test->total_steps = 1; 499018a8e75SMaciej Fijalkowski test->nb_sockets = 1; 500018a8e75SMaciej Fijalkowski test->fail = false; 5017d8319a7SMagnus Karlsson test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog; 5027d8319a7SMagnus Karlsson test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk; 5037d8319a7SMagnus Karlsson test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog; 5047d8319a7SMagnus Karlsson test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk; 505018a8e75SMaciej Fijalkowski } 506018a8e75SMaciej Fijalkowski 507018a8e75SMaciej Fijalkowski static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, 508018a8e75SMaciej Fijalkowski struct ifobject *ifobj_rx, enum test_mode mode) 509018a8e75SMaciej Fijalkowski { 5101adef064SMaciej Fijalkowski struct pkt_stream *tx_pkt_stream; 5111adef064SMaciej Fijalkowski struct pkt_stream *rx_pkt_stream; 512018a8e75SMaciej Fijalkowski u32 i; 513018a8e75SMaciej Fijalkowski 5141adef064SMaciej Fijalkowski tx_pkt_stream = test->tx_pkt_stream_default; 5151adef064SMaciej Fijalkowski rx_pkt_stream = test->rx_pkt_stream_default; 516018a8e75SMaciej Fijalkowski memset(test, 0, sizeof(*test)); 5171adef064SMaciej 
Fijalkowski test->tx_pkt_stream_default = tx_pkt_stream; 5181adef064SMaciej Fijalkowski test->rx_pkt_stream_default = rx_pkt_stream; 519018a8e75SMaciej Fijalkowski 520018a8e75SMaciej Fijalkowski for (i = 0; i < MAX_INTERFACES; i++) { 521018a8e75SMaciej Fijalkowski struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx; 522018a8e75SMaciej Fijalkowski 523fe2ad08eSMaciej Fijalkowski ifobj->bind_flags = XDP_USE_NEED_WAKEUP; 524fe2ad08eSMaciej Fijalkowski if (mode == TEST_MODE_ZC) 525fe2ad08eSMaciej Fijalkowski ifobj->bind_flags |= XDP_ZEROCOPY; 526fe2ad08eSMaciej Fijalkowski else 527fe2ad08eSMaciej Fijalkowski ifobj->bind_flags |= XDP_COPY; 528018a8e75SMaciej Fijalkowski } 529018a8e75SMaciej Fijalkowski 530fe2ad08eSMaciej Fijalkowski test->mode = mode; 531018a8e75SMaciej Fijalkowski __test_spec_init(test, ifobj_tx, ifobj_rx); 532018a8e75SMaciej Fijalkowski } 533018a8e75SMaciej Fijalkowski 534018a8e75SMaciej Fijalkowski static void test_spec_reset(struct test_spec *test) 535018a8e75SMaciej Fijalkowski { 536018a8e75SMaciej Fijalkowski __test_spec_init(test, test->ifobj_tx, test->ifobj_rx); 537018a8e75SMaciej Fijalkowski } 538018a8e75SMaciej Fijalkowski 539018a8e75SMaciej Fijalkowski static void test_spec_set_name(struct test_spec *test, const char *name) 540018a8e75SMaciej Fijalkowski { 541018a8e75SMaciej Fijalkowski strncpy(test->name, name, MAX_TEST_NAME_SIZE); 542018a8e75SMaciej Fijalkowski } 543018a8e75SMaciej Fijalkowski 5447d8319a7SMagnus Karlsson static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx, 5457d8319a7SMagnus Karlsson struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx, 5467d8319a7SMagnus Karlsson struct bpf_map *xskmap_tx) 5477d8319a7SMagnus Karlsson { 5487d8319a7SMagnus Karlsson test->xdp_prog_rx = xdp_prog_rx; 5497d8319a7SMagnus Karlsson test->xdp_prog_tx = xdp_prog_tx; 5507d8319a7SMagnus Karlsson test->xskmap_rx = xskmap_rx; 5517d8319a7SMagnus Karlsson test->xskmap_tx = xskmap_tx; 5527d8319a7SMagnus Karlsson } 
5537d8319a7SMagnus Karlsson 554018a8e75SMaciej Fijalkowski static void pkt_stream_reset(struct pkt_stream *pkt_stream) 555018a8e75SMaciej Fijalkowski { 556018a8e75SMaciej Fijalkowski if (pkt_stream) 557018a8e75SMaciej Fijalkowski pkt_stream->rx_pkt_nb = 0; 558018a8e75SMaciej Fijalkowski } 559018a8e75SMaciej Fijalkowski 560018a8e75SMaciej Fijalkowski static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb) 561018a8e75SMaciej Fijalkowski { 562018a8e75SMaciej Fijalkowski if (pkt_nb >= pkt_stream->nb_pkts) 563018a8e75SMaciej Fijalkowski return NULL; 564018a8e75SMaciej Fijalkowski 565018a8e75SMaciej Fijalkowski return &pkt_stream->pkts[pkt_nb]; 566018a8e75SMaciej Fijalkowski } 567018a8e75SMaciej Fijalkowski 568018a8e75SMaciej Fijalkowski static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent) 569018a8e75SMaciej Fijalkowski { 570018a8e75SMaciej Fijalkowski while (pkt_stream->rx_pkt_nb < pkt_stream->nb_pkts) { 571018a8e75SMaciej Fijalkowski (*pkts_sent)++; 572018a8e75SMaciej Fijalkowski if (pkt_stream->pkts[pkt_stream->rx_pkt_nb].valid) 573018a8e75SMaciej Fijalkowski return &pkt_stream->pkts[pkt_stream->rx_pkt_nb++]; 574018a8e75SMaciej Fijalkowski pkt_stream->rx_pkt_nb++; 575018a8e75SMaciej Fijalkowski } 576018a8e75SMaciej Fijalkowski return NULL; 577018a8e75SMaciej Fijalkowski } 578018a8e75SMaciej Fijalkowski 579018a8e75SMaciej Fijalkowski static void pkt_stream_delete(struct pkt_stream *pkt_stream) 580018a8e75SMaciej Fijalkowski { 581018a8e75SMaciej Fijalkowski free(pkt_stream->pkts); 582018a8e75SMaciej Fijalkowski free(pkt_stream); 583018a8e75SMaciej Fijalkowski } 584018a8e75SMaciej Fijalkowski 585018a8e75SMaciej Fijalkowski static void pkt_stream_restore_default(struct test_spec *test) 586018a8e75SMaciej Fijalkowski { 587018a8e75SMaciej Fijalkowski struct pkt_stream *tx_pkt_stream = test->ifobj_tx->pkt_stream; 5881adef064SMaciej Fijalkowski struct pkt_stream *rx_pkt_stream = test->ifobj_rx->pkt_stream; 
589018a8e75SMaciej Fijalkowski 5901adef064SMaciej Fijalkowski if (tx_pkt_stream != test->tx_pkt_stream_default) { 591018a8e75SMaciej Fijalkowski pkt_stream_delete(test->ifobj_tx->pkt_stream); 5921adef064SMaciej Fijalkowski test->ifobj_tx->pkt_stream = test->tx_pkt_stream_default; 593018a8e75SMaciej Fijalkowski } 594018a8e75SMaciej Fijalkowski 5951adef064SMaciej Fijalkowski if (rx_pkt_stream != test->rx_pkt_stream_default) { 596018a8e75SMaciej Fijalkowski pkt_stream_delete(test->ifobj_rx->pkt_stream); 5971adef064SMaciej Fijalkowski test->ifobj_rx->pkt_stream = test->rx_pkt_stream_default; 5981adef064SMaciej Fijalkowski } 599018a8e75SMaciej Fijalkowski } 600018a8e75SMaciej Fijalkowski 601018a8e75SMaciej Fijalkowski static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts) 602018a8e75SMaciej Fijalkowski { 603018a8e75SMaciej Fijalkowski struct pkt_stream *pkt_stream; 604018a8e75SMaciej Fijalkowski 605018a8e75SMaciej Fijalkowski pkt_stream = calloc(1, sizeof(*pkt_stream)); 606018a8e75SMaciej Fijalkowski if (!pkt_stream) 607018a8e75SMaciej Fijalkowski return NULL; 608018a8e75SMaciej Fijalkowski 609018a8e75SMaciej Fijalkowski pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts)); 610018a8e75SMaciej Fijalkowski if (!pkt_stream->pkts) { 611018a8e75SMaciej Fijalkowski free(pkt_stream); 612018a8e75SMaciej Fijalkowski return NULL; 613018a8e75SMaciej Fijalkowski } 614018a8e75SMaciej Fijalkowski 615018a8e75SMaciej Fijalkowski pkt_stream->nb_pkts = nb_pkts; 616018a8e75SMaciej Fijalkowski return pkt_stream; 617018a8e75SMaciej Fijalkowski } 618018a8e75SMaciej Fijalkowski 619018a8e75SMaciej Fijalkowski static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 len) 620018a8e75SMaciej Fijalkowski { 6211adef064SMaciej Fijalkowski pkt->addr = addr + umem->base_addr; 622018a8e75SMaciej Fijalkowski pkt->len = len; 623018a8e75SMaciej Fijalkowski if (len > umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 2 - umem->frame_headroom) 624018a8e75SMaciej 
Fijalkowski pkt->valid = false; 625018a8e75SMaciej Fijalkowski else 626018a8e75SMaciej Fijalkowski pkt->valid = true; 627018a8e75SMaciej Fijalkowski } 628018a8e75SMaciej Fijalkowski 629018a8e75SMaciej Fijalkowski static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len) 630018a8e75SMaciej Fijalkowski { 631018a8e75SMaciej Fijalkowski struct pkt_stream *pkt_stream; 632018a8e75SMaciej Fijalkowski u32 i; 633018a8e75SMaciej Fijalkowski 634018a8e75SMaciej Fijalkowski pkt_stream = __pkt_stream_alloc(nb_pkts); 635018a8e75SMaciej Fijalkowski if (!pkt_stream) 636018a8e75SMaciej Fijalkowski exit_with_error(ENOMEM); 637018a8e75SMaciej Fijalkowski 638018a8e75SMaciej Fijalkowski for (i = 0; i < nb_pkts; i++) { 639018a8e75SMaciej Fijalkowski pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size, 640018a8e75SMaciej Fijalkowski pkt_len); 641018a8e75SMaciej Fijalkowski pkt_stream->pkts[i].payload = i; 642018a8e75SMaciej Fijalkowski } 643018a8e75SMaciej Fijalkowski 644018a8e75SMaciej Fijalkowski return pkt_stream; 645018a8e75SMaciej Fijalkowski } 646018a8e75SMaciej Fijalkowski 647018a8e75SMaciej Fijalkowski static struct pkt_stream *pkt_stream_clone(struct xsk_umem_info *umem, 648018a8e75SMaciej Fijalkowski struct pkt_stream *pkt_stream) 649018a8e75SMaciej Fijalkowski { 650018a8e75SMaciej Fijalkowski return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len); 651018a8e75SMaciej Fijalkowski } 652018a8e75SMaciej Fijalkowski 653018a8e75SMaciej Fijalkowski static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len) 654018a8e75SMaciej Fijalkowski { 655018a8e75SMaciej Fijalkowski struct pkt_stream *pkt_stream; 656018a8e75SMaciej Fijalkowski 657018a8e75SMaciej Fijalkowski pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len); 658018a8e75SMaciej Fijalkowski test->ifobj_tx->pkt_stream = pkt_stream; 6591adef064SMaciej Fijalkowski pkt_stream = 
pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len); 660018a8e75SMaciej Fijalkowski test->ifobj_rx->pkt_stream = pkt_stream; 661018a8e75SMaciej Fijalkowski } 662018a8e75SMaciej Fijalkowski 6631adef064SMaciej Fijalkowski static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len, 6641adef064SMaciej Fijalkowski int offset) 6651adef064SMaciej Fijalkowski { 6661adef064SMaciej Fijalkowski struct xsk_umem_info *umem = ifobj->umem; 6671adef064SMaciej Fijalkowski struct pkt_stream *pkt_stream; 6681adef064SMaciej Fijalkowski u32 i; 6691adef064SMaciej Fijalkowski 6701adef064SMaciej Fijalkowski pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream); 6711adef064SMaciej Fijalkowski for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2) 6721adef064SMaciej Fijalkowski pkt_set(umem, &pkt_stream->pkts[i], 6731adef064SMaciej Fijalkowski (i % umem->num_frames) * umem->frame_size + offset, pkt_len); 6741adef064SMaciej Fijalkowski 6751adef064SMaciej Fijalkowski ifobj->pkt_stream = pkt_stream; 6761adef064SMaciej Fijalkowski } 6771adef064SMaciej Fijalkowski 678018a8e75SMaciej Fijalkowski static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset) 679018a8e75SMaciej Fijalkowski { 6801adef064SMaciej Fijalkowski __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset); 6811adef064SMaciej Fijalkowski __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset); 682018a8e75SMaciej Fijalkowski } 683018a8e75SMaciej Fijalkowski 684018a8e75SMaciej Fijalkowski static void pkt_stream_receive_half(struct test_spec *test) 685018a8e75SMaciej Fijalkowski { 686018a8e75SMaciej Fijalkowski struct xsk_umem_info *umem = test->ifobj_rx->umem; 687018a8e75SMaciej Fijalkowski struct pkt_stream *pkt_stream = test->ifobj_tx->pkt_stream; 688018a8e75SMaciej Fijalkowski u32 i; 689018a8e75SMaciej Fijalkowski 690018a8e75SMaciej Fijalkowski test->ifobj_rx->pkt_stream = pkt_stream_generate(umem, pkt_stream->nb_pkts, 691018a8e75SMaciej Fijalkowski pkt_stream->pkts[0].len); 
692018a8e75SMaciej Fijalkowski pkt_stream = test->ifobj_rx->pkt_stream; 693018a8e75SMaciej Fijalkowski for (i = 1; i < pkt_stream->nb_pkts; i += 2) 694018a8e75SMaciej Fijalkowski pkt_stream->pkts[i].valid = false; 695018a8e75SMaciej Fijalkowski } 696018a8e75SMaciej Fijalkowski 697018a8e75SMaciej Fijalkowski static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb) 698018a8e75SMaciej Fijalkowski { 699018a8e75SMaciej Fijalkowski struct pkt *pkt = pkt_stream_get_pkt(ifobject->pkt_stream, pkt_nb); 700018a8e75SMaciej Fijalkowski struct udphdr *udp_hdr; 701018a8e75SMaciej Fijalkowski struct ethhdr *eth_hdr; 702018a8e75SMaciej Fijalkowski struct iphdr *ip_hdr; 703018a8e75SMaciej Fijalkowski void *data; 704018a8e75SMaciej Fijalkowski 705018a8e75SMaciej Fijalkowski if (!pkt) 706018a8e75SMaciej Fijalkowski return NULL; 707018a8e75SMaciej Fijalkowski if (!pkt->valid || pkt->len < MIN_PKT_SIZE) 708018a8e75SMaciej Fijalkowski return pkt; 709018a8e75SMaciej Fijalkowski 710018a8e75SMaciej Fijalkowski data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr); 711018a8e75SMaciej Fijalkowski udp_hdr = (struct udphdr *)(data + sizeof(struct ethhdr) + sizeof(struct iphdr)); 712018a8e75SMaciej Fijalkowski ip_hdr = (struct iphdr *)(data + sizeof(struct ethhdr)); 713018a8e75SMaciej Fijalkowski eth_hdr = (struct ethhdr *)data; 714018a8e75SMaciej Fijalkowski 715018a8e75SMaciej Fijalkowski gen_udp_hdr(pkt_nb, data, ifobject, udp_hdr); 716018a8e75SMaciej Fijalkowski gen_ip_hdr(ifobject, ip_hdr); 717018a8e75SMaciej Fijalkowski gen_udp_csum(udp_hdr, ip_hdr); 718018a8e75SMaciej Fijalkowski gen_eth_hdr(ifobject, eth_hdr); 719018a8e75SMaciej Fijalkowski 720018a8e75SMaciej Fijalkowski return pkt; 721018a8e75SMaciej Fijalkowski } 722018a8e75SMaciej Fijalkowski 7231adef064SMaciej Fijalkowski static void __pkt_stream_generate_custom(struct ifobject *ifobj, 7241adef064SMaciej Fijalkowski struct pkt *pkts, u32 nb_pkts) 725018a8e75SMaciej Fijalkowski { 726018a8e75SMaciej Fijalkowski 
struct pkt_stream *pkt_stream; 727018a8e75SMaciej Fijalkowski u32 i; 728018a8e75SMaciej Fijalkowski 729018a8e75SMaciej Fijalkowski pkt_stream = __pkt_stream_alloc(nb_pkts); 730018a8e75SMaciej Fijalkowski if (!pkt_stream) 731018a8e75SMaciej Fijalkowski exit_with_error(ENOMEM); 732018a8e75SMaciej Fijalkowski 733018a8e75SMaciej Fijalkowski for (i = 0; i < nb_pkts; i++) { 7341adef064SMaciej Fijalkowski pkt_stream->pkts[i].addr = pkts[i].addr + ifobj->umem->base_addr; 735018a8e75SMaciej Fijalkowski pkt_stream->pkts[i].len = pkts[i].len; 736018a8e75SMaciej Fijalkowski pkt_stream->pkts[i].payload = i; 737018a8e75SMaciej Fijalkowski pkt_stream->pkts[i].valid = pkts[i].valid; 738018a8e75SMaciej Fijalkowski } 7391adef064SMaciej Fijalkowski 7401adef064SMaciej Fijalkowski ifobj->pkt_stream = pkt_stream; 7411adef064SMaciej Fijalkowski } 7421adef064SMaciej Fijalkowski 7431adef064SMaciej Fijalkowski static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts) 7441adef064SMaciej Fijalkowski { 7451adef064SMaciej Fijalkowski __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts); 7461adef064SMaciej Fijalkowski __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts); 747018a8e75SMaciej Fijalkowski } 748018a8e75SMaciej Fijalkowski 749018a8e75SMaciej Fijalkowski static void pkt_dump(void *pkt, u32 len) 750018a8e75SMaciej Fijalkowski { 751018a8e75SMaciej Fijalkowski char s[INET_ADDRSTRLEN]; 752018a8e75SMaciej Fijalkowski struct ethhdr *ethhdr; 753018a8e75SMaciej Fijalkowski struct udphdr *udphdr; 754018a8e75SMaciej Fijalkowski struct iphdr *iphdr; 7552d0b2ae2SMagnus Karlsson u32 payload, i; 756018a8e75SMaciej Fijalkowski 757018a8e75SMaciej Fijalkowski ethhdr = pkt; 758018a8e75SMaciej Fijalkowski iphdr = pkt + sizeof(*ethhdr); 759018a8e75SMaciej Fijalkowski udphdr = pkt + sizeof(*ethhdr) + sizeof(*iphdr); 760018a8e75SMaciej Fijalkowski 761018a8e75SMaciej Fijalkowski /*extract L2 frame */ 762018a8e75SMaciej Fijalkowski fprintf(stdout, "DEBUG>> L2: 
dst mac: "); 763018a8e75SMaciej Fijalkowski for (i = 0; i < ETH_ALEN; i++) 764018a8e75SMaciej Fijalkowski fprintf(stdout, "%02X", ethhdr->h_dest[i]); 765018a8e75SMaciej Fijalkowski 766018a8e75SMaciej Fijalkowski fprintf(stdout, "\nDEBUG>> L2: src mac: "); 767018a8e75SMaciej Fijalkowski for (i = 0; i < ETH_ALEN; i++) 768018a8e75SMaciej Fijalkowski fprintf(stdout, "%02X", ethhdr->h_source[i]); 769018a8e75SMaciej Fijalkowski 770018a8e75SMaciej Fijalkowski /*extract L3 frame */ 771018a8e75SMaciej Fijalkowski fprintf(stdout, "\nDEBUG>> L3: ip_hdr->ihl: %02X\n", iphdr->ihl); 772018a8e75SMaciej Fijalkowski fprintf(stdout, "DEBUG>> L3: ip_hdr->saddr: %s\n", 773018a8e75SMaciej Fijalkowski inet_ntop(AF_INET, &iphdr->saddr, s, sizeof(s))); 774018a8e75SMaciej Fijalkowski fprintf(stdout, "DEBUG>> L3: ip_hdr->daddr: %s\n", 775018a8e75SMaciej Fijalkowski inet_ntop(AF_INET, &iphdr->daddr, s, sizeof(s))); 776018a8e75SMaciej Fijalkowski /*extract L4 frame */ 777018a8e75SMaciej Fijalkowski fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source)); 778018a8e75SMaciej Fijalkowski fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest)); 779018a8e75SMaciej Fijalkowski /*extract L5 frame */ 7802d0b2ae2SMagnus Karlsson payload = ntohl(*((u32 *)(pkt + PKT_HDR_SIZE))); 781018a8e75SMaciej Fijalkowski 782018a8e75SMaciej Fijalkowski fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload); 783018a8e75SMaciej Fijalkowski fprintf(stdout, "---------------------------------------\n"); 784018a8e75SMaciej Fijalkowski } 785018a8e75SMaciej Fijalkowski 786018a8e75SMaciej Fijalkowski static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr, 787018a8e75SMaciej Fijalkowski u64 pkt_stream_addr) 788018a8e75SMaciej Fijalkowski { 789018a8e75SMaciej Fijalkowski u32 headroom = umem->unaligned_mode ? 
0 : umem->frame_headroom; 790018a8e75SMaciej Fijalkowski u32 offset = addr % umem->frame_size, expected_offset = 0; 791018a8e75SMaciej Fijalkowski 792018a8e75SMaciej Fijalkowski if (!pkt_stream->use_addr_for_fill) 793018a8e75SMaciej Fijalkowski pkt_stream_addr = 0; 794018a8e75SMaciej Fijalkowski 795018a8e75SMaciej Fijalkowski expected_offset += (pkt_stream_addr + headroom + XDP_PACKET_HEADROOM) % umem->frame_size; 796018a8e75SMaciej Fijalkowski 797018a8e75SMaciej Fijalkowski if (offset == expected_offset) 798018a8e75SMaciej Fijalkowski return true; 799018a8e75SMaciej Fijalkowski 800018a8e75SMaciej Fijalkowski ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset); 801018a8e75SMaciej Fijalkowski return false; 802018a8e75SMaciej Fijalkowski } 803018a8e75SMaciej Fijalkowski 8049a321fd3STushar Vyavahare static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr) 8059a321fd3STushar Vyavahare { 8069a321fd3STushar Vyavahare void *data = xsk_umem__get_data(buffer, addr); 8079a321fd3STushar Vyavahare struct xdp_info *meta = data - sizeof(struct xdp_info); 8089a321fd3STushar Vyavahare 8099a321fd3STushar Vyavahare if (meta->count != pkt->payload) { 8109a321fd3STushar Vyavahare ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%d]\n", 8119a321fd3STushar Vyavahare __func__, pkt->payload, meta->count); 8129a321fd3STushar Vyavahare return false; 8139a321fd3STushar Vyavahare } 8149a321fd3STushar Vyavahare 8159a321fd3STushar Vyavahare return true; 8169a321fd3STushar Vyavahare } 8179a321fd3STushar Vyavahare 818018a8e75SMaciej Fijalkowski static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) 819018a8e75SMaciej Fijalkowski { 820018a8e75SMaciej Fijalkowski void *data = xsk_umem__get_data(buffer, addr); 821018a8e75SMaciej Fijalkowski struct iphdr *iphdr = (struct iphdr *)(data + sizeof(struct ethhdr)); 822018a8e75SMaciej Fijalkowski 823018a8e75SMaciej Fijalkowski if (!pkt) { 824018a8e75SMaciej Fijalkowski 
ksft_print_msg("[%s] too many packets received\n", __func__); 825018a8e75SMaciej Fijalkowski return false; 826018a8e75SMaciej Fijalkowski } 827018a8e75SMaciej Fijalkowski 828018a8e75SMaciej Fijalkowski if (len < MIN_PKT_SIZE || pkt->len < MIN_PKT_SIZE) { 829018a8e75SMaciej Fijalkowski /* Do not try to verify packets that are smaller than minimum size. */ 830018a8e75SMaciej Fijalkowski return true; 831018a8e75SMaciej Fijalkowski } 832018a8e75SMaciej Fijalkowski 833018a8e75SMaciej Fijalkowski if (pkt->len != len) { 834018a8e75SMaciej Fijalkowski ksft_print_msg("[%s] expected length [%d], got length [%d]\n", 835018a8e75SMaciej Fijalkowski __func__, pkt->len, len); 836018a8e75SMaciej Fijalkowski return false; 837018a8e75SMaciej Fijalkowski } 838018a8e75SMaciej Fijalkowski 839018a8e75SMaciej Fijalkowski if (iphdr->version == IP_PKT_VER && iphdr->tos == IP_PKT_TOS) { 840018a8e75SMaciej Fijalkowski u32 seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE))); 841018a8e75SMaciej Fijalkowski 842018a8e75SMaciej Fijalkowski if (opt_pkt_dump) 843018a8e75SMaciej Fijalkowski pkt_dump(data, PKT_SIZE); 844018a8e75SMaciej Fijalkowski 845018a8e75SMaciej Fijalkowski if (pkt->payload != seqnum) { 846018a8e75SMaciej Fijalkowski ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n", 847018a8e75SMaciej Fijalkowski __func__, pkt->payload, seqnum); 848018a8e75SMaciej Fijalkowski return false; 849018a8e75SMaciej Fijalkowski } 850018a8e75SMaciej Fijalkowski } else { 851018a8e75SMaciej Fijalkowski ksft_print_msg("Invalid frame received: "); 852018a8e75SMaciej Fijalkowski ksft_print_msg("[IP_PKT_VER: %02X], [IP_PKT_TOS: %02X]\n", iphdr->version, 853018a8e75SMaciej Fijalkowski iphdr->tos); 854018a8e75SMaciej Fijalkowski return false; 855018a8e75SMaciej Fijalkowski } 856018a8e75SMaciej Fijalkowski 857018a8e75SMaciej Fijalkowski return true; 858018a8e75SMaciej Fijalkowski } 859018a8e75SMaciej Fijalkowski 860018a8e75SMaciej Fijalkowski static void kick_tx(struct xsk_socket_info *xsk) 
861018a8e75SMaciej Fijalkowski { 862018a8e75SMaciej Fijalkowski int ret; 863018a8e75SMaciej Fijalkowski 864018a8e75SMaciej Fijalkowski ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0); 865018a8e75SMaciej Fijalkowski if (ret >= 0) 866018a8e75SMaciej Fijalkowski return; 867018a8e75SMaciej Fijalkowski if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) { 868018a8e75SMaciej Fijalkowski usleep(100); 869018a8e75SMaciej Fijalkowski return; 870018a8e75SMaciej Fijalkowski } 871018a8e75SMaciej Fijalkowski exit_with_error(errno); 872018a8e75SMaciej Fijalkowski } 873018a8e75SMaciej Fijalkowski 874018a8e75SMaciej Fijalkowski static void kick_rx(struct xsk_socket_info *xsk) 875018a8e75SMaciej Fijalkowski { 876018a8e75SMaciej Fijalkowski int ret; 877018a8e75SMaciej Fijalkowski 878018a8e75SMaciej Fijalkowski ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL); 879018a8e75SMaciej Fijalkowski if (ret < 0) 880018a8e75SMaciej Fijalkowski exit_with_error(errno); 881018a8e75SMaciej Fijalkowski } 882018a8e75SMaciej Fijalkowski 883018a8e75SMaciej Fijalkowski static int complete_pkts(struct xsk_socket_info *xsk, int batch_size) 884018a8e75SMaciej Fijalkowski { 885018a8e75SMaciej Fijalkowski unsigned int rcvd; 886018a8e75SMaciej Fijalkowski u32 idx; 887018a8e75SMaciej Fijalkowski 888018a8e75SMaciej Fijalkowski if (xsk_ring_prod__needs_wakeup(&xsk->tx)) 889018a8e75SMaciej Fijalkowski kick_tx(xsk); 890018a8e75SMaciej Fijalkowski 891018a8e75SMaciej Fijalkowski rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx); 892018a8e75SMaciej Fijalkowski if (rcvd) { 893018a8e75SMaciej Fijalkowski if (rcvd > xsk->outstanding_tx) { 894018a8e75SMaciej Fijalkowski u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1); 895018a8e75SMaciej Fijalkowski 896018a8e75SMaciej Fijalkowski ksft_print_msg("[%s] Too many packets completed\n", __func__); 897018a8e75SMaciej Fijalkowski ksft_print_msg("Last completion address: 
%llx\n", addr); 898018a8e75SMaciej Fijalkowski return TEST_FAILURE; 899018a8e75SMaciej Fijalkowski } 900018a8e75SMaciej Fijalkowski 901018a8e75SMaciej Fijalkowski xsk_ring_cons__release(&xsk->umem->cq, rcvd); 902018a8e75SMaciej Fijalkowski xsk->outstanding_tx -= rcvd; 903018a8e75SMaciej Fijalkowski } 904018a8e75SMaciej Fijalkowski 905018a8e75SMaciej Fijalkowski return TEST_PASS; 906018a8e75SMaciej Fijalkowski } 907018a8e75SMaciej Fijalkowski 9083143d10bSShibin Koikkara Reeny static int receive_pkts(struct test_spec *test, struct pollfd *fds) 909018a8e75SMaciej Fijalkowski { 9103143d10bSShibin Koikkara Reeny struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0}; 9113143d10bSShibin Koikkara Reeny struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream; 912018a8e75SMaciej Fijalkowski u32 idx_rx = 0, idx_fq = 0, rcvd, i, pkts_sent = 0; 9133143d10bSShibin Koikkara Reeny struct xsk_socket_info *xsk = test->ifobj_rx->xsk; 9143143d10bSShibin Koikkara Reeny struct ifobject *ifobj = test->ifobj_rx; 915018a8e75SMaciej Fijalkowski struct xsk_umem_info *umem = xsk->umem; 916018a8e75SMaciej Fijalkowski struct pkt *pkt; 917018a8e75SMaciej Fijalkowski int ret; 918018a8e75SMaciej Fijalkowski 919018a8e75SMaciej Fijalkowski ret = gettimeofday(&tv_now, NULL); 920018a8e75SMaciej Fijalkowski if (ret) 921018a8e75SMaciej Fijalkowski exit_with_error(errno); 922018a8e75SMaciej Fijalkowski timeradd(&tv_now, &tv_timeout, &tv_end); 923018a8e75SMaciej Fijalkowski 924018a8e75SMaciej Fijalkowski pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent); 925018a8e75SMaciej Fijalkowski while (pkt) { 926018a8e75SMaciej Fijalkowski ret = gettimeofday(&tv_now, NULL); 927018a8e75SMaciej Fijalkowski if (ret) 928018a8e75SMaciej Fijalkowski exit_with_error(errno); 929018a8e75SMaciej Fijalkowski if (timercmp(&tv_now, &tv_end, >)) { 930018a8e75SMaciej Fijalkowski ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__); 931018a8e75SMaciej Fijalkowski return TEST_FAILURE; 932018a8e75SMaciej 
Fijalkowski } 933018a8e75SMaciej Fijalkowski 934018a8e75SMaciej Fijalkowski kick_rx(xsk); 9353143d10bSShibin Koikkara Reeny if (ifobj->use_poll) { 936018a8e75SMaciej Fijalkowski ret = poll(fds, 1, POLL_TMOUT); 937018a8e75SMaciej Fijalkowski if (ret < 0) 938085dcccfSMagnus Karlsson exit_with_error(errno); 9393143d10bSShibin Koikkara Reeny 9403143d10bSShibin Koikkara Reeny if (!ret) { 9413143d10bSShibin Koikkara Reeny if (!is_umem_valid(test->ifobj_tx)) 9423143d10bSShibin Koikkara Reeny return TEST_PASS; 9433143d10bSShibin Koikkara Reeny 9443143d10bSShibin Koikkara Reeny ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__); 9453143d10bSShibin Koikkara Reeny return TEST_FAILURE; 9463143d10bSShibin Koikkara Reeny 947018a8e75SMaciej Fijalkowski } 9483143d10bSShibin Koikkara Reeny 9493143d10bSShibin Koikkara Reeny if (!(fds->revents & POLLIN)) 950018a8e75SMaciej Fijalkowski continue; 951018a8e75SMaciej Fijalkowski } 952018a8e75SMaciej Fijalkowski 9533143d10bSShibin Koikkara Reeny rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx); 9543143d10bSShibin Koikkara Reeny if (!rcvd) 9553143d10bSShibin Koikkara Reeny continue; 9563143d10bSShibin Koikkara Reeny 957018a8e75SMaciej Fijalkowski if (ifobj->use_fill_ring) { 958018a8e75SMaciej Fijalkowski ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); 959018a8e75SMaciej Fijalkowski while (ret != rcvd) { 960018a8e75SMaciej Fijalkowski if (ret < 0) 961018a8e75SMaciej Fijalkowski exit_with_error(-ret); 962018a8e75SMaciej Fijalkowski if (xsk_ring_prod__needs_wakeup(&umem->fq)) { 963018a8e75SMaciej Fijalkowski ret = poll(fds, 1, POLL_TMOUT); 964018a8e75SMaciej Fijalkowski if (ret < 0) 965085dcccfSMagnus Karlsson exit_with_error(errno); 966018a8e75SMaciej Fijalkowski } 967018a8e75SMaciej Fijalkowski ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); 968018a8e75SMaciej Fijalkowski } 969018a8e75SMaciej Fijalkowski } 970018a8e75SMaciej Fijalkowski 971018a8e75SMaciej Fijalkowski for (i = 0; i < rcvd; i++) { 
972018a8e75SMaciej Fijalkowski const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++); 973018a8e75SMaciej Fijalkowski u64 addr = desc->addr, orig; 974018a8e75SMaciej Fijalkowski 975018a8e75SMaciej Fijalkowski orig = xsk_umem__extract_addr(addr); 976018a8e75SMaciej Fijalkowski addr = xsk_umem__add_offset_to_addr(addr); 977018a8e75SMaciej Fijalkowski 978018a8e75SMaciej Fijalkowski if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len) || 9799a321fd3STushar Vyavahare !is_offset_correct(umem, pkt_stream, addr, pkt->addr) || 9809a321fd3STushar Vyavahare (ifobj->use_metadata && !is_metadata_correct(pkt, umem->buffer, addr))) 981018a8e75SMaciej Fijalkowski return TEST_FAILURE; 982018a8e75SMaciej Fijalkowski 983018a8e75SMaciej Fijalkowski if (ifobj->use_fill_ring) 984018a8e75SMaciej Fijalkowski *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig; 985018a8e75SMaciej Fijalkowski pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent); 986018a8e75SMaciej Fijalkowski } 987018a8e75SMaciej Fijalkowski 988018a8e75SMaciej Fijalkowski if (ifobj->use_fill_ring) 989018a8e75SMaciej Fijalkowski xsk_ring_prod__submit(&umem->fq, rcvd); 990018a8e75SMaciej Fijalkowski if (ifobj->release_rx) 991018a8e75SMaciej Fijalkowski xsk_ring_cons__release(&xsk->rx, rcvd); 992018a8e75SMaciej Fijalkowski 993018a8e75SMaciej Fijalkowski pthread_mutex_lock(&pacing_mutex); 994018a8e75SMaciej Fijalkowski pkts_in_flight -= pkts_sent; 995018a8e75SMaciej Fijalkowski if (pkts_in_flight < umem->num_frames) 996018a8e75SMaciej Fijalkowski pthread_cond_signal(&pacing_cond); 997018a8e75SMaciej Fijalkowski pthread_mutex_unlock(&pacing_mutex); 998018a8e75SMaciej Fijalkowski pkts_sent = 0; 999018a8e75SMaciej Fijalkowski } 1000018a8e75SMaciej Fijalkowski 1001018a8e75SMaciej Fijalkowski return TEST_PASS; 1002018a8e75SMaciej Fijalkowski } 1003018a8e75SMaciej Fijalkowski 10043143d10bSShibin Koikkara Reeny static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fds, 
10053143d10bSShibin Koikkara Reeny bool timeout) 1006018a8e75SMaciej Fijalkowski { 1007018a8e75SMaciej Fijalkowski struct xsk_socket_info *xsk = ifobject->xsk; 10083143d10bSShibin Koikkara Reeny bool use_poll = ifobject->use_poll; 1009e8f50c4fSKang Minchul u32 i, idx = 0, valid_pkts = 0; 1010e8f50c4fSKang Minchul int ret; 1011018a8e75SMaciej Fijalkowski 10123143d10bSShibin Koikkara Reeny while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) { 10133143d10bSShibin Koikkara Reeny if (use_poll) { 10143143d10bSShibin Koikkara Reeny ret = poll(fds, 1, POLL_TMOUT); 10153143d10bSShibin Koikkara Reeny if (timeout) { 10163143d10bSShibin Koikkara Reeny if (ret < 0) { 10173143d10bSShibin Koikkara Reeny ksft_print_msg("ERROR: [%s] Poll error %d\n", 1018085dcccfSMagnus Karlsson __func__, errno); 10193143d10bSShibin Koikkara Reeny return TEST_FAILURE; 10203143d10bSShibin Koikkara Reeny } 10213143d10bSShibin Koikkara Reeny if (ret == 0) 10223143d10bSShibin Koikkara Reeny return TEST_PASS; 10233143d10bSShibin Koikkara Reeny break; 10243143d10bSShibin Koikkara Reeny } 10253143d10bSShibin Koikkara Reeny if (ret <= 0) { 10263143d10bSShibin Koikkara Reeny ksft_print_msg("ERROR: [%s] Poll error %d\n", 1027085dcccfSMagnus Karlsson __func__, errno); 10283143d10bSShibin Koikkara Reeny return TEST_FAILURE; 10293143d10bSShibin Koikkara Reeny } 10303143d10bSShibin Koikkara Reeny } 10313143d10bSShibin Koikkara Reeny 1032018a8e75SMaciej Fijalkowski complete_pkts(xsk, BATCH_SIZE); 10333143d10bSShibin Koikkara Reeny } 1034018a8e75SMaciej Fijalkowski 1035018a8e75SMaciej Fijalkowski for (i = 0; i < BATCH_SIZE; i++) { 1036018a8e75SMaciej Fijalkowski struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i); 1037018a8e75SMaciej Fijalkowski struct pkt *pkt = pkt_generate(ifobject, *pkt_nb); 1038018a8e75SMaciej Fijalkowski 1039018a8e75SMaciej Fijalkowski if (!pkt) 1040018a8e75SMaciej Fijalkowski break; 1041018a8e75SMaciej Fijalkowski 1042018a8e75SMaciej Fijalkowski 
tx_desc->addr = pkt->addr; 1043018a8e75SMaciej Fijalkowski tx_desc->len = pkt->len; 1044018a8e75SMaciej Fijalkowski (*pkt_nb)++; 1045018a8e75SMaciej Fijalkowski if (pkt->valid) 1046018a8e75SMaciej Fijalkowski valid_pkts++; 1047018a8e75SMaciej Fijalkowski } 1048018a8e75SMaciej Fijalkowski 1049018a8e75SMaciej Fijalkowski pthread_mutex_lock(&pacing_mutex); 1050018a8e75SMaciej Fijalkowski pkts_in_flight += valid_pkts; 1051018a8e75SMaciej Fijalkowski /* pkts_in_flight might be negative if many invalid packets are sent */ 1052018a8e75SMaciej Fijalkowski if (pkts_in_flight >= (int)(ifobject->umem->num_frames - BATCH_SIZE)) { 1053018a8e75SMaciej Fijalkowski kick_tx(xsk); 1054018a8e75SMaciej Fijalkowski pthread_cond_wait(&pacing_cond, &pacing_mutex); 1055018a8e75SMaciej Fijalkowski } 1056018a8e75SMaciej Fijalkowski pthread_mutex_unlock(&pacing_mutex); 1057018a8e75SMaciej Fijalkowski 1058018a8e75SMaciej Fijalkowski xsk_ring_prod__submit(&xsk->tx, i); 1059018a8e75SMaciej Fijalkowski xsk->outstanding_tx += valid_pkts; 10603143d10bSShibin Koikkara Reeny 10613143d10bSShibin Koikkara Reeny if (use_poll) { 10623143d10bSShibin Koikkara Reeny ret = poll(fds, 1, POLL_TMOUT); 10633143d10bSShibin Koikkara Reeny if (ret <= 0) { 10643143d10bSShibin Koikkara Reeny if (ret == 0 && timeout) 10653143d10bSShibin Koikkara Reeny return TEST_PASS; 10663143d10bSShibin Koikkara Reeny 10673143d10bSShibin Koikkara Reeny ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret); 10683143d10bSShibin Koikkara Reeny return TEST_FAILURE; 10693143d10bSShibin Koikkara Reeny } 10703143d10bSShibin Koikkara Reeny } 10713143d10bSShibin Koikkara Reeny 10723143d10bSShibin Koikkara Reeny if (!timeout) { 1073018a8e75SMaciej Fijalkowski if (complete_pkts(xsk, i)) 1074018a8e75SMaciej Fijalkowski return TEST_FAILURE; 1075018a8e75SMaciej Fijalkowski 1076018a8e75SMaciej Fijalkowski usleep(10); 1077018a8e75SMaciej Fijalkowski return TEST_PASS; 1078018a8e75SMaciej Fijalkowski } 1079018a8e75SMaciej Fijalkowski 
10803143d10bSShibin Koikkara Reeny return TEST_CONTINUE; 10813143d10bSShibin Koikkara Reeny } 10823143d10bSShibin Koikkara Reeny 1083018a8e75SMaciej Fijalkowski static void wait_for_tx_completion(struct xsk_socket_info *xsk) 1084018a8e75SMaciej Fijalkowski { 1085018a8e75SMaciej Fijalkowski while (xsk->outstanding_tx) 1086018a8e75SMaciej Fijalkowski complete_pkts(xsk, BATCH_SIZE); 1087018a8e75SMaciej Fijalkowski } 1088018a8e75SMaciej Fijalkowski 1089018a8e75SMaciej Fijalkowski static int send_pkts(struct test_spec *test, struct ifobject *ifobject) 1090018a8e75SMaciej Fijalkowski { 10913143d10bSShibin Koikkara Reeny bool timeout = !is_umem_valid(test->ifobj_rx); 1092018a8e75SMaciej Fijalkowski struct pollfd fds = { }; 10933143d10bSShibin Koikkara Reeny u32 pkt_cnt = 0, ret; 1094018a8e75SMaciej Fijalkowski 1095018a8e75SMaciej Fijalkowski fds.fd = xsk_socket__fd(ifobject->xsk->xsk); 1096018a8e75SMaciej Fijalkowski fds.events = POLLOUT; 1097018a8e75SMaciej Fijalkowski 1098018a8e75SMaciej Fijalkowski while (pkt_cnt < ifobject->pkt_stream->nb_pkts) { 10993143d10bSShibin Koikkara Reeny ret = __send_pkts(ifobject, &pkt_cnt, &fds, timeout); 11003143d10bSShibin Koikkara Reeny if ((ret || test->fail) && !timeout) 1101018a8e75SMaciej Fijalkowski return TEST_FAILURE; 11023143d10bSShibin Koikkara Reeny else if (ret == TEST_PASS && timeout) 11033143d10bSShibin Koikkara Reeny return ret; 1104018a8e75SMaciej Fijalkowski } 1105018a8e75SMaciej Fijalkowski 1106018a8e75SMaciej Fijalkowski wait_for_tx_completion(ifobject->xsk); 1107018a8e75SMaciej Fijalkowski return TEST_PASS; 1108018a8e75SMaciej Fijalkowski } 1109018a8e75SMaciej Fijalkowski 1110018a8e75SMaciej Fijalkowski static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats) 1111018a8e75SMaciej Fijalkowski { 1112018a8e75SMaciej Fijalkowski int fd = xsk_socket__fd(xsk), err; 1113018a8e75SMaciej Fijalkowski socklen_t optlen, expected_len; 1114018a8e75SMaciej Fijalkowski 1115018a8e75SMaciej Fijalkowski optlen = 
sizeof(*stats); 1116018a8e75SMaciej Fijalkowski err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen); 1117018a8e75SMaciej Fijalkowski if (err) { 1118018a8e75SMaciej Fijalkowski ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n", 1119018a8e75SMaciej Fijalkowski __func__, -err, strerror(-err)); 1120018a8e75SMaciej Fijalkowski return TEST_FAILURE; 1121018a8e75SMaciej Fijalkowski } 1122018a8e75SMaciej Fijalkowski 1123018a8e75SMaciej Fijalkowski expected_len = sizeof(struct xdp_statistics); 1124018a8e75SMaciej Fijalkowski if (optlen != expected_len) { 1125018a8e75SMaciej Fijalkowski ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n", 1126018a8e75SMaciej Fijalkowski __func__, expected_len, optlen); 1127018a8e75SMaciej Fijalkowski return TEST_FAILURE; 1128018a8e75SMaciej Fijalkowski } 1129018a8e75SMaciej Fijalkowski 1130018a8e75SMaciej Fijalkowski return TEST_PASS; 1131018a8e75SMaciej Fijalkowski } 1132018a8e75SMaciej Fijalkowski 1133018a8e75SMaciej Fijalkowski static int validate_rx_dropped(struct ifobject *ifobject) 1134018a8e75SMaciej Fijalkowski { 1135018a8e75SMaciej Fijalkowski struct xsk_socket *xsk = ifobject->xsk->xsk; 1136018a8e75SMaciej Fijalkowski struct xdp_statistics stats; 1137018a8e75SMaciej Fijalkowski int err; 1138018a8e75SMaciej Fijalkowski 1139018a8e75SMaciej Fijalkowski kick_rx(ifobject->xsk); 1140018a8e75SMaciej Fijalkowski 1141018a8e75SMaciej Fijalkowski err = get_xsk_stats(xsk, &stats); 1142018a8e75SMaciej Fijalkowski if (err) 1143018a8e75SMaciej Fijalkowski return TEST_FAILURE; 1144018a8e75SMaciej Fijalkowski 114568e73221SKal Conley /* The receiver calls getsockopt after receiving the last (valid) 114668e73221SKal Conley * packet which is not the final packet sent in this test (valid and 114768e73221SKal Conley * invalid packets are sent in alternating fashion with the final 114868e73221SKal Conley * packet being invalid). 
Since the last packet may or may not have 114968e73221SKal Conley * been dropped already, both outcomes must be allowed. 115068e73221SKal Conley */ 115168e73221SKal Conley if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 || 115268e73221SKal Conley stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 - 1) 1153018a8e75SMaciej Fijalkowski return TEST_PASS; 1154018a8e75SMaciej Fijalkowski 1155018a8e75SMaciej Fijalkowski return TEST_FAILURE; 1156018a8e75SMaciej Fijalkowski } 1157018a8e75SMaciej Fijalkowski 1158018a8e75SMaciej Fijalkowski static int validate_rx_full(struct ifobject *ifobject) 1159018a8e75SMaciej Fijalkowski { 1160018a8e75SMaciej Fijalkowski struct xsk_socket *xsk = ifobject->xsk->xsk; 1161018a8e75SMaciej Fijalkowski struct xdp_statistics stats; 1162018a8e75SMaciej Fijalkowski int err; 1163018a8e75SMaciej Fijalkowski 1164018a8e75SMaciej Fijalkowski usleep(1000); 1165018a8e75SMaciej Fijalkowski kick_rx(ifobject->xsk); 1166018a8e75SMaciej Fijalkowski 1167018a8e75SMaciej Fijalkowski err = get_xsk_stats(xsk, &stats); 1168018a8e75SMaciej Fijalkowski if (err) 1169018a8e75SMaciej Fijalkowski return TEST_FAILURE; 1170018a8e75SMaciej Fijalkowski 1171018a8e75SMaciej Fijalkowski if (stats.rx_ring_full) 1172018a8e75SMaciej Fijalkowski return TEST_PASS; 1173018a8e75SMaciej Fijalkowski 1174018a8e75SMaciej Fijalkowski return TEST_FAILURE; 1175018a8e75SMaciej Fijalkowski } 1176018a8e75SMaciej Fijalkowski 1177018a8e75SMaciej Fijalkowski static int validate_fill_empty(struct ifobject *ifobject) 1178018a8e75SMaciej Fijalkowski { 1179018a8e75SMaciej Fijalkowski struct xsk_socket *xsk = ifobject->xsk->xsk; 1180018a8e75SMaciej Fijalkowski struct xdp_statistics stats; 1181018a8e75SMaciej Fijalkowski int err; 1182018a8e75SMaciej Fijalkowski 1183018a8e75SMaciej Fijalkowski usleep(1000); 1184018a8e75SMaciej Fijalkowski kick_rx(ifobject->xsk); 1185018a8e75SMaciej Fijalkowski 1186018a8e75SMaciej Fijalkowski err = get_xsk_stats(xsk, &stats); 1187018a8e75SMaciej Fijalkowski 
if (err) 1188018a8e75SMaciej Fijalkowski return TEST_FAILURE; 1189018a8e75SMaciej Fijalkowski 1190018a8e75SMaciej Fijalkowski if (stats.rx_fill_ring_empty_descs) 1191018a8e75SMaciej Fijalkowski return TEST_PASS; 1192018a8e75SMaciej Fijalkowski 1193018a8e75SMaciej Fijalkowski return TEST_FAILURE; 1194018a8e75SMaciej Fijalkowski } 1195018a8e75SMaciej Fijalkowski 1196018a8e75SMaciej Fijalkowski static int validate_tx_invalid_descs(struct ifobject *ifobject) 1197018a8e75SMaciej Fijalkowski { 1198018a8e75SMaciej Fijalkowski struct xsk_socket *xsk = ifobject->xsk->xsk; 1199018a8e75SMaciej Fijalkowski int fd = xsk_socket__fd(xsk); 1200018a8e75SMaciej Fijalkowski struct xdp_statistics stats; 1201018a8e75SMaciej Fijalkowski socklen_t optlen; 1202018a8e75SMaciej Fijalkowski int err; 1203018a8e75SMaciej Fijalkowski 1204018a8e75SMaciej Fijalkowski optlen = sizeof(stats); 1205018a8e75SMaciej Fijalkowski err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen); 1206018a8e75SMaciej Fijalkowski if (err) { 1207018a8e75SMaciej Fijalkowski ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n", 1208018a8e75SMaciej Fijalkowski __func__, -err, strerror(-err)); 1209018a8e75SMaciej Fijalkowski return TEST_FAILURE; 1210018a8e75SMaciej Fijalkowski } 1211018a8e75SMaciej Fijalkowski 1212018a8e75SMaciej Fijalkowski if (stats.tx_invalid_descs != ifobject->pkt_stream->nb_pkts / 2) { 1213018a8e75SMaciej Fijalkowski ksft_print_msg("[%s] tx_invalid_descs incorrect. 
Got [%u] expected [%u]\n", 1214018a8e75SMaciej Fijalkowski __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts); 1215018a8e75SMaciej Fijalkowski return TEST_FAILURE; 1216018a8e75SMaciej Fijalkowski } 1217018a8e75SMaciej Fijalkowski 1218018a8e75SMaciej Fijalkowski return TEST_PASS; 1219018a8e75SMaciej Fijalkowski } 1220018a8e75SMaciej Fijalkowski 1221a693ff3eSMaciej Fijalkowski static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject, 1222a693ff3eSMaciej Fijalkowski struct xsk_umem_info *umem, bool tx) 1223a693ff3eSMaciej Fijalkowski { 1224a693ff3eSMaciej Fijalkowski int i, ret; 1225a693ff3eSMaciej Fijalkowski 1226a693ff3eSMaciej Fijalkowski for (i = 0; i < test->nb_sockets; i++) { 1227a693ff3eSMaciej Fijalkowski bool shared = (ifobject->shared_umem && tx) ? true : !!i; 1228a693ff3eSMaciej Fijalkowski u32 ctr = 0; 1229a693ff3eSMaciej Fijalkowski 1230a693ff3eSMaciej Fijalkowski while (ctr++ < SOCK_RECONF_CTR) { 1231a693ff3eSMaciej Fijalkowski ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem, 1232a693ff3eSMaciej Fijalkowski ifobject, shared); 1233a693ff3eSMaciej Fijalkowski if (!ret) 1234a693ff3eSMaciej Fijalkowski break; 1235a693ff3eSMaciej Fijalkowski 1236a693ff3eSMaciej Fijalkowski /* Retry if it fails as xsk_socket__create() is asynchronous */ 1237a693ff3eSMaciej Fijalkowski if (ctr >= SOCK_RECONF_CTR) 1238a693ff3eSMaciej Fijalkowski exit_with_error(-ret); 1239a693ff3eSMaciej Fijalkowski usleep(USLEEP_MAX); 1240a693ff3eSMaciej Fijalkowski } 1241a693ff3eSMaciej Fijalkowski if (ifobject->busy_poll) 1242a693ff3eSMaciej Fijalkowski enable_busy_poll(&ifobject->xsk_arr[i]); 1243a693ff3eSMaciej Fijalkowski } 1244a693ff3eSMaciej Fijalkowski } 1245a693ff3eSMaciej Fijalkowski 1246a693ff3eSMaciej Fijalkowski static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject) 1247a693ff3eSMaciej Fijalkowski { 1248a693ff3eSMaciej Fijalkowski xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true); 
1249a693ff3eSMaciej Fijalkowski ifobject->xsk = &ifobject->xsk_arr[0]; 1250f0a249dfSMagnus Karlsson ifobject->xskmap = test->ifobj_rx->xskmap; 1251a693ff3eSMaciej Fijalkowski memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info)); 1252a693ff3eSMaciej Fijalkowski } 1253a693ff3eSMaciej Fijalkowski 1254a693ff3eSMaciej Fijalkowski static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream) 1255a693ff3eSMaciej Fijalkowski { 1256a693ff3eSMaciej Fijalkowski u32 idx = 0, i, buffers_to_fill; 1257a693ff3eSMaciej Fijalkowski int ret; 1258a693ff3eSMaciej Fijalkowski 1259a693ff3eSMaciej Fijalkowski if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) 1260a693ff3eSMaciej Fijalkowski buffers_to_fill = umem->num_frames; 1261a693ff3eSMaciej Fijalkowski else 1262a693ff3eSMaciej Fijalkowski buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS; 1263a693ff3eSMaciej Fijalkowski 1264a693ff3eSMaciej Fijalkowski ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); 1265a693ff3eSMaciej Fijalkowski if (ret != buffers_to_fill) 1266a693ff3eSMaciej Fijalkowski exit_with_error(ENOSPC); 1267a693ff3eSMaciej Fijalkowski for (i = 0; i < buffers_to_fill; i++) { 1268a693ff3eSMaciej Fijalkowski u64 addr; 1269a693ff3eSMaciej Fijalkowski 1270a693ff3eSMaciej Fijalkowski if (pkt_stream->use_addr_for_fill) { 1271a693ff3eSMaciej Fijalkowski struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i); 1272a693ff3eSMaciej Fijalkowski 1273a693ff3eSMaciej Fijalkowski if (!pkt) 1274a693ff3eSMaciej Fijalkowski break; 1275a693ff3eSMaciej Fijalkowski addr = pkt->addr; 1276a693ff3eSMaciej Fijalkowski } else { 1277a693ff3eSMaciej Fijalkowski addr = i * umem->frame_size; 1278a693ff3eSMaciej Fijalkowski } 1279a693ff3eSMaciej Fijalkowski 1280a693ff3eSMaciej Fijalkowski *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; 1281a693ff3eSMaciej Fijalkowski } 12821e04f23bSMagnus Karlsson xsk_ring_prod__submit(&umem->fq, i); 1283a693ff3eSMaciej Fijalkowski } 

/* Common per-thread setup: mmap the UMEM buffer area (huge pages in
 * unaligned mode, doubled for a shared UMEM), register the UMEM, pre-fill
 * the fill ring, create the socket(s) and, on the Rx side only, insert the
 * first socket into the xskmap.
 */
static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
{
	u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
	int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	void *bufs;
	int ret;

	/* Unaligned-mode tests back the UMEM with 2M huge pages. */
	if (ifobject->umem->unaligned_mode)
		mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB;

	/* A shared UMEM serves both directions, so double the area. */
	if (ifobject->shared_umem)
		umem_sz *= 2;

	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
	if (bufs == MAP_FAILED)
		exit_with_error(errno);

	ret = xsk_configure_umem(ifobject->umem, bufs, umem_sz);
	if (ret)
		exit_with_error(-ret);

	xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream);

	xsk_configure_socket(test, ifobject, ifobject->umem, false);

	ifobject->xsk = &ifobject->xsk_arr[0];

	/* Only the Rx side needs its socket in the xskmap. */
	if (!ifobject->rx_on)
		return;

	ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
	if (ret)
		/* NOTE(review): 'ret' carries the failure code here; errno may
		 * be stale. Consider exit_with_error(-ret) — confirm against
		 * xsk_update_xskmap()'s contract.
		 */
		exit_with_error(errno);
}

/* Tx worker thread: on the first step performs the common setup (shared or
 * private UMEM variant), then transmits the packet stream and runs the
 * optional per-test validation function.
 */
static void *worker_testapp_validate_tx(void *arg)
{
	struct test_spec *test = (struct test_spec *)arg;
	struct ifobject *ifobject = test->ifobj_tx;
	int err;

	if (test->current_step == 1) {
		if (!ifobject->shared_umem)
			thread_common_ops(test, ifobject);
		else
			thread_common_ops_tx(test, ifobject);
	}

	print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts,
		      ifobject->ifname);
	err = send_pkts(test, ifobject);

	if (!err && ifobject->validation_func)
		err = ifobject->validation_func(ifobject);
	if (err)
		report_failure(test);

	pthread_exit(NULL);
}

/* Rx worker thread: sets up (step 1) or re-points the xskmap (later steps),
 * synchronizes with the Tx thread via 'barr', receives and validates
 * packets. On failure it signals 'pacing_cond' so a Tx thread blocked on
 * pacing does not deadlock.
 */
static void *worker_testapp_validate_rx(void *arg)
{
	struct test_spec *test = (struct test_spec *)arg;
	struct ifobject *ifobject = test->ifobj_rx;
	struct pollfd fds = { };
	int err;

	if (test->current_step == 1) {
		thread_common_ops(test, ifobject);
	} else {
		/* Sockets already exist: just swap which one the XDP program
		 * redirects to.
		 */
		xsk_clear_xskmap(ifobject->xskmap);
		err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
		if (err) {
			printf("Error: Failed to update xskmap, error %s\n", strerror(-err));
			exit_with_error(-err);
		}
	}

	fds.fd = xsk_socket__fd(ifobject->xsk->xsk);
	fds.events = POLLIN;

	/* Rendezvous with the spawning thread before traffic starts. */
	pthread_barrier_wait(&barr);

	err = receive_pkts(test, &fds);

	if (!err && ifobject->validation_func)
		err = ifobject->validation_func(ifobject);
	if (err) {
		report_failure(test);
		/* Wake a Tx thread possibly waiting on the pacing condition. */
		pthread_mutex_lock(&pacing_mutex);
		pthread_cond_signal(&pacing_cond);
		pthread_mutex_unlock(&pacing_mutex);
	}

	pthread_exit(NULL);
}

/* Integer division of a by b, rounding up. */
static u64 ceil_u64(u64 a, u64 b)
{
	return (a + b - 1) / b;
}

/* Delete the UMEM and unmap its buffer area. The munmap() length is rounded
 * up to a HUGEPAGE_SIZE multiple so hugepage-backed mappings (which the
 * kernel rounds up at mmap time) are fully released.
 */
static void testapp_clean_xsk_umem(struct ifobject *ifobj)
{
	u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size;

	/* A shared UMEM was mapped at double size; mirror that here. */
	if (ifobj->shared_umem)
		umem_sz *= 2;

	umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
	xsk_umem__delete(ifobj->umem->umem);
	munmap(ifobj->umem->buffer, umem_sz);
}

/* SIGUSR1 handler used to terminate a lone Rx thread (single-thread tests). */
static void handler(int signum)
{
	pthread_exit(NULL);
}

/* True if the Rx interface needs a different XDP program or mode attached. */
static bool xdp_prog_changed_rx(struct test_spec *test)
{
	struct ifobject *ifobj = test->ifobj_rx;

	return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode;
}

/* True if the Tx interface needs a different XDP program or mode attached. */
static bool xdp_prog_changed_tx(struct test_spec *test)
{
	struct ifobject *ifobj = test->ifobj_tx;

	return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode;
}

/* Detach the currently loaded XDP program and attach @xdp_prog in @mode,
 * verifying that DRV/ZC requests really ended up in driver mode. Records
 * the new program, map and mode in @ifobj.
 */
static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog,
			     struct bpf_map *xskmap, enum test_mode mode)
{
	int err;

	xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode));
	err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode));
	if (err) {
		printf("Error attaching XDP program\n");
		exit_with_error(-err);
	}

	/* When switching into DRV or ZC, confirm the kernel honored it. */
	if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC))
		if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) {
			ksft_print_msg("ERROR: XDP prog not in DRV mode\n");
			exit_with_error(EINVAL);
		}

	ifobj->xdp_prog = xdp_prog;
	ifobj->xskmap = xskmap;
	ifobj->mode = mode;
}

/* (Re)attach the test's XDP programs on both interfaces, skipping whichever
 * side is already up to date. The Tx side is skipped entirely when absent or
 * when it shares the Rx side's UMEM (and therefore its program).
 */
static void xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx,
				 struct ifobject *ifobj_tx)
{
	if (xdp_prog_changed_rx(test))
		xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode);

	if (!ifobj_tx || ifobj_tx->shared_umem)
		return;

	if (xdp_prog_changed_tx(test))
		xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode);
}

/* Run one traffic step: spawn the ifobj1 (Rx) thread and, if present, the
 * ifobj2 (Tx) thread, synchronizing startup through 'barr'. A lone Rx thread
 * is stopped with SIGUSR1 instead of joined. On the final step (or on
 * failure) sockets and UMEMs are torn down. Returns non-zero if the test
 * failed.
 */
static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1,
				      struct ifobject *ifobj2)
{
	pthread_t t0, t1;

	if (ifobj2)
		if (pthread_barrier_init(&barr, NULL, 2))
			exit_with_error(errno);

	test->current_step++;
	pkt_stream_reset(ifobj1->pkt_stream);
	pkts_in_flight = 0;

	signal(SIGUSR1, handler);
	/* Spawn RX thread */
	pthread_create(&t0, NULL, ifobj1->func_ptr, test);

	if (ifobj2) {
		/* Wait for Rx-side setup to finish before starting Tx. */
		pthread_barrier_wait(&barr);
		if (pthread_barrier_destroy(&barr))
			exit_with_error(errno);

		/* Spawn TX thread */
		pthread_create(&t1, NULL, ifobj2->func_ptr, test);

		pthread_join(t1, NULL);
	}

	if (!ifobj2)
		pthread_kill(t0, SIGUSR1);
	else
		pthread_join(t0, NULL);

	if (test->total_steps == test->current_step || test->fail) {
		if (ifobj2)
			xsk_socket__delete(ifobj2->xsk->xsk);
		xsk_socket__delete(ifobj1->xsk->xsk);
		testapp_clean_xsk_umem(ifobj1);
		/* A shared UMEM belongs to ifobj1 and was already freed. */
		if (ifobj2 && !ifobj2->shared_umem)
			testapp_clean_xsk_umem(ifobj2);
	}

	return !!test->fail;
}

/* Standard two-interface traffic run: refresh XDP programs, then validate. */
static int testapp_validate_traffic(struct test_spec *test)
{
	struct ifobject *ifobj_rx = test->ifobj_rx;
	struct ifobject *ifobj_tx = test->ifobj_tx;

	xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx);
	return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx);
}

/* Traffic run with a single interface/thread (no Tx counterpart). */
static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj)
{
	return __testapp_validate_traffic(test, ifobj, NULL);
}

/* Repeatedly set up and tear down sockets to check resources are fully
 * released between iterations.
 */
static void testapp_teardown(struct test_spec *test)
{
	int i;

	test_spec_set_name(test, "TEARDOWN");
	for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
		if (testapp_validate_traffic(test))
			return;
		test_spec_reset(test);
	}
}

/* Swap the Rx/Tx roles of two interface objects: exchange their worker
 * function pointers and then the pointers themselves.
 */
static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
{
	thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr;
	struct ifobject *tmp_ifobj = (*ifobj1);

	(*ifobj1)->func_ptr = (*ifobj2)->func_ptr;
	(*ifobj2)->func_ptr = tmp_func_ptr;

	*ifobj1 = *ifobj2;
	*ifobj2 = tmp_ifobj;
}

/* Bi-directional test: run traffic, swap the Tx/Rx vectors, run traffic the
 * other way, then restore the original direction.
 */
static void testapp_bidi(struct test_spec *test)
{
	test_spec_set_name(test, "BIDIRECTIONAL");
	test->ifobj_tx->rx_on = true;
	test->ifobj_rx->tx_on = true;
	test->total_steps = 2;
	if (testapp_validate_traffic(test))
		return;

	print_verbose("Switching Tx/Rx vectors\n");
	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
	__testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);

	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
}

/* Close the sockets at index 0 and switch both sides over to the sockets at
 * index 1, re-pointing the Rx xskmap at the new Rx socket.
 */
static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
{
	int ret;

	xsk_socket__delete(ifobj_tx->xsk->xsk);
	xsk_socket__delete(ifobj_rx->xsk->xsk);
	ifobj_tx->xsk = &ifobj_tx->xsk_arr[1];
	ifobj_rx->xsk = &ifobj_rx->xsk_arr[1];

	ret = xsk_update_xskmap(ifobj_rx->xskmap, ifobj_rx->xsk->xsk);
	if (ret)
		exit_with_error(errno);
}

/* bpf_link persistence test: run traffic on the first socket pair, swap to
 * the second pair and run again without re-attaching the XDP program.
 */
static void testapp_bpf_res(struct test_spec *test)
{
	test_spec_set_name(test, "BPF_RES");
	test->total_steps = 2;
	test->nb_sockets = 2;
	if (testapp_validate_traffic(test))
		return;

	swap_xsk_resources(test->ifobj_tx, test->ifobj_rx);
	testapp_validate_traffic(test);
}

/* Run traffic with a non-default UMEM frame headroom on the Rx side. */
static void testapp_headroom(struct test_spec *test)
{
	test_spec_set_name(test, "UMEM_HEADROOM");
	test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
	testapp_validate_traffic(test);
}

/* STAT_RX_DROPPED: set the headroom so large that half the packets no longer
 * fit in an Rx buffer, expect them dropped and counted via validate_rx_dropped.
 */
static void testapp_stats_rx_dropped(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_RX_DROPPED");
	pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
	test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
		XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
	pkt_stream_receive_half(test);
	test->ifobj_rx->validation_func = validate_rx_dropped;
	testapp_validate_traffic(test);
}

/* STAT_TX_INVALID: make half the Tx descriptors oversized and expect them to
 * be counted as invalid by validate_tx_invalid_descs.
 */
static void testapp_stats_tx_invalid_descs(struct test_spec *test)
{
	test_spec_set_name(test, "STAT_TX_INVALID");
	pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
	test->ifobj_tx->validation_func = validate_tx_invalid_descs;
	testapp_validate_traffic(test);
}
1606018a8e75SMaciej Fijalkowski static void testapp_stats_rx_full(struct test_spec *test) 1607018a8e75SMaciej Fijalkowski { 1608018a8e75SMaciej Fijalkowski test_spec_set_name(test, "STAT_RX_FULL"); 1609018a8e75SMaciej Fijalkowski pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE); 1610018a8e75SMaciej Fijalkowski test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, 1611018a8e75SMaciej Fijalkowski DEFAULT_UMEM_BUFFERS, PKT_SIZE); 1612018a8e75SMaciej Fijalkowski if (!test->ifobj_rx->pkt_stream) 1613018a8e75SMaciej Fijalkowski exit_with_error(ENOMEM); 1614018a8e75SMaciej Fijalkowski 1615018a8e75SMaciej Fijalkowski test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS; 1616018a8e75SMaciej Fijalkowski test->ifobj_rx->release_rx = false; 1617018a8e75SMaciej Fijalkowski test->ifobj_rx->validation_func = validate_rx_full; 1618018a8e75SMaciej Fijalkowski testapp_validate_traffic(test); 1619018a8e75SMaciej Fijalkowski } 1620018a8e75SMaciej Fijalkowski 1621018a8e75SMaciej Fijalkowski static void testapp_stats_fill_empty(struct test_spec *test) 1622018a8e75SMaciej Fijalkowski { 1623018a8e75SMaciej Fijalkowski test_spec_set_name(test, "STAT_RX_FILL_EMPTY"); 1624018a8e75SMaciej Fijalkowski pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE); 1625018a8e75SMaciej Fijalkowski test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, 1626018a8e75SMaciej Fijalkowski DEFAULT_UMEM_BUFFERS, PKT_SIZE); 1627018a8e75SMaciej Fijalkowski if (!test->ifobj_rx->pkt_stream) 1628018a8e75SMaciej Fijalkowski exit_with_error(ENOMEM); 1629018a8e75SMaciej Fijalkowski 1630018a8e75SMaciej Fijalkowski test->ifobj_rx->use_fill_ring = false; 1631018a8e75SMaciej Fijalkowski test->ifobj_rx->validation_func = validate_fill_empty; 1632018a8e75SMaciej Fijalkowski testapp_validate_traffic(test); 1633018a8e75SMaciej Fijalkowski } 1634018a8e75SMaciej Fijalkowski 1635018a8e75SMaciej Fijalkowski /* Simple test */ 
1636018a8e75SMaciej Fijalkowski static bool hugepages_present(struct ifobject *ifobject) 1637018a8e75SMaciej Fijalkowski { 16382ddade32SMagnus Karlsson size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size; 1639018a8e75SMaciej Fijalkowski void *bufs; 1640018a8e75SMaciej Fijalkowski 1641018a8e75SMaciej Fijalkowski bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, 164202e93e04SMagnus Karlsson MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0); 1643018a8e75SMaciej Fijalkowski if (bufs == MAP_FAILED) 1644018a8e75SMaciej Fijalkowski return false; 1645018a8e75SMaciej Fijalkowski 16462ddade32SMagnus Karlsson mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE; 1647018a8e75SMaciej Fijalkowski munmap(bufs, mmap_sz); 1648018a8e75SMaciej Fijalkowski return true; 1649018a8e75SMaciej Fijalkowski } 1650018a8e75SMaciej Fijalkowski 1651018a8e75SMaciej Fijalkowski static bool testapp_unaligned(struct test_spec *test) 1652018a8e75SMaciej Fijalkowski { 1653018a8e75SMaciej Fijalkowski if (!hugepages_present(test->ifobj_tx)) { 1654018a8e75SMaciej Fijalkowski ksft_test_result_skip("No 2M huge pages present.\n"); 1655018a8e75SMaciej Fijalkowski return false; 1656018a8e75SMaciej Fijalkowski } 1657018a8e75SMaciej Fijalkowski 1658018a8e75SMaciej Fijalkowski test_spec_set_name(test, "UNALIGNED_MODE"); 1659018a8e75SMaciej Fijalkowski test->ifobj_tx->umem->unaligned_mode = true; 1660018a8e75SMaciej Fijalkowski test->ifobj_rx->umem->unaligned_mode = true; 1661018a8e75SMaciej Fijalkowski /* Let half of the packets straddle a buffer boundrary */ 1662018a8e75SMaciej Fijalkowski pkt_stream_replace_half(test, PKT_SIZE, -PKT_SIZE / 2); 1663018a8e75SMaciej Fijalkowski test->ifobj_rx->pkt_stream->use_addr_for_fill = true; 1664018a8e75SMaciej Fijalkowski testapp_validate_traffic(test); 1665018a8e75SMaciej Fijalkowski 1666018a8e75SMaciej Fijalkowski return true; 1667018a8e75SMaciej Fijalkowski } 1668018a8e75SMaciej Fijalkowski 1669018a8e75SMaciej Fijalkowski 
/* Send and validate a single well-formed packet as a data-path sanity check. */
static void testapp_single_pkt(struct test_spec *test)
{
	struct pkt pkts[] = {{0x1000, PKT_SIZE, 0, true}};

	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	testapp_validate_traffic(test);
}

/* Feed a mix of valid and invalid TX descriptors and verify that only the
 * ones marked valid make it through. NOTE: the pkts[N] fixups below are
 * tied to the positions of entries in this table — keep them in sync.
 */
static void testapp_invalid_desc(struct test_spec *test)
{
	u64 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
	struct pkt pkts[] = {
		/* Zero packet address allowed */
		{0, PKT_SIZE, 0, true},
		/* Allowed packet */
		{0x1000, PKT_SIZE, 0, true},
		/* Straddling the start of umem */
		{-2, PKT_SIZE, 0, false},
		/* Packet too large */
		{0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
		/* Up to end of umem allowed */
		{umem_size - PKT_SIZE, PKT_SIZE, 0, true},
		/* After umem ends */
		{umem_size, PKT_SIZE, 0, false},
		/* Straddle the end of umem */
		{umem_size - PKT_SIZE / 2, PKT_SIZE, 0, false},
		/* Straddle a page boundary */
		{0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false},
		/* Straddle a 2K boundary */
		{0x3800 - PKT_SIZE / 2, PKT_SIZE, 0, true},
		/* Valid packet for synch so that something is received */
		{0x4000, PKT_SIZE, 0, true}};

	if (test->ifobj_tx->umem->unaligned_mode) {
		/* Crossing a page boundary allowed */
		pkts[7].valid = true;
	}
	if (test->ifobj_tx->umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
		/* Crossing a 2K frame size boundary not allowed */
		pkts[8].valid = false;
	}

	if (test->ifobj_tx->shared_umem) {
		/* Shift the end-of-umem cases into the second (shared) umem region. */
		pkts[4].addr += umem_size;
		pkts[5].addr += umem_size;
		pkts[6].addr += umem_size;
	}

	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	testapp_validate_traffic(test);
}

/* Attach the xsk_xdp_drop program and expect only half the stream to arrive. */
static void testapp_xdp_drop(struct test_spec *test)
{
	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;

	test_spec_set_name(test, "XDP_DROP_HALF");
	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
			       skel_rx->maps.xsk, skel_tx->maps.xsk);

	pkt_stream_receive_half(test);
	testapp_validate_traffic(test);
}

/* Attach the metadata-populating XDP program, zero its per-packet counter in
 * the skeleton's .bss map, and validate traffic with metadata checking on.
 */
static void testapp_xdp_metadata_count(struct test_spec *test)
{
	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
	struct bpf_map *data_map;
	int count = 0;
	int key = 0;

	test_spec_set_name(test, "XDP_METADATA_COUNT");
	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata,
			       skel_tx->progs.xsk_xdp_populate_metadata,
			       skel_rx->maps.xsk, skel_tx->maps.xsk);
	test->ifobj_rx->use_metadata = true;

	/* The counter lives in the skeleton's internal .bss map. */
	data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
	if (!data_map || !bpf_map__is_internal(data_map))
		exit_with_error(ENOMEM);

	if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY))
		exit_with_error(errno);

	testapp_validate_traffic(test);
}

/* Fill the TX queue with oversized frames so poll() must time out on TX. */
static void testapp_poll_txq_tmout(struct test_spec *test)
{
	test_spec_set_name(test, "POLL_TXQ_FULL");

	test->ifobj_tx->use_poll = true;
	/* create invalid frame by set umem frame_size and pkt length equal to 2048 */
	test->ifobj_tx->umem->frame_size = 2048;
	pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);
	testapp_validate_traffic_single_thread(test, test->ifobj_tx);
}

/* Poll an RX queue that never receives anything; expect a timeout. */
static void testapp_poll_rxq_tmout(struct test_spec *test)
{
	test_spec_set_name(test, "POLL_RXQ_EMPTY");
	test->ifobj_rx->use_poll = true;
	testapp_validate_traffic_single_thread(test, test->ifobj_rx);
}

/* Open and load the xsk_xdp_progs skeleton for this interface.
 * Returns 0 on success, a libbpf error code otherwise.
 */
static int xsk_load_xdp_programs(struct ifobject *ifobj)
{
	ifobj->xdp_progs = xsk_xdp_progs__open_and_load();
	if (libbpf_get_error(ifobj->xdp_progs))
		return libbpf_get_error(ifobj->xdp_progs);

	return 0;
}

/* Release the skeleton loaded by xsk_load_xdp_programs(). */
static void xsk_unload_xdp_programs(struct ifobject *ifobj)
{
	xsk_xdp_progs__destroy(ifobj->xdp_progs);
}

/* Initialize one interface object: MAC/IP/port endpoints, its worker thread
 * entry point, and its XDP program skeleton. Exits the process on load error.
 */
static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac,
		       const char *dst_ip, const char *src_ip, const u16 dst_port,
		       const u16 src_port, thread_func_t func_ptr)
{
	struct in_addr ip;
	int err;

	memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN);
	memcpy(ifobj->src_mac, src_mac, ETH_ALEN);

	inet_aton(dst_ip, &ip);
	ifobj->dst_ip = ip.s_addr;

	inet_aton(src_ip, &ip);
	ifobj->src_ip = ip.s_addr;

	ifobj->dst_port = dst_port;
	ifobj->src_port = src_port;

	ifobj->func_ptr = func_ptr;

	err = xsk_load_xdp_programs(ifobj);
	if (err) {
		printf("Error loading XDP program\n");
		exit_with_error(err);
	}
}

/* Dispatch a single test case for the given mode, report PASS if it did not
 * fail, and restore the default packet streams afterwards. Unknown types are
 * silently ignored (default case).
 */
static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type)
{
	switch (type) {
	case TEST_TYPE_STATS_RX_DROPPED:
		if (mode == TEST_MODE_ZC) {
			ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n");
			return;
		}
		testapp_stats_rx_dropped(test);
		break;
	case TEST_TYPE_STATS_TX_INVALID_DESCS:
		testapp_stats_tx_invalid_descs(test);
		break;
	case TEST_TYPE_STATS_RX_FULL:
		testapp_stats_rx_full(test);
		break;
	case TEST_TYPE_STATS_FILL_EMPTY:
		testapp_stats_fill_empty(test);
		break;
	case TEST_TYPE_TEARDOWN:
		testapp_teardown(test);
		break;
	case TEST_TYPE_BIDI:
		testapp_bidi(test);
		break;
	case TEST_TYPE_BPF_RES:
		testapp_bpf_res(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION:
		test_spec_set_name(test, "RUN_TO_COMPLETION");
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT:
		test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT");
		testapp_single_pkt(test);
		break;
	case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME:
		test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
		test->ifobj_tx->umem->frame_size = 2048;
		test->ifobj_rx->umem->frame_size = 2048;
		pkt_stream_replace(test, DEFAULT_PKT_CNT, PKT_SIZE);
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_RX_POLL:
		test->ifobj_rx->use_poll = true;
		test_spec_set_name(test, "POLL_RX");
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_TX_POLL:
		test->ifobj_tx->use_poll = true;
		test_spec_set_name(test, "POLL_TX");
		testapp_validate_traffic(test);
		break;
	case TEST_TYPE_POLL_TXQ_TMOUT:
		testapp_poll_txq_tmout(test);
		break;
	case TEST_TYPE_POLL_RXQ_TMOUT:
		testapp_poll_rxq_tmout(test);
		break;
	case TEST_TYPE_ALIGNED_INV_DESC:
		test_spec_set_name(test, "ALIGNED_INV_DESC");
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME:
		test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE");
		test->ifobj_tx->umem->frame_size = 2048;
		test->ifobj_rx->umem->frame_size = 2048;
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_UNALIGNED_INV_DESC:
		if (!hugepages_present(test->ifobj_tx)) {
			ksft_test_result_skip("No 2M huge pages present.\n");
			return;
		}
		test_spec_set_name(test, "UNALIGNED_INV_DESC");
		test->ifobj_tx->umem->unaligned_mode = true;
		test->ifobj_rx->umem->unaligned_mode = true;
		testapp_invalid_desc(test);
		break;
	case TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME: {
		u64 page_size, umem_size;

		if (!hugepages_present(test->ifobj_tx)) {
			ksft_test_result_skip("No 2M huge pages present.\n");
			return;
		}
		test_spec_set_name(test, "UNALIGNED_INV_DESC_4K1_FRAME_SIZE");
		/* Odd frame size so the UMEM doesn't end near a page boundary. */
		test->ifobj_tx->umem->frame_size = 4001;
		test->ifobj_rx->umem->frame_size = 4001;
		test->ifobj_tx->umem->unaligned_mode = true;
		test->ifobj_rx->umem->unaligned_mode = true;
		/* This test exists to test descriptors that straddle the end of
		 * the UMEM but not a page.
		 */
		page_size = sysconf(_SC_PAGESIZE);
		umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
		assert(umem_size % page_size > PKT_SIZE);
		assert(umem_size % page_size < page_size - PKT_SIZE);
		testapp_invalid_desc(test);
		break;
	}
	case TEST_TYPE_UNALIGNED:
		if (!testapp_unaligned(test))
			return;
		break;
	case TEST_TYPE_HEADROOM:
		testapp_headroom(test);
		break;
	case TEST_TYPE_XDP_DROP_HALF:
		testapp_xdp_drop(test);
		break;
	case TEST_TYPE_XDP_METADATA_COUNT:
		testapp_xdp_metadata_count(test);
		break;
	default:
		break;
	}

	if (!test->fail)
		ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
				      test->name);
	pkt_stream_restore_default(test);
}

/* Allocate an interface object with its socket array and UMEM.
 * Returns NULL on allocation failure (goto-style unwind frees partial state).
 */
static struct ifobject *ifobject_create(void)
{
	struct ifobject *ifobj;

	ifobj = calloc(1, sizeof(struct ifobject));
	if (!ifobj)
		return NULL;

	ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
	if (!ifobj->xsk_arr)
		goto out_xsk_arr;

	ifobj->umem = calloc(1, sizeof(*ifobj->umem));
	if (!ifobj->umem)
		goto out_umem;

	return ifobj;

out_umem:
	free(ifobj->xsk_arr);
out_xsk_arr:
	free(ifobj);
	return NULL;
}

/* Free everything allocated by ifobject_create(). */
static void ifobject_delete(struct ifobject *ifobj)
{
	free(ifobj->umem);
	free(ifobj->xsk_arr);
	free(ifobj);
}

/* Probe whether the interface supports native (driver-mode) XDP by loading a
 * minimal XDP_PASS program and attempting to attach it with XDP_FLAGS_DRV_MODE.
 */
static bool is_xdp_supported(int ifindex)
{
	int flags = XDP_FLAGS_DRV_MODE;

	LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = flags);
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
		BPF_EXIT_INSN()
	};
	int prog_fd, insn_cnt = ARRAY_SIZE(insns);
	int err;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
	if (prog_fd < 0)
		return false;

	err = bpf_xdp_attach(ifindex, prog_fd, flags, NULL);
	if (err) {
		close(prog_fd);
		return false;
	}

	/* Probe only: detach immediately and clean up. */
	bpf_xdp_detach(ifindex, flags, NULL);
	close(prog_fd);

	return true;
}

int main(int argc, char **argv)
{
	struct pkt_stream *rx_pkt_stream_default;
	struct pkt_stream *tx_pkt_stream_default;
	struct ifobject *ifobj_tx, *ifobj_rx;
	/* SKB mode is always available; DRV and ZC are added after probing. */
	int modes = TEST_MODE_SKB + 1;
	u32 i, j, failed_tests = 0;
	struct test_spec test;
	bool shared_netdev;

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	ifobj_tx = ifobject_create();
	if (!ifobj_tx)
		exit_with_error(ENOMEM);
	ifobj_rx = ifobject_create();
	if (!ifobj_rx)
		exit_with_error(ENOMEM);

	setlocale(LC_ALL, "");

	parse_command_line(ifobj_tx, ifobj_rx, argc, argv);

	/* TX and RX on the same netdev implies a shared UMEM. */
	shared_netdev = (ifobj_tx->ifindex == ifobj_rx->ifindex);
	ifobj_tx->shared_umem = shared_netdev;
	ifobj_rx->shared_umem = shared_netdev;

	if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) {
		usage(basename(argv[0]));
		ksft_exit_xfail();
	}

	if (is_xdp_supported(ifobj_tx->ifindex)) {
		modes++;
		if (ifobj_zc_avail(ifobj_tx))
			modes++;
	}

	init_iface(ifobj_rx, MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2,
		   worker_testapp_validate_rx);
	init_iface(ifobj_tx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1,
		   worker_testapp_validate_tx);

	test_spec_init(&test, ifobj_tx, ifobj_rx, 0);
	tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
	rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, PKT_SIZE);
	if (!tx_pkt_stream_default || !rx_pkt_stream_default)
		exit_with_error(ENOMEM);
	test.tx_pkt_stream_default = tx_pkt_stream_default;
	test.rx_pkt_stream_default = rx_pkt_stream_default;

	ksft_set_plan(modes * TEST_TYPE_MAX);

	/* Run every test type in every available mode. */
	for (i = 0; i < modes; i++) {
		for (j = 0; j < TEST_TYPE_MAX; j++) {
			test_spec_init(&test, ifobj_tx, ifobj_rx, i);
			run_pkt_test(&test, i, j);
			usleep(USLEEP_MAX);

			if (test.fail)
				failed_tests++;
		}
	}

	pkt_stream_delete(tx_pkt_stream_default);
	pkt_stream_delete(rx_pkt_stream_default);
	xsk_unload_xdp_programs(ifobj_tx);
	xsk_unload_xdp_programs(ifobj_rx);
	ifobject_delete(ifobj_tx);
	ifobject_delete(ifobj_rx);

	if (failed_tests)
		ksft_exit_fail();
	else
		ksft_exit_pass();
}