
Searched refs:xsk (Results 1 – 25 of 50) sorted by relevance


/openbmc/linux/tools/testing/selftests/bpf/prog_tests/
xdp_metadata.c
37 struct xsk { struct
47 static int open_xsk(int ifindex, struct xsk *xsk) in open_xsk() argument
66 xsk->umem_area = mmap(NULL, UMEM_SIZE, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); in open_xsk()
67 if (!ASSERT_NEQ(xsk->umem_area, MAP_FAILED, "mmap")) in open_xsk()
70 ret = xsk_umem__create(&xsk->umem, in open_xsk()
71 xsk->umem_area, UMEM_SIZE, in open_xsk()
72 &xsk->fill, in open_xsk()
73 &xsk->comp, in open_xsk()
78 ret = xsk_socket__create(&xsk->socket, ifindex, QUEUE_ID, in open_xsk()
79 xsk->umem, in open_xsk()
[all …]
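
The open_xsk() excerpt above maps an anonymous UMEM area, registers it with xsk_umem__create(), and then binds an AF_XDP socket to one RX queue with xsk_socket__create(). A minimal sketch of that sequence, assuming the helper declarations in the selftests' xsk.h; the frame count, ring sizes and queue id below are illustrative, not values from the original file:

/* Sketch only: illustrative constants, selftests-style xsk.h API. */
#include <sys/mman.h>
#include <linux/if_xdp.h>
#include "xsk.h"

#define FRAME_SIZE 4096
#define NUM_FRAMES 256
#define UMEM_SIZE  (FRAME_SIZE * NUM_FRAMES)
#define QUEUE_ID   0

struct xsk_example {
	void *umem_area;
	struct xsk_umem *umem;
	struct xsk_ring_prod fill;
	struct xsk_ring_cons comp;
	struct xsk_ring_cons rx;
	struct xsk_ring_prod tx;
	struct xsk_socket *socket;
};

static int open_xsk_example(int ifindex, struct xsk_example *xsk)
{
	const struct xsk_socket_config socket_config = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.bind_flags = XDP_COPY,
	};
	const struct xsk_umem_config umem_config = {
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = FRAME_SIZE,
		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
	};
	int ret;

	/* Anonymous mapping that backs the UMEM. */
	xsk->umem_area = mmap(NULL, UMEM_SIZE, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (xsk->umem_area == MAP_FAILED)
		return -1;

	/* Register the UMEM and create its fill/completion rings. */
	ret = xsk_umem__create(&xsk->umem, xsk->umem_area, UMEM_SIZE,
			       &xsk->fill, &xsk->comp, &umem_config);
	if (ret)
		return ret;

	/* Bind an AF_XDP socket to (ifindex, QUEUE_ID) in copy mode. */
	return xsk_socket__create(&xsk->socket, ifindex, QUEUE_ID, xsk->umem,
				  &xsk->rx, &xsk->tx, &socket_config);
}
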
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
pool.c
23 static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk) in mlx5e_xsk_get_pools() argument
25 if (!xsk->pools) { in mlx5e_xsk_get_pools()
26 xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS, in mlx5e_xsk_get_pools()
27 sizeof(*xsk->pools), GFP_KERNEL); in mlx5e_xsk_get_pools()
28 if (unlikely(!xsk->pools)) in mlx5e_xsk_get_pools()
32 xsk->refcnt++; in mlx5e_xsk_get_pools()
33 xsk->ever_used = true; in mlx5e_xsk_get_pools()
38 static void mlx5e_xsk_put_pools(struct mlx5e_xsk *xsk) in mlx5e_xsk_put_pools() argument
40 if (!--xsk->refcnt) { in mlx5e_xsk_put_pools()
41 kfree(xsk->pools); in mlx5e_xsk_put_pools()
[all …]
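
The pool.c hits above show a refcounted, lazily allocated array of per-channel XSK buffer-pool pointers: the array is created on first use and freed when the last reference is dropped. A sketch completed from the visible lines; the -ENOMEM return value and clearing the pointer after kfree() are assumptions:

static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
{
	if (!xsk->pools) {
		/* One slot per channel, allocated on first use. */
		xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,
				     sizeof(*xsk->pools), GFP_KERNEL);
		if (unlikely(!xsk->pools))
			return -ENOMEM;	/* assumed error value */
	}

	xsk->refcnt++;
	xsk->ever_used = true;

	return 0;
}

static void mlx5e_xsk_put_pools(struct mlx5e_xsk *xsk)
{
	/* Free the array when the last user drops its reference. */
	if (!--xsk->refcnt) {
		kfree(xsk->pools);
		xsk->pools = NULL;	/* assumed reset */
	}
}
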
setup.c
12 struct mlx5e_xsk_param *xsk) in mlx5e_legacy_rq_validate_xsk() argument
14 if (!mlx5e_rx_is_linear_skb(mdev, params, xsk)) { in mlx5e_legacy_rq_validate_xsk()
28 struct mlx5e_xsk_param *xsk, in mlx5e_validate_xsk_param() argument
34 if ((size_t)xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) { in mlx5e_validate_xsk_param()
35 mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size, in mlx5e_validate_xsk_param()
45 return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk); in mlx5e_validate_xsk_param()
47 return !mlx5e_legacy_rq_validate_xsk(mdev, params, xsk); in mlx5e_validate_xsk_param()
53 struct mlx5e_xsk_param *xsk, in mlx5e_build_xsk_cparam() argument
57 mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq); in mlx5e_build_xsk_cparam()
58 mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq); in mlx5e_build_xsk_cparam()
[all …]
pool.h
10 struct mlx5e_xsk *xsk, u16 ix) in mlx5e_xsk_get_pool() argument
12 if (!xsk || !xsk->pools) in mlx5e_xsk_get_pool()
18 return xsk->pools[ix]; in mlx5e_xsk_get_pool()
22 void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk);
setup.h
12 struct mlx5e_xsk_param *xsk,
15 struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
/openbmc/linux/tools/testing/selftests/bpf/
xdp_hw_metadata.c
39 struct xsk { struct
50 struct xsk *rx_xsk; argument
57 static int open_xsk(int ifindex, struct xsk *xsk, __u32 queue_id) in open_xsk() argument
76 xsk->umem_area = mmap(NULL, UMEM_SIZE, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); in open_xsk()
77 if (xsk->umem_area == MAP_FAILED) in open_xsk()
80 ret = xsk_umem__create(&xsk->umem, in open_xsk()
81 xsk->umem_area, UMEM_SIZE, in open_xsk()
82 &xsk->fill, in open_xsk()
83 &xsk->comp, in open_xsk()
88 ret = xsk_socket__create(&xsk->socket, ifindex, queue_id, in open_xsk()
[all …]
xsk.c
97 int xsk_socket__fd(const struct xsk_socket *xsk) in xsk_socket__fd() argument
99 return xsk ? xsk->fd : -EINVAL; in xsk_socket__fd()
445 int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk) in xsk_update_xskmap() argument
451 sock_fd = xsk_socket__fd(xsk); in xsk_update_xskmap()
500 static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk, in xsk_create_ctx() argument
514 err = xsk_create_umem_rings(umem, xsk->fd, fill, comp); in xsk_create_ctx()
549 struct xsk_socket *xsk; in xsk_socket__create_shared() local
558 xsk = calloc(1, sizeof(*xsk)); in xsk_socket__create_shared()
559 if (!xsk) in xsk_socket__create_shared()
562 err = xsk_set_xdp_socket_config(&xsk->config, usr_config); in xsk_socket__create_shared()
[all …]
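
The xsk.c hits above include xsk_update_xskmap(), which fetches the AF_XDP socket's fd and stores it in an XSKMAP so bpf_redirect_map() can steer packets to that socket. A minimal sketch of that update, assuming libbpf's bpf_map__fd()/bpf_map_update_elem() and the xsk_socket__fd() helper from the selftests' xsk.h; the fixed index 0 is for illustration (it would normally be the RX queue id):

#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "xsk.h"

static int update_xskmap_example(struct bpf_map *map, struct xsk_socket *xsk)
{
	int map_fd = bpf_map__fd(map);		/* BPF_MAP_TYPE_XSKMAP fd */
	int sock_fd = xsk_socket__fd(xsk);	/* AF_XDP socket fd */
	__u32 index = 0;			/* slot, typically the RX queue id */

	if (map_fd < 0 || sock_fd < 0)
		return -EINVAL;

	return bpf_map_update_elem(map_fd, &index, &sock_fd, 0);
}
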
xskxceiver.c
225 static void enable_busy_poll(struct xsk_socket_info *xsk) in enable_busy_poll() argument
230 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL, in enable_busy_poll()
235 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL, in enable_busy_poll()
240 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET, in enable_busy_poll()
245 static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, in __xsk_configure_socket() argument
252 xsk->umem = umem; in __xsk_configure_socket()
253 cfg.rx_size = xsk->rxqsize; in __xsk_configure_socket()
261 txr = ifobject->tx_on ? &xsk->tx : NULL; in __xsk_configure_socket()
262 rxr = ifobject->rx_on ? &xsk->rx : NULL; in __xsk_configure_socket()
263 return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg); in __xsk_configure_socket()
[all …]
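
enable_busy_poll() in the xskxceiver.c hits above turns on preferred busy polling on the AF_XDP socket fd with three SOL_SOCKET options. A sketch of that setup; the 20 usec timeout and 64-packet budget are illustrative values, not taken from xskxceiver.c, and the fallback defines cover libc headers that predate these options (the numbers match include/uapi/asm-generic/socket.h):

#include <sys/socket.h>

#ifndef SO_BUSY_POLL
#define SO_BUSY_POLL 46
#endif
#ifndef SO_PREFER_BUSY_POLL
#define SO_PREFER_BUSY_POLL 69
#endif
#ifndef SO_BUSY_POLL_BUDGET
#define SO_BUSY_POLL_BUDGET 70
#endif

static int enable_busy_poll_example(int sock_fd)
{
	int opt;

	opt = 1;	/* prefer busy polling over interrupt-driven RX */
	if (setsockopt(sock_fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
		       &opt, sizeof(opt)))
		return -1;

	opt = 20;	/* busy-poll for up to 20 usec per syscall */
	if (setsockopt(sock_fd, SOL_SOCKET, SO_BUSY_POLL,
		       &opt, sizeof(opt)))
		return -1;

	opt = 64;	/* process at most 64 packets per busy-poll round */
	return setsockopt(sock_fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
			  &opt, sizeof(opt));
}
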
xsk.h
188 int xsk_socket__fd(const struct xsk_socket *xsk);
207 int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk);
223 int xsk_socket__create(struct xsk_socket **xsk,
240 void xsk_socket__delete(struct xsk_socket *xsk);
xskxceiver.h
120 struct xsk_socket *xsk; member
147 struct xsk_socket_info *xsk; member
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
params.c
19 u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk) in mlx5e_mpwrq_page_shift() argument
21 u8 req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT; in mlx5e_mpwrq_page_shift()
25 if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift)) in mlx5e_mpwrq_page_shift()
32 mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk) in mlx5e_mpwrq_umr_mode() argument
43 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); in mlx5e_mpwrq_umr_mode()
44 bool unaligned = xsk ? xsk->unaligned : false; in mlx5e_mpwrq_umr_mode()
47 if (xsk) { in mlx5e_mpwrq_umr_mode()
48 oversized = xsk->chunk_size < (1 << page_shift); in mlx5e_mpwrq_umr_mode()
49 WARN_ON_ONCE(xsk->chunk_size > (1 << page_shift)); in mlx5e_mpwrq_umr_mode()
65 if (xsk->chunk_size % 3 == 0 && is_power_of_2(xsk->chunk_size / 3)) in mlx5e_mpwrq_umr_mode()
[all …]
params.h
59 u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
61 mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
88 struct mlx5e_xsk_param *xsk);
94 struct mlx5e_xsk_param *xsk);
97 struct mlx5e_xsk_param *xsk);
100 struct mlx5e_xsk_param *xsk);
103 struct mlx5e_xsk_param *xsk);
118 struct mlx5e_xsk_param *xsk);
121 struct mlx5e_xsk_param *xsk);
125 struct mlx5e_xsk_param *xsk);
[all …]
rx_res.h
42 unsigned int ix, bool xsk);
/openbmc/linux/tools/testing/selftests/bpf/progs/
xsk_xdp_progs.c
13 } xsk SEC(".maps");
20 return bpf_redirect_map(&xsk, 0, XDP_DROP); in xsk_def_prog()
29 return bpf_redirect_map(&xsk, 0, XDP_DROP); in xsk_xdp_drop()
52 return bpf_redirect_map(&xsk, 0, XDP_DROP); in xsk_xdp_populate_metadata()
xdp_metadata.c
13 } xsk SEC(".maps");
61 return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS); in rx()
xdp_hw_metadata.c
13 } xsk SEC(".maps");
90 return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS); in rx()
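
The three BPF programs above each declare an XSKMAP named xsk and call bpf_redirect_map() to steer packets into it, dropping (XDP_DROP) or passing (XDP_PASS) packets when no socket is bound to the slot. A minimal standalone sketch of such a program; the map size is illustrative:

/* Minimal XDP program redirecting to an AF_XDP socket via an XSKMAP. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 64);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} xsk SEC(".maps");

SEC("xdp")
int redirect_to_xsk(struct xdp_md *ctx)
{
	/* Redirect to the socket registered for this RX queue;
	 * XDP_PASS is the fallback when the slot is empty. */
	return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS);
}

char _license[] SEC("license") = "GPL";
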
/openbmc/linux/Documentation/bpf/
map_xskmap.rst
21 | xsk A | xsk B | xsk C |<---+ User space
28 | | key | xsk | | |
30 | | | | 0 | xsk A | | |
32 | | | | 1 | xsk B | | |
34 | | prog | | 2 | xsk C | |
/openbmc/qemu/net/
af-xdp.c
35 struct xsk_socket *xsk; member
65 qemu_set_fd_handler(xsk_socket__fd(s->xsk), in af_xdp_update_fd_handler()
269 xsk_socket__delete(s->xsk); in af_xdp_cleanup()
270 s->xsk = NULL; in af_xdp_cleanup()
366 if (xsk_socket__create(&s->xsk, s->ifname, queue_id, in af_xdp_socket_create()
374 if (xsk_socket__create(&s->xsk, s->ifname, queue_id, in af_xdp_socket_create()
380 if (xsk_socket__create(&s->xsk, s->ifname, queue_id, in af_xdp_socket_create()
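
The three consecutive xsk_socket__create() calls in af-xdp.c above suggest a create-with-fallback chain. A hedged sketch of that pattern, assuming the libxdp-style xsk_socket__create() that takes an interface name (as the snippet does) and illustrative config values; the exact flag combinations QEMU tries are not shown here:

#include <linux/if_xdp.h>
#include <xdp/xsk.h>

static int af_xdp_socket_create_example(struct xsk_socket **xsk,
					 const char *ifname, __u32 queue_id,
					 struct xsk_umem *umem,
					 struct xsk_ring_cons *rx,
					 struct xsk_ring_prod *tx)
{
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
	};

	/* Prefer zero-copy if the driver supports it. */
	cfg.bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY;
	if (!xsk_socket__create(xsk, ifname, queue_id, umem, rx, tx, &cfg))
		return 0;

	/* Fall back to copy mode, which any XDP-capable driver handles. */
	cfg.bind_flags = XDP_USE_NEED_WAKEUP | XDP_COPY;
	return xsk_socket__create(xsk, ifname, queue_id, umem, rx, tx, &cfg);
}
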
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/
Makefile
29 en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
30 en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
en_main.c
732 struct mlx5e_xsk_param *xsk, in mlx5e_alloc_rq() argument
752 rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk); in mlx5e_alloc_rq()
772 rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); in mlx5e_alloc_rq()
773 rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk); in mlx5e_alloc_rq()
785 mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk); in mlx5e_alloc_rq()
787 if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk) && params->xdp_prog) in mlx5e_alloc_rq()
790 rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); in mlx5e_alloc_rq()
792 BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk)); in mlx5e_alloc_rq()
828 if (xsk) { in mlx5e_alloc_rq()
1213 struct mlx5e_xsk_param *xsk, int node, in mlx5e_open_rq() argument
[all …]
en.h
317 struct mlx5e_xsk *xsk; member
601 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
932 struct mlx5e_xsk xsk; member
1016 struct mlx5e_xsk_param *xsk, int node,
1205 void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
/openbmc/linux/net/xdp/
Makefile
2 obj-$(CONFIG_XDP_SOCKETS) += xsk.o xdp_umem.o xsk_queue.o xskmap.o
xsk_buff_pool.c
135 bpf.xsk.pool = NULL; in xp_disable_drv_zc()
136 bpf.xsk.queue_id = pool->queue_id; in xp_disable_drv_zc()
201 bpf.xsk.pool = pool; in xp_assign_dev()
202 bpf.xsk.queue_id = queue_id; in xp_assign_dev()
/openbmc/linux/drivers/net/ethernet/freescale/dpaa2/
Makefile
10 …-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o dpaa2-xsk.o
/openbmc/linux/Documentation/networking/
af_xdp.rst
212 in tools/lib/bpf/xsk.h for facilitating the use of AF_XDP. It
527 On the Rx path in copy-mode, the xsk core copies the XDP data into
557 The XDP code sample included in tools/lib/bpf/xsk.c is the following:
636 void rx_packets(struct xsk_socket_info *xsk)
642 int rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
644 xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
647 struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
648 char *frag = xsk_umem__get_data(xsk->umem->buffer, desc->addr);
661 *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = desc->addr;
664 xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
[all …]
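
The rx_packets() fragments above come from the af_xdp.rst sample application: peek a batch off the RX ring, reserve the same number of slots on the UMEM fill ring, process each descriptor, recycle the buffers, then submit and release. A completed sketch of that loop; struct xsk_socket_info with its rx/umem->fq/umem->buffer fields, opt_batch_size and process_packet() are assumptions modelled on the sample, not definitions from this document:

/* Assumes the xsk.h ring helpers and the sample's struct xsk_socket_info. */
static void rx_packets(struct xsk_socket_info *xsk)
{
	__u32 idx_rx = 0, idx_fq = 0, ret;
	unsigned int rcvd, i;

	/* Take up to a batch of filled descriptors off the RX ring. */
	rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
	if (!rcvd)
		return;

	/* Reserve matching slots on the fill ring so the buffers can be
	 * handed back to the kernel; retry until space is available. */
	ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
	while (ret != rcvd)
		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc =
			xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
		char *frag = xsk_umem__get_data(xsk->umem->buffer, desc->addr);

		process_packet(frag, desc->len);	/* hypothetical helper */

		/* Recycle the buffer back to the fill ring. */
		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = desc->addr;
	}

	xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);
}
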
