Searched refs:mpwqe (Results 1 – 13 of 13) sorted by relevance
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/

rx.c
    30  if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe))) in mlx5e_xsk_alloc_rx_mpwqe()
    36  rq->mpwqe.pages_per_wqe); in mlx5e_xsk_alloc_rx_mpwqe()
    44  for (; batch < rq->mpwqe.pages_per_wqe; batch++) { in mlx5e_xsk_alloc_rx_mpwqe()
    50  pi = mlx5e_icosq_get_next_pi(icosq, rq->mpwqe.umr_wqebbs); in mlx5e_xsk_alloc_rx_mpwqe()
    52  memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe)); in mlx5e_xsk_alloc_rx_mpwqe()
    54  if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) { in mlx5e_xsk_alloc_rx_mpwqe()
    64  } else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) { in mlx5e_xsk_alloc_rx_mpwqe()
    75  } else if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) { in mlx5e_xsk_alloc_rx_mpwqe()
    76  u32 mapping_size = 1 << (rq->mpwqe.page_shift - 2); in mlx5e_xsk_alloc_rx_mpwqe()
   101  __be32 pad_size = cpu_to_be32((1 << rq->mpwqe.page_shift) - in mlx5e_xsk_alloc_rx_mpwqe()
   [all …]
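
The rx.c hits together outline mlx5e_xsk_alloc_rx_mpwqe(): verify the XSK pool can supply a whole batch of pages, reserve the UMR WQE slot on the ICOSQ, copy in the prebuilt WQE template, then fill one mapping entry per page according to the UMR mode (note the quarter-page mapping_size on line 76 in TRIPLE mode). Below is a minimal userspace model of that flow; every name in it (rx_queue, pool_can_alloc, fill_entry) is a simplified stand-in, not the driver's API.

    /* Simplified model of the XSK MPWQE allocation path; all names are
     * stand-ins for the real mlx5e structures and helpers. */
    #include <stdbool.h>
    #include <stdio.h>

    enum umr_mode { UMR_ALIGNED, UMR_UNALIGNED, UMR_TRIPLE };

    struct rx_queue {
        int pages_per_wqe;      /* pages described by one multi-packet WQE */
        int page_shift;         /* log2 of the RX "page" size */
        enum umr_mode umr_mode; /* entry format used in the UMR WQE */
    };

    static bool pool_can_alloc(int n) { return n <= 64; } /* stub pool check */

    static void fill_entry(int i, enum umr_mode mode, int page_shift)
    {
        if (mode == UMR_TRIPLE) {
            /* Mirrors the hit on line 76: quarter-page mappings. */
            unsigned int mapping_size = 1u << (page_shift - 2);
            printf("entry %d: mapping_size=%u\n", i, mapping_size);
        } else {
            printf("entry %d: one full page\n", i);
        }
    }

    static int alloc_rx_mpwqe(const struct rx_queue *rq)
    {
        if (!pool_can_alloc(rq->pages_per_wqe))
            return -1; /* mirrors the xsk_buff_can_alloc() early-out */
        /* The real code reserves a UMR slot and memcpy()s the template here. */
        for (int i = 0; i < rq->pages_per_wqe; i++)
            fill_entry(i, rq->umr_mode, rq->page_shift);
        return 0;
    }

    int main(void)
    {
        struct rx_queue rq = { .pages_per_wqe = 8, .page_shift = 14,
                               .umr_mode = UMR_TRIPLE };
        return alloc_rx_mpwqe(&rq);
    }
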
tx.c
   103  if (sq->mpwqe.wqe) in mlx5e_xsk_tx()
   115  if (sq->mpwqe.wqe) in mlx5e_xsk_tx()

/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/ |
en_rx.c
   558  if (bitmap_full(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe)) in mlx5e_free_rx_mpwqe()
   561  no_xdp_xmit = bitmap_empty(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe); in mlx5e_free_rx_mpwqe()
   570  for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) in mlx5e_free_rx_mpwqe()
   574  for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) { in mlx5e_free_rx_mpwqe()
   587  struct mlx5_wq_ll *wq = &rq->mpwqe.wq; in mlx5e_post_rx_mpwqe()
   642  struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; in mlx5e_build_shampo_hd_umr()
   722  struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; in mlx5e_alloc_rx_hd_mpwqe()
   752  index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1); in mlx5e_alloc_rx_hd_mpwqe()
   777  pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs); in mlx5e_alloc_rx_mpwqe()
   779  memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe)); in mlx5e_alloc_rx_mpwqe()
   [all …]
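
In mlx5e_free_rx_mpwqe(), skip_release_bitmap carries one bit per page of the WQE: a set bit means the page's release was already taken over (for example by XDP_TX), so bitmap_full() lets the whole free be skipped and bitmap_empty() tells the caller no page went to XDP. A small model of that bookkeeping, using a plain 64-bit mask instead of the kernel bitmap API; mpw_info and release_page are illustrative names, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    /* One bit per page of the multi-packet WQE; a set bit means "release
     * already handled elsewhere", e.g. the page was sent via XDP_TX. */
    struct mpw_info {
        uint64_t skip_release; /* supports up to 64 pages per WQE */
    };

    static void release_page(int i) { printf("release page %d\n", i); }

    static void free_rx_mpwqe(const struct mpw_info *wi, int pages_per_wqe)
    {
        uint64_t full = (pages_per_wqe >= 64) ? ~0ull
                                              : ((1ull << pages_per_wqe) - 1);
        if ((wi->skip_release & full) == full)
            return; /* every page already handed off: nothing to do */

        for (int i = 0; i < pages_per_wqe; i++) {
            if (wi->skip_release & (1ull << i))
                continue; /* XDP kept this page */
            release_page(i);
        }
    }

    int main(void)
    {
        struct mpw_info wi = { .skip_release = 0x0f }; /* pages 0-3 kept */
        free_rx_mpwqe(&wi, 8); /* releases pages 4-7 */
        return 0;
    }

The hit on line 752 is the usual power-of-two ring wrap: masking with hd_per_wq - 1 is equivalent to a modulo as long as hd_per_wq is a power of two.
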
en_main.c
   231  ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift, in mlx5e_build_umr_wqe()
   232  rq->mpwqe.umr_mode), in mlx5e_build_umr_wqe()
   237  cseg->umr_mkey = rq->mpwqe.umr_mkey_be; in mlx5e_build_umr_wqe()
   240  octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode); in mlx5e_build_umr_wqe()
   247  rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo), in mlx5e_rq_shampo_hd_alloc()
   249  if (!rq->mpwqe.shampo) in mlx5e_rq_shampo_hd_alloc()
   256  kvfree(rq->mpwqe.shampo); in mlx5e_rq_shampo_hd_free()
   261  struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; in mlx5e_rq_shampo_hd_info_alloc()
   286  kvfree(rq->mpwqe.shampo->bitmap); in mlx5e_rq_shampo_hd_info_free()
   287  kvfree(rq->mpwqe.shampo->info); in mlx5e_rq_shampo_hd_info_free()
   [all …]
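
The en_main.c hits show mlx5e_build_umr_wqe() sizing the control segment: the UMR WQE's byte size (a function of page_shift and umr_mode) is rounded up to whole data segments. A worked version of that arithmetic, assuming the usual 16-byte mlx5 data-segment size (MLX5_SEND_WQE_DS); the 200-byte WQE size is a made-up input:

    #include <stdio.h>

    #define DS_SIZE 16u /* assumed bytes per WQE data segment */

    /* Same rounding as the kernel's DIV_ROUND_UP() macro. */
    static unsigned int div_round_up(unsigned int n, unsigned int d)
    {
        return (n + d - 1) / d;
    }

    int main(void)
    {
        unsigned int umr_wqe_sz = 200; /* hypothetical WQE size in bytes */
        unsigned int ds_cnt = div_round_up(umr_wqe_sz, DS_SIZE);
        printf("ds_cnt = %u\n", ds_cnt); /* 13: 200 bytes round up to 13 */
        return 0;
    }
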
en_tx.c
   517  struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_tx_mpwqe_same_eseg()
   526  struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_tx_mpwqe_session_start()
   549  return sq->mpwqe.wqe; in mlx5e_tx_mpwqe_session_is_active()
   554  struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_tx_mpwqe_add_dseg()
   572  struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_tx_mpwqe_session_complete()
   629  if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) { in mlx5e_sq_xmit_mpwqe()
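
These en_tx.c hits trace the full TX session lifecycle: a session starts on the first packet, each subsequent packet appends one data segment as long as its ethernet segment matches the session's (same_eseg), and the WQE is completed when it fills up. A compact state-machine model of that lifecycle; the names, the eseg contents, and the MAX_DS cap are illustrative (the driver derives its limit from max_sq_mpw_wqebbs):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_DS 30 /* illustrative cap on data segments per WQE */

    struct eseg { unsigned char hdr[16]; }; /* stand-in ethernet segment */

    struct tx_session {
        struct eseg eseg; /* shared by every packet in the session */
        int ds_count;
        bool active;
    };

    static bool same_eseg(const struct tx_session *s, const struct eseg *e)
    {
        return memcmp(&s->eseg, e, sizeof(*e)) == 0;
    }

    static void session_start(struct tx_session *s, const struct eseg *e)
    {
        s->eseg = *e;
        s->ds_count = 0;
        s->active = true;
    }

    static void session_complete(struct tx_session *s)
    {
        printf("post WQE with %d segments\n", s->ds_count);
        s->active = false;
    }

    static void xmit_mpwqe(struct tx_session *s, const struct eseg *e)
    {
        if (!s->active || !same_eseg(s, e)) {
            if (s->active)
                session_complete(s); /* flush before switching esegs */
            session_start(s, e);
        }
        s->ds_count++; /* add_dseg: one data segment per packet */
        if (s->ds_count >= MAX_DS)
            session_complete(s); /* the is_full check from line 629 */
    }

    int main(void)
    {
        struct tx_session s = { .active = false };
        struct eseg a = { { 1 } }, b = { { 2 } };
        xmit_mpwqe(&s, &a);
        xmit_mpwqe(&s, &a);
        xmit_mpwqe(&s, &b); /* new eseg: flushes the first session */
        if (s.active)
            session_complete(&s);
        return 0;
    }
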
en.h
   431  struct mlx5e_tx_mpwqe mpwqe; member
   495  struct mlx5e_tx_mpwqe mpwqe; member
   687  } mpwqe; member
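
en.h embeds MPWQE state in three places: the TX-side queues (lines 431 and 495) share struct mlx5e_tx_mpwqe, while the RQ (line 687) carries its own anonymous mpwqe struct for receive-side parameters. A stripped-down illustration of that layout; the field lists are assumptions for the sketch, not the real definitions:

    #include <stdio.h>

    /* Stand-in for struct mlx5e_tx_mpwqe: TX session state. */
    struct tx_mpwqe_sketch {
        void *wqe;    /* NULL while no session is open */
        int ds_count; /* data segments accumulated so far */
    };

    struct sq_sketch {
        struct tx_mpwqe_sketch mpwqe; /* embedded, as on lines 431/495 */
    };

    struct rq_sketch {
        struct {                /* anonymous RX-side block, as on line 687 */
            int pages_per_wqe;
            int page_shift;
            int umr_wqebbs;
        } mpwqe;
    };

    int main(void)
    {
        struct rq_sketch rq = { .mpwqe = { .pages_per_wqe = 8 } };
        printf("pages_per_wqe = %d\n", rq.mpwqe.pages_per_wqe);
        return 0;
    }
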
en_stats.c
    277  s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe; in mlx5e_stats_grp_sw_update_stats_xdp_red()
    289  s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe; in mlx5e_stats_grp_sw_update_stats_xdpsq()
    301  s->tx_xsk_mpwqe += xsksq_stats->mpwqe; in mlx5e_stats_grp_sw_update_stats_xsksq()
   2074  { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
   2084  { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
   2115  { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
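
The first three hits fold each queue's mpwqe counter into the global software stats; the MLX5E_DECLARE_*_STAT entries build ethtool descriptor tables from a field name plus its offset in the stats struct. A generic model of that offsetof-based descriptor pattern; the macro and struct names here are simplified, not the driver's:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct xdpsq_stats { uint64_t xmit; uint64_t mpwqe; };

    /* Descriptor: stat name plus its byte offset inside the struct. */
    struct stat_desc { const char *name; size_t offset; };

    #define DECLARE_XDPSQ_STAT(type, fld) { #fld, offsetof(type, fld) }

    static const struct stat_desc xdpsq_stats_desc[] = {
        DECLARE_XDPSQ_STAT(struct xdpsq_stats, xmit),
        DECLARE_XDPSQ_STAT(struct xdpsq_stats, mpwqe),
    };

    int main(void)
    {
        struct xdpsq_stats s = { .xmit = 10, .mpwqe = 3 };
        size_t n = sizeof(xdpsq_stats_desc) / sizeof(*xdpsq_stats_desc);
        for (size_t i = 0; i < n; i++) {
            const struct stat_desc *d = &xdpsq_stats_desc[i];
            uint64_t v = *(const uint64_t *)((const char *)&s + d->offset);
            printf("%s = %llu\n", d->name, (unsigned long long)v);
        }
        return 0;
    }
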
en_stats.h
   440  u64 mpwqe; member

/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en/ |
txrx.h
   200  return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1); in mlx5e_shampo_get_cqe_header_index()
   348  mlx5_wq_ll_reset(&rq->mpwqe.wq); in mlx5e_rqwq_reset()
   349  rq->mpwqe.actual_wq_head = 0; in mlx5e_rqwq_reset()
   375  return mlx5_wq_ll_get_size(&rq->mpwqe.wq); in mlx5e_rqwq_get_size()
   385  return rq->mpwqe.wq.cur_sz; in mlx5e_rqwq_get_cur_sz()
   395  return mlx5_wq_ll_get_head(&rq->mpwqe.wq); in mlx5e_rqwq_get_head()
   405  return mlx5_wq_ll_get_counter(&rq->mpwqe.wq); in mlx5e_rqwq_get_wqe_counter()
   505  size_t isz = struct_size(rq->mpwqe.info, alloc_units.frag_pages, rq->mpwqe.pages_per_wqe); in mlx5e_get_mpw_info()
   507  return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz)); in mlx5e_get_mpw_info()
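
The mlx5e_get_mpw_info() hits (lines 505 and 507) show how the driver indexes an array whose elements end in a flexible array member: each element's true size is struct_size(info, alloc_units.frag_pages, pages_per_wqe), so element i sits i * isz bytes past the base pointer. A self-contained model of that computation; mpw_info and page_ref are simplified stand-ins, and mpw_info_size() plays the role of struct_size():

    #include <stdio.h>
    #include <stdlib.h>

    struct page_ref { void *page; };

    struct mpw_info {
        unsigned long skip_release_bitmap;
        struct page_ref frag_pages[]; /* pages_per_wqe trailing entries */
    };

    /* struct_size() equivalent: header plus n flexible-array elements. */
    static size_t mpw_info_size(size_t pages_per_wqe)
    {
        return sizeof(struct mpw_info)
             + pages_per_wqe * sizeof(struct page_ref);
    }

    /* array_size(i, isz) equivalent: byte offset of element i. */
    static struct mpw_info *get_mpw_info(void *base, size_t i, size_t pages)
    {
        return (struct mpw_info *)((char *)base + i * mpw_info_size(pages));
    }

    int main(void)
    {
        size_t n = 4, pages = 8;
        void *arr = calloc(n, mpw_info_size(pages));
        struct mpw_info *wi = get_mpw_info(arr, 2, pages);
        printf("element 2 at byte offset %zu\n",
               (size_t)((char *)wi - (char *)arr));
        free(arr);
        return 0;
    }

The kernel's struct_size() and array_size() helpers additionally saturate on overflow, which this sketch leaves out.
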
xdp.c
   334  struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_xdp_mpwqe_session_start()
   351  stats->mpwqe++; in mlx5e_xdp_mpwqe_session_start()
   357  struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_xdp_mpwqe_complete()
   384  if (unlikely(!sq->mpwqe.wqe)) { in mlx5e_xmit_xdp_frame_check_mpwqe()
   407  struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_xmit_xdp_frame_mpwqe()
   421  if (unlikely(sq->mpwqe.wqe)) in mlx5e_xmit_xdp_frame_mpwqe()
   879  if (sq->mpwqe.wqe) in mlx5e_xdp_xmit()
   892  if (xdpsq->mpwqe.wqe) in mlx5e_xdp_rx_poll_complete()
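
On the XDP path, sq->mpwqe.wqe doubles as the "session open" flag: mlx5e_xmit_xdp_frame_check_mpwqe() opens a session lazily on the first frame (bumping the mpwqe counter, line 351), and both mlx5e_xdp_xmit() and mlx5e_xdp_rx_poll_complete() flush any still-open session so frames are not stranded past the batch or NAPI poll. The same flush-at-end shape appears in the XSK tx.c hits above. A minimal sketch of that lazy-open/flush pattern; all names are stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    static char dummy_wqe; /* placeholder for the reserved WQE memory */

    struct xdpsq {
        void *open_wqe;      /* non-NULL while a session is open */
        uint64_t mpwqe_stat; /* one increment per session, as on line 351 */
    };

    static void mpwqe_session_start(struct xdpsq *sq)
    {
        sq->open_wqe = &dummy_wqe;
        sq->mpwqe_stat++;
    }

    static void mpwqe_complete(struct xdpsq *sq)
    {
        printf("flush session\n");
        sq->open_wqe = NULL;
    }

    static void xmit_xdp_frame(struct xdpsq *sq)
    {
        if (!sq->open_wqe)
            mpwqe_session_start(sq); /* lazy open on first frame */
        /* ... append the frame as a data segment ... */
    }

    /* End of the NAPI poll: do not leave a half-built WQE pending. */
    static void xdp_rx_poll_complete(struct xdpsq *sq)
    {
        if (sq->open_wqe)
            mpwqe_complete(sq);
    }

    int main(void)
    {
        struct xdpsq sq = { 0 };
        xmit_xdp_frame(&sq);
        xmit_xdp_frame(&sq);
        xdp_rx_poll_complete(&sq);
        printf("sessions: %llu\n", (unsigned long long)sq.mpwqe_stat);
        return 0;
    }
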
xdp.h
   199  struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_xdp_mpwqe_add_dseg()

params.c
   255  bool mpwqe) in mlx5e_rx_get_linear_stride_sz() argument
   263  return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE; in mlx5e_rx_get_linear_stride_sz()
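
mlx5e_rx_get_linear_stride_sz() picks the linear RX stride: in MPWQE mode it is the full MPWRQ page (1 << page_shift, which can exceed the system page size), otherwise a plain PAGE_SIZE. A one-function illustration of that selection; linear_stride_sz() and the page_shift value are assumptions for the example:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u /* assumed system page size */

    static unsigned int linear_stride_sz(bool mpwqe, unsigned int page_shift)
    {
        /* MPWQE strides span the (possibly multi-page) MPWRQ page. */
        return mpwqe ? 1u << page_shift : PAGE_SIZE;
    }

    int main(void)
    {
        printf("mpwqe:  %u\n", linear_stride_sz(true, 14));  /* 16384 */
        printf("legacy: %u\n", linear_stride_sz(false, 14)); /* 4096  */
        return 0;
    }
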
/openbmc/linux/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/ |
counters.rst
   334  - The number of send blocks processed from Multi-Packet WQEs (mpwqe).
   338  - The number of send packets processed from Multi-Packet WQEs (mpwqe).