Lines matching refs:rq (references to the mlx5e receive-queue context, struct mlx5e_rq *rq)

64 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
68 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
71 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
72 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
89 static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq, in mlx5e_read_enhanced_title_slot() argument
92 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_read_enhanced_title_slot()
97 if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state))) in mlx5e_read_enhanced_title_slot()
100 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) in mlx5e_read_enhanced_title_slot()
105 mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1); in mlx5e_read_enhanced_title_slot()
108 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq, in mlx5e_read_title_slot() argument
112 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_read_title_slot()
118 rq->stats->cqe_compress_blks++; in mlx5e_read_title_slot()
153 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, in mlx5e_decompress_cqe() argument
157 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_decompress_cqe()
169 if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) { in mlx5e_decompress_cqe()
176 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) in mlx5e_decompress_cqe()
180 mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1); in mlx5e_decompress_cqe()
183 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, in mlx5e_decompress_cqe_no_hash() argument
187 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_decompress_cqe_no_hash()
189 mlx5e_decompress_cqe(rq, wq, cqcc); in mlx5e_decompress_cqe_no_hash()
194 static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq, in mlx5e_decompress_enhanced_cqe() argument
199 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_decompress_enhanced_cqe()
215 mlx5e_decompress_cqe_no_hash(rq, wq, cqcc); in mlx5e_decompress_enhanced_cqe()
216 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, in mlx5e_decompress_enhanced_cqe()
218 rq, &cqd->title); in mlx5e_decompress_enhanced_cqe()
221 rq->stats->cqe_compress_pkts += left; in mlx5e_decompress_enhanced_cqe()
226 static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq, in mlx5e_decompress_cqes_cont() argument
231 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_decompress_cqes_cont()
243 mlx5e_decompress_cqe_no_hash(rq, wq, cqcc); in mlx5e_decompress_cqes_cont()
244 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, in mlx5e_decompress_cqes_cont()
246 rq, &cqd->title); in mlx5e_decompress_cqes_cont()
251 rq->stats->cqe_compress_pkts += cqe_count; in mlx5e_decompress_cqes_cont()
256 static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, in mlx5e_decompress_cqes_start() argument
260 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_decompress_cqes_start()
263 mlx5e_read_title_slot(rq, wq, cc); in mlx5e_decompress_cqes_start()
265 mlx5e_decompress_cqe(rq, wq, cc); in mlx5e_decompress_cqes_start()
266 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, in mlx5e_decompress_cqes_start()
268 rq, &cqd->title); in mlx5e_decompress_cqes_start()
271 return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem); in mlx5e_decompress_cqes_start()
276 static int mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq, in mlx5e_page_alloc_fragmented() argument
281 page = page_pool_dev_alloc_pages(rq->page_pool); in mlx5e_page_alloc_fragmented()
295 static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq, in mlx5e_page_release_fragmented() argument
302 page_pool_put_defragged_page(rq->page_pool, page, -1, true); in mlx5e_page_release_fragmented()
305 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq, in mlx5e_get_rx_frag() argument
316 err = mlx5e_page_alloc_fragmented(rq, frag->frag_page); in mlx5e_get_rx_frag()
331 static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq, in mlx5e_put_rx_frag() argument
335 mlx5e_page_release_fragmented(rq, frag->frag_page); in mlx5e_put_rx_frag()
338 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix) in get_frag() argument
340 return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags]; in get_frag()
343 static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe, in mlx5e_alloc_rx_wqe() argument
346 struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix); in mlx5e_alloc_rx_wqe()
350 for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) { in mlx5e_alloc_rx_wqe()
354 err = mlx5e_get_rx_frag(rq, frag); in mlx5e_alloc_rx_wqe()
360 headroom = i == 0 ? rq->buff.headroom : 0; in mlx5e_alloc_rx_wqe()
369 mlx5e_put_rx_frag(rq, --frag); in mlx5e_alloc_rx_wqe()
374 static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq, in mlx5e_free_rx_wqe() argument
379 for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) in mlx5e_free_rx_wqe()
380 mlx5e_put_rx_frag(rq, wi); in mlx5e_free_rx_wqe()
389 static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) in mlx5e_dealloc_rx_wqe() argument
391 struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix); in mlx5e_dealloc_rx_wqe()
393 if (rq->xsk_pool) { in mlx5e_dealloc_rx_wqe()
396 mlx5e_free_rx_wqe(rq, wi); in mlx5e_dealloc_rx_wqe()
402 for (int i = 0; i < rq->wqe.info.num_frags; i++, wi++) in mlx5e_dealloc_rx_wqe()
407 static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk) in mlx5e_xsk_free_rx_wqes() argument
409 struct mlx5_wq_cyc *wq = &rq->wqe.wq; in mlx5e_xsk_free_rx_wqes()
416 wi = get_frag(rq, j); in mlx5e_xsk_free_rx_wqes()
425 static void mlx5e_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk) in mlx5e_free_rx_wqes() argument
427 struct mlx5_wq_cyc *wq = &rq->wqe.wq; in mlx5e_free_rx_wqes()
434 wi = get_frag(rq, j); in mlx5e_free_rx_wqes()
435 mlx5e_free_rx_wqe(rq, wi); in mlx5e_free_rx_wqes()
439 static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk) in mlx5e_alloc_rx_wqes() argument
441 struct mlx5_wq_cyc *wq = &rq->wqe.wq; in mlx5e_alloc_rx_wqes()
450 if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j))) in mlx5e_alloc_rx_wqes()
457 static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk) in mlx5e_refill_rx_wqes() argument
469 refill = min_t(u16, rq->wqe.info.refill_unit, remaining); in mlx5e_refill_rx_wqes()
471 mlx5e_free_rx_wqes(rq, ix + total_alloc, refill); in mlx5e_refill_rx_wqes()
472 refill_alloc = mlx5e_alloc_rx_wqes(rq, ix + total_alloc, refill); in mlx5e_refill_rx_wqes()
483 mlx5e_free_rx_wqes(rq, ix, total_alloc + refill_alloc); in mlx5e_refill_rx_wqes()
486 int j = mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, ix + i); in mlx5e_refill_rx_wqes()
489 frag = get_frag(rq, j); in mlx5e_refill_rx_wqes()
490 for (int k = 0; k < rq->wqe.info.num_frags; k++, frag++) in mlx5e_refill_rx_wqes()
498 mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinfo, in mlx5e_add_skb_shared_info_frag() argument
506 dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir); in mlx5e_add_skb_shared_info_frag()
525 mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb, in mlx5e_add_skb_frag() argument
531 dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, in mlx5e_add_skb_frag()
532 rq->buff.map_dir); in mlx5e_add_skb_frag()
538 mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb, in mlx5e_copy_skb_header() argument
546 dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len, in mlx5e_copy_skb_header()
547 rq->buff.map_dir); in mlx5e_copy_skb_header()
552 mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) in mlx5e_free_rx_mpwqe() argument
558 if (bitmap_full(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe)) in mlx5e_free_rx_mpwqe()
561 no_xdp_xmit = bitmap_empty(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe); in mlx5e_free_rx_mpwqe()
563 if (rq->xsk_pool) { in mlx5e_free_rx_mpwqe()
570 for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) in mlx5e_free_rx_mpwqe()
574 for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) { in mlx5e_free_rx_mpwqe()
579 mlx5e_page_release_fragmented(rq, frag_page); in mlx5e_free_rx_mpwqe()
585 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n) in mlx5e_post_rx_mpwqe() argument
587 struct mlx5_wq_ll *wq = &rq->mpwqe.wq; in mlx5e_post_rx_mpwqe()
638 static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, in mlx5e_build_shampo_hd_umr() argument
642 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; in mlx5e_build_shampo_hd_umr()
644 u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; in mlx5e_build_shampo_hd_umr()
652 headroom = rq->buff.headroom; in mlx5e_build_shampo_hd_umr()
673 err = mlx5e_page_alloc_fragmented(rq, frag_page); in mlx5e_build_shampo_hd_umr()
713 mlx5e_page_release_fragmented(rq, dma_info->frag_page); in mlx5e_build_shampo_hd_umr()
716 rq->stats->buff_alloc_err++; in mlx5e_build_shampo_hd_umr()
720 static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) in mlx5e_alloc_rx_hd_mpwqe() argument
722 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; in mlx5e_alloc_rx_hd_mpwqe()
724 struct mlx5e_icosq *sq = rq->icosq; in mlx5e_alloc_rx_hd_mpwqe()
727 max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev); in mlx5e_alloc_rx_hd_mpwqe()
749 err = mlx5e_build_shampo_hd_umr(rq, sq, len, index); in mlx5e_alloc_rx_hd_mpwqe()
752 index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1); in mlx5e_alloc_rx_hd_mpwqe()
759 static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) in mlx5e_alloc_rx_mpwqe() argument
761 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix); in mlx5e_alloc_rx_mpwqe()
762 struct mlx5e_icosq *sq = rq->icosq; in mlx5e_alloc_rx_mpwqe()
771 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) { in mlx5e_alloc_rx_mpwqe()
772 err = mlx5e_alloc_rx_hd_mpwqe(rq); in mlx5e_alloc_rx_mpwqe()
777 pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs); in mlx5e_alloc_rx_mpwqe()
779 memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe)); in mlx5e_alloc_rx_mpwqe()
783 for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) { in mlx5e_alloc_rx_mpwqe()
786 err = mlx5e_page_alloc_fragmented(rq, frag_page); in mlx5e_alloc_rx_mpwqe()
798 if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) { in mlx5e_alloc_rx_mpwqe()
799 int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) - in mlx5e_alloc_rx_mpwqe()
800 rq->mpwqe.pages_per_wqe; in mlx5e_alloc_rx_mpwqe()
802 memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0, in mlx5e_alloc_rx_mpwqe()
806 bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe); in mlx5e_alloc_rx_mpwqe()
813 offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD; in mlx5e_alloc_rx_mpwqe()
818 .num_wqebbs = rq->mpwqe.umr_wqebbs, in mlx5e_alloc_rx_mpwqe()
819 .umr.rq = rq, in mlx5e_alloc_rx_mpwqe()
822 sq->pc += rq->mpwqe.umr_wqebbs; in mlx5e_alloc_rx_mpwqe()
831 mlx5e_page_release_fragmented(rq, frag_page); in mlx5e_alloc_rx_mpwqe()
834 bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe); in mlx5e_alloc_rx_mpwqe()
837 rq->stats->buff_alloc_err++; in mlx5e_alloc_rx_mpwqe()
848 void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close) in mlx5e_shampo_dealloc_hd() argument
850 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; in mlx5e_shampo_dealloc_hd()
867 mlx5e_page_release_fragmented(rq, hd_info->frag_page); in mlx5e_shampo_dealloc_hd()
882 static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) in mlx5e_dealloc_rx_mpwqe() argument
884 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix); in mlx5e_dealloc_rx_mpwqe()
886 mlx5e_free_rx_mpwqe(rq, wi); in mlx5e_dealloc_rx_mpwqe()
891 bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe); in mlx5e_dealloc_rx_mpwqe()
894 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) in mlx5e_post_rx_wqes() argument
896 struct mlx5_wq_cyc *wq = &rq->wqe.wq; in mlx5e_post_rx_wqes()
901 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) in mlx5e_post_rx_wqes()
904 if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk) in mlx5e_post_rx_wqes()
907 if (rq->page_pool) in mlx5e_post_rx_wqes()
908 page_pool_nid_changed(rq->page_pool, numa_mem_id()); in mlx5e_post_rx_wqes()
916 wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask; in mlx5e_post_rx_wqes()
918 if (!rq->xsk_pool) { in mlx5e_post_rx_wqes()
919 count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk); in mlx5e_post_rx_wqes()
920 } else if (likely(!rq->xsk_pool->dma_need_sync)) { in mlx5e_post_rx_wqes()
921 mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk); in mlx5e_post_rx_wqes()
922 count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk); in mlx5e_post_rx_wqes()
924 mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk); in mlx5e_post_rx_wqes()
930 count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk); in mlx5e_post_rx_wqes()
935 rq->stats->buff_alloc_err++; in mlx5e_post_rx_wqes()
980 struct mlx5e_rq *rq = &c->rq; in mlx5e_handle_shampo_hd_umr() local
983 shampo = rq->mpwqe.shampo; in mlx5e_handle_shampo_hd_umr()
1048 wi->umr.rq->mpwqe.umr_completed++; in mlx5e_poll_ico_cq()
1080 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) in mlx5e_post_rx_mpwqes() argument
1082 struct mlx5_wq_ll *wq = &rq->mpwqe.wq; in mlx5e_post_rx_mpwqes()
1083 u8 umr_completed = rq->mpwqe.umr_completed; in mlx5e_post_rx_mpwqes()
1084 struct mlx5e_icosq *sq = rq->icosq; in mlx5e_post_rx_mpwqes()
1089 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) in mlx5e_post_rx_mpwqes()
1093 mlx5e_post_rx_mpwqe(rq, umr_completed); in mlx5e_post_rx_mpwqes()
1094 rq->mpwqe.umr_in_progress -= umr_completed; in mlx5e_post_rx_mpwqes()
1095 rq->mpwqe.umr_completed = 0; in mlx5e_post_rx_mpwqes()
1098 missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress; in mlx5e_post_rx_mpwqes()
1100 if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk)) in mlx5e_post_rx_mpwqes()
1101 rq->stats->congst_umr++; in mlx5e_post_rx_mpwqes()
1103 if (likely(missing < rq->mpwqe.min_wqe_bulk)) in mlx5e_post_rx_mpwqes()
1106 if (rq->page_pool) in mlx5e_post_rx_mpwqes()
1107 page_pool_nid_changed(rq->page_pool, numa_mem_id()); in mlx5e_post_rx_mpwqes()
1109 head = rq->mpwqe.actual_wq_head; in mlx5e_post_rx_mpwqes()
1112 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, head); in mlx5e_post_rx_mpwqes()
1115 mlx5e_free_rx_mpwqe(rq, wi); in mlx5e_post_rx_mpwqes()
1117 alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) : in mlx5e_post_rx_mpwqes()
1118 mlx5e_alloc_rx_mpwqe(rq, head); in mlx5e_post_rx_mpwqes()
1125 rq->mpwqe.umr_last_bulk = missing - i; in mlx5e_post_rx_mpwqes()
1131 rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk; in mlx5e_post_rx_mpwqes()
1132 rq->mpwqe.actual_wq_head = head; in mlx5e_post_rx_mpwqes()
1140 if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool)) in mlx5e_post_rx_mpwqes()
1216 static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index) in mlx5e_shampo_get_packet_hd() argument
1218 struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index]; in mlx5e_shampo_get_packet_hd()
1219 u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom; in mlx5e_shampo_get_packet_hd()
1224 static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4) in mlx5e_shampo_update_ipv4_udp_hdr() argument
1226 int udp_off = rq->hw_gro_data->fk.control.thoff; in mlx5e_shampo_update_ipv4_udp_hdr()
1227 struct sk_buff *skb = rq->hw_gro_data->skb; in mlx5e_shampo_update_ipv4_udp_hdr()
1243 static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6) in mlx5e_shampo_update_ipv6_udp_hdr() argument
1245 int udp_off = rq->hw_gro_data->fk.control.thoff; in mlx5e_shampo_update_ipv6_udp_hdr()
1246 struct sk_buff *skb = rq->hw_gro_data->skb; in mlx5e_shampo_update_ipv6_udp_hdr()
1262 static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, in mlx5e_shampo_update_fin_psh_flags() argument
1265 u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe); in mlx5e_shampo_update_fin_psh_flags()
1269 last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index); in mlx5e_shampo_update_fin_psh_flags()
1270 last_tcp_hd = last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff; in mlx5e_shampo_update_fin_psh_flags()
1274 static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4, in mlx5e_shampo_update_ipv4_tcp_hdr() argument
1277 int tcp_off = rq->hw_gro_data->fk.control.thoff; in mlx5e_shampo_update_ipv4_tcp_hdr()
1278 struct sk_buff *skb = rq->hw_gro_data->skb; in mlx5e_shampo_update_ipv4_tcp_hdr()
1283 mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp); in mlx5e_shampo_update_ipv4_tcp_hdr()
1288 if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id) in mlx5e_shampo_update_ipv4_tcp_hdr()
1298 static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6, in mlx5e_shampo_update_ipv6_tcp_hdr() argument
1301 int tcp_off = rq->hw_gro_data->fk.control.thoff; in mlx5e_shampo_update_ipv6_tcp_hdr()
1302 struct sk_buff *skb = rq->hw_gro_data->skb; in mlx5e_shampo_update_ipv6_tcp_hdr()
1307 mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp); in mlx5e_shampo_update_ipv6_tcp_hdr()
1319 static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match) in mlx5e_shampo_update_hdr() argument
1321 bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)); in mlx5e_shampo_update_hdr()
1322 struct sk_buff *skb = rq->hw_gro_data->skb; in mlx5e_shampo_update_hdr()
1328 int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr); in mlx5e_shampo_update_hdr()
1336 mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match); in mlx5e_shampo_update_hdr()
1338 mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4); in mlx5e_shampo_update_hdr()
1340 int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr); in mlx5e_shampo_update_hdr()
1346 mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match); in mlx5e_shampo_update_hdr()
1348 mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6); in mlx5e_shampo_update_hdr()
1377 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) in mlx5e_enable_ecn() argument
1391 rq->stats->ecn_mark += !!rc; in mlx5e_enable_ecn()
1479 struct mlx5e_rq *rq, in mlx5e_handle_csum() argument
1483 struct mlx5e_rq_stats *stats = rq->stats; in mlx5e_handle_csum()
1497 if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) || in mlx5e_handle_csum()
1520 if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state)) in mlx5e_handle_csum()
1550 struct mlx5e_rq *rq, in mlx5e_build_rx_skb() argument
1554 struct mlx5e_rq_stats *stats = rq->stats; in mlx5e_build_rx_skb()
1555 struct net_device *netdev = rq->netdev; in mlx5e_build_rx_skb()
1560 mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt); in mlx5e_build_rx_skb()
1580 if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp))) in mlx5e_build_rx_skb()
1581 skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time, in mlx5e_build_rx_skb()
1582 rq->clock, get_cqe_ts(cqe)); in mlx5e_build_rx_skb()
1583 skb_record_rx_queue(skb, rq->ix); in mlx5e_build_rx_skb()
1596 mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg); in mlx5e_build_rx_skb()
1599 mlx5e_enable_ecn(rq, skb); in mlx5e_build_rx_skb()
1607 static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq, in mlx5e_shampo_complete_rx_cqe() argument
1612 struct mlx5e_rq_stats *stats = rq->stats; in mlx5e_shampo_complete_rx_cqe()
1620 mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); in mlx5e_shampo_complete_rx_cqe()
1622 if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) { in mlx5e_shampo_complete_rx_cqe()
1623 napi_gro_receive(rq->cq.napi, skb); in mlx5e_shampo_complete_rx_cqe()
1624 rq->hw_gro_data->skb = NULL; in mlx5e_shampo_complete_rx_cqe()
1628 static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, in mlx5e_complete_rx_cqe() argument
1633 struct mlx5e_rq_stats *stats = rq->stats; in mlx5e_complete_rx_cqe()
1637 mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); in mlx5e_complete_rx_cqe()
1641 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, in mlx5e_build_linear_skb() argument
1648 rq->stats->buff_alloc_err++; in mlx5e_build_linear_skb()
1661 static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, in mlx5e_fill_mxbuf() argument
1665 xdp_init_buff(&mxbuf->xdp, frame_sz, &rq->xdp_rxq); in mlx5e_fill_mxbuf()
1668 mxbuf->rq = rq; in mlx5e_fill_mxbuf()
1672 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, in mlx5e_skb_from_cqe_linear() argument
1676 u16 rx_headroom = rq->buff.headroom; in mlx5e_skb_from_cqe_linear()
1689 dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset, in mlx5e_skb_from_cqe_linear()
1690 frag_size, rq->buff.map_dir); in mlx5e_skb_from_cqe_linear()
1693 prog = rcu_dereference(rq->xdp_prog); in mlx5e_skb_from_cqe_linear()
1698 mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz, in mlx5e_skb_from_cqe_linear()
1700 if (mlx5e_xdp_handle(rq, prog, &mxbuf)) in mlx5e_skb_from_cqe_linear()
1708 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize); in mlx5e_skb_from_cqe_linear()
1720 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, in mlx5e_skb_from_cqe_nonlinear() argument
1723 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; in mlx5e_skb_from_cqe_nonlinear()
1725 u16 rx_headroom = rq->buff.headroom; in mlx5e_skb_from_cqe_nonlinear()
1742 dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset, in mlx5e_skb_from_cqe_nonlinear()
1743 rq->buff.frame0_sz, rq->buff.map_dir); in mlx5e_skb_from_cqe_nonlinear()
1747 mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz, in mlx5e_skb_from_cqe_nonlinear()
1761 mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, in mlx5e_skb_from_cqe_nonlinear()
1770 prog = rcu_dereference(rq->xdp_prog); in mlx5e_skb_from_cqe_nonlinear()
1771 if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) { in mlx5e_skb_from_cqe_nonlinear()
1772 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { in mlx5e_skb_from_cqe_nonlinear()
1781 skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz, in mlx5e_skb_from_cqe_nonlinear()
1804 static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) in trigger_report() argument
1807 struct mlx5e_priv *priv = rq->priv; in trigger_report()
1810 !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) { in trigger_report()
1811 mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe); in trigger_report()
1812 queue_work(priv->wq, &rq->recover_work); in trigger_report()
1816 static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) in mlx5e_handle_rx_err_cqe() argument
1818 trigger_report(rq, cqe); in mlx5e_handle_rx_err_cqe()
1819 rq->stats->wqe_err++; in mlx5e_handle_rx_err_cqe()
1822 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) in mlx5e_handle_rx_cqe() argument
1824 struct mlx5_wq_cyc *wq = &rq->wqe.wq; in mlx5e_handle_rx_cqe()
1831 wi = get_frag(rq, ci); in mlx5e_handle_rx_cqe()
1835 mlx5e_handle_rx_err_cqe(rq, cqe); in mlx5e_handle_rx_cqe()
1839 skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe, in mlx5e_handle_rx_cqe()
1843 rq, wi, cqe, cqe_bcnt); in mlx5e_handle_rx_cqe()
1846 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) in mlx5e_handle_rx_cqe()
1851 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); in mlx5e_handle_rx_cqe()
1859 napi_gro_receive(rq->cq.napi, skb); in mlx5e_handle_rx_cqe()
1866 static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) in mlx5e_handle_rx_cqe_rep() argument
1868 struct net_device *netdev = rq->netdev; in mlx5e_handle_rx_cqe_rep()
1872 struct mlx5_wq_cyc *wq = &rq->wqe.wq; in mlx5e_handle_rx_cqe_rep()
1879 wi = get_frag(rq, ci); in mlx5e_handle_rx_cqe_rep()
1883 mlx5e_handle_rx_err_cqe(rq, cqe); in mlx5e_handle_rx_cqe_rep()
1887 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, in mlx5e_handle_rx_cqe_rep()
1890 rq, wi, cqe, cqe_bcnt); in mlx5e_handle_rx_cqe_rep()
1893 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) in mlx5e_handle_rx_cqe_rep()
1898 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); in mlx5e_handle_rx_cqe_rep()
1903 mlx5e_rep_tc_receive(cqe, rq, skb); in mlx5e_handle_rx_cqe_rep()
1909 static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) in mlx5e_handle_rx_cqe_mpwrq_rep() argument
1913 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id); in mlx5e_handle_rx_cqe_mpwrq_rep()
1915 u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; in mlx5e_handle_rx_cqe_mpwrq_rep()
1916 u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1); in mlx5e_handle_rx_cqe_mpwrq_rep()
1917 u32 page_idx = wqe_offset >> rq->mpwqe.page_shift; in mlx5e_handle_rx_cqe_mpwrq_rep()
1926 mlx5e_handle_rx_err_cqe(rq, cqe); in mlx5e_handle_rx_cqe_mpwrq_rep()
1931 struct mlx5e_rq_stats *stats = rq->stats; in mlx5e_handle_rx_cqe_mpwrq_rep()
1940 skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq, in mlx5e_handle_rx_cqe_mpwrq_rep()
1943 rq, wi, cqe, cqe_bcnt, head_offset, page_idx); in mlx5e_handle_rx_cqe_mpwrq_rep()
1947 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); in mlx5e_handle_rx_cqe_mpwrq_rep()
1949 mlx5e_rep_tc_receive(cqe, rq, skb); in mlx5e_handle_rx_cqe_mpwrq_rep()
1952 if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) in mlx5e_handle_rx_cqe_mpwrq_rep()
1955 wq = &rq->mpwqe.wq; in mlx5e_handle_rx_cqe_mpwrq_rep()
1967 mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, in mlx5e_fill_skb_data() argument
1978 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) in mlx5e_fill_skb_data()
1981 truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); in mlx5e_fill_skb_data()
1984 mlx5e_add_skb_frag(rq, skb, frag_page->page, data_offset, in mlx5e_fill_skb_data()
1994 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, in mlx5e_skb_from_cqe_mpwrq_nonlinear() argument
2013 prog = rcu_dereference(rq->xdp_prog); in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2018 if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) { in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2019 rq->stats->buff_alloc_err++; in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2028 skb = napi_alloc_skb(rq->cq.napi, in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2031 rq->stats->buff_alloc_err++; in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2050 mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf); in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2058 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2061 truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2063 mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset, in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2071 if (mlx5e_xdp_handle(rq, prog, &mxbuf)) { in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2072 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2080 mlx5e_page_release_fragmented(rq, &wi->linear_page); in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2084 skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2089 mlx5e_page_release_fragmented(rq, &wi->linear_page); in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2095 mlx5e_page_release_fragmented(rq, &wi->linear_page); in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2128 mlx5e_copy_skb_header(rq, skb, head_page->page, addr, in mlx5e_skb_from_cqe_mpwrq_nonlinear()
2139 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, in mlx5e_skb_from_cqe_mpwrq_linear() argument
2144 u16 rx_headroom = rq->buff.headroom; in mlx5e_skb_from_cqe_mpwrq_linear()
2153 if (unlikely(cqe_bcnt > rq->hw_mtu)) { in mlx5e_skb_from_cqe_mpwrq_linear()
2154 rq->stats->oversize_pkts_sw_drop++; in mlx5e_skb_from_cqe_mpwrq_linear()
2163 dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset, in mlx5e_skb_from_cqe_mpwrq_linear()
2164 frag_size, rq->buff.map_dir); in mlx5e_skb_from_cqe_mpwrq_linear()
2167 prog = rcu_dereference(rq->xdp_prog); in mlx5e_skb_from_cqe_mpwrq_linear()
2172 mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz, in mlx5e_skb_from_cqe_mpwrq_linear()
2174 if (mlx5e_xdp_handle(rq, prog, &mxbuf)) { in mlx5e_skb_from_cqe_mpwrq_linear()
2175 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) in mlx5e_skb_from_cqe_mpwrq_linear()
2185 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize); in mlx5e_skb_from_cqe_mpwrq_linear()
2197 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, in mlx5e_skb_from_cqe_shampo() argument
2200 struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index]; in mlx5e_skb_from_cqe_shampo()
2203 u16 rx_headroom = rq->buff.headroom; in mlx5e_skb_from_cqe_shampo()
2214 dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir); in mlx5e_skb_from_cqe_shampo()
2217 skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0); in mlx5e_skb_from_cqe_shampo()
2225 rq->stats->gro_large_hds++; in mlx5e_skb_from_cqe_shampo()
2226 skb = napi_alloc_skb(rq->cq.napi, in mlx5e_skb_from_cqe_shampo()
2229 rq->stats->buff_alloc_err++; in mlx5e_skb_from_cqe_shampo()
2234 mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr, in mlx5e_skb_from_cqe_shampo()
2260 mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match) in mlx5e_shampo_flush_skb() argument
2262 struct sk_buff *skb = rq->hw_gro_data->skb; in mlx5e_shampo_flush_skb()
2263 struct mlx5e_rq_stats *stats = rq->stats; in mlx5e_shampo_flush_skb()
2267 mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz); in mlx5e_shampo_flush_skb()
2269 mlx5e_shampo_update_hdr(rq, cqe, match); in mlx5e_shampo_flush_skb()
2270 napi_gro_receive(rq->cq.napi, skb); in mlx5e_shampo_flush_skb()
2271 rq->hw_gro_data->skb = NULL; in mlx5e_shampo_flush_skb()
2283 mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) in mlx5e_free_rx_shampo_hd_entry() argument
2285 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; in mlx5e_free_rx_shampo_hd_entry()
2292 mlx5e_page_release_fragmented(rq, dma_info->frag_page); in mlx5e_free_rx_shampo_hd_entry()
2297 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) in mlx5e_handle_rx_cqe_mpwrq_shampo() argument
2300 u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe); in mlx5e_handle_rx_cqe_mpwrq_shampo()
2308 struct sk_buff **skb = &rq->hw_gro_data->skb; in mlx5e_handle_rx_cqe_mpwrq_shampo()
2311 struct mlx5e_rq_stats *stats = rq->stats; in mlx5e_handle_rx_cqe_mpwrq_shampo()
2316 wi = mlx5e_get_mpw_info(rq, wqe_id); in mlx5e_handle_rx_cqe_mpwrq_shampo()
2320 mlx5e_handle_rx_err_cqe(rq, cqe); in mlx5e_handle_rx_cqe_mpwrq_shampo()
2334 mlx5e_shampo_flush_skb(rq, cqe, match); in mlx5e_handle_rx_cqe_mpwrq_shampo()
2339 *skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index); in mlx5e_handle_rx_cqe_mpwrq_shampo()
2341 *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt, in mlx5e_handle_rx_cqe_mpwrq_shampo()
2351 rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) { in mlx5e_handle_rx_cqe_mpwrq_shampo()
2352 void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index); in mlx5e_handle_rx_cqe_mpwrq_shampo()
2353 int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff - in mlx5e_handle_rx_cqe_mpwrq_shampo()
2357 rq->hw_gro_data->second_ip_id = ntohs(iph->id); in mlx5e_handle_rx_cqe_mpwrq_shampo()
2365 mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset); in mlx5e_handle_rx_cqe_mpwrq_shampo()
2368 mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb); in mlx5e_handle_rx_cqe_mpwrq_shampo()
2370 mlx5e_shampo_flush_skb(rq, cqe, match); in mlx5e_handle_rx_cqe_mpwrq_shampo()
2372 mlx5e_free_rx_shampo_hd_entry(rq, header_index); in mlx5e_handle_rx_cqe_mpwrq_shampo()
2374 if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) in mlx5e_handle_rx_cqe_mpwrq_shampo()
2377 wq = &rq->mpwqe.wq; in mlx5e_handle_rx_cqe_mpwrq_shampo()
2382 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) in mlx5e_handle_rx_cqe_mpwrq() argument
2386 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id); in mlx5e_handle_rx_cqe_mpwrq()
2388 u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; in mlx5e_handle_rx_cqe_mpwrq()
2389 u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1); in mlx5e_handle_rx_cqe_mpwrq()
2390 u32 page_idx = wqe_offset >> rq->mpwqe.page_shift; in mlx5e_handle_rx_cqe_mpwrq()
2399 mlx5e_handle_rx_err_cqe(rq, cqe); in mlx5e_handle_rx_cqe_mpwrq()
2404 struct mlx5e_rq_stats *stats = rq->stats; in mlx5e_handle_rx_cqe_mpwrq()
2413 skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq, in mlx5e_handle_rx_cqe_mpwrq()
2417 rq, wi, cqe, cqe_bcnt, head_offset, in mlx5e_handle_rx_cqe_mpwrq()
2422 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); in mlx5e_handle_rx_cqe_mpwrq()
2430 napi_gro_receive(rq->cq.napi, skb); in mlx5e_handle_rx_cqe_mpwrq()
2433 if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) in mlx5e_handle_rx_cqe_mpwrq()
2436 wq = &rq->mpwqe.wq; in mlx5e_handle_rx_cqe_mpwrq()
2441 static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq, in mlx5e_rx_cq_process_enhanced_cqe_comp() argument
2446 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_rx_cq_process_enhanced_cqe_comp()
2455 rq->stats->cqe_compress_blks++; in mlx5e_rx_cq_process_enhanced_cqe_comp()
2462 mlx5e_read_enhanced_title_slot(rq, title_cqe); in mlx5e_rx_cq_process_enhanced_cqe_comp()
2464 rq->stats->cqe_compress_blks++; in mlx5e_rx_cq_process_enhanced_cqe_comp()
2467 mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe, in mlx5e_rx_cq_process_enhanced_cqe_comp()
2474 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, in mlx5e_rx_cq_process_enhanced_cqe_comp()
2476 rq, cqe); in mlx5e_rx_cq_process_enhanced_cqe_comp()
2483 mlx5e_read_enhanced_title_slot(rq, title_cqe); in mlx5e_rx_cq_process_enhanced_cqe_comp()
2490 static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq, in mlx5e_rx_cq_process_basic_cqe_comp() argument
2497 if (rq->cqd.left) in mlx5e_rx_cq_process_basic_cqe_comp()
2498 work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem); in mlx5e_rx_cq_process_basic_cqe_comp()
2503 mlx5e_decompress_cqes_start(rq, cqwq, in mlx5e_rx_cq_process_basic_cqe_comp()
2509 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, in mlx5e_rx_cq_process_basic_cqe_comp()
2511 rq, cqe); in mlx5e_rx_cq_process_basic_cqe_comp()
2520 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); in mlx5e_poll_rx_cq() local
2524 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) in mlx5e_poll_rx_cq()
2527 if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state)) in mlx5e_poll_rx_cq()
2528 work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq, in mlx5e_poll_rx_cq()
2531 work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq, in mlx5e_poll_rx_cq()
2537 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb) in mlx5e_poll_rx_cq()
2538 mlx5e_shampo_flush_skb(rq, NULL, false); in mlx5e_poll_rx_cq()
2540 if (rcu_access_pointer(rq->xdp_prog)) in mlx5e_poll_rx_cq()
2541 mlx5e_xdp_rx_poll_complete(rq); in mlx5e_poll_rx_cq()
2557 static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, in mlx5i_complete_rx_cqe() argument
2573 netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn); in mlx5i_complete_rx_cqe()
2587 stats = &priv->channel_stats[rq->ix]->rq; in mlx5i_complete_rx_cqe()
2623 skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time, in mlx5i_complete_rx_cqe()
2624 rq->clock, get_cqe_ts(cqe)); in mlx5i_complete_rx_cqe()
2625 skb_record_rx_queue(skb, rq->ix); in mlx5i_complete_rx_cqe()
2642 static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) in mlx5i_handle_rx_cqe() argument
2644 struct mlx5_wq_cyc *wq = &rq->wqe.wq; in mlx5i_handle_rx_cqe()
2651 wi = get_frag(rq, ci); in mlx5i_handle_rx_cqe()
2655 rq->stats->wqe_err++; in mlx5i_handle_rx_cqe()
2659 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, in mlx5i_handle_rx_cqe()
2662 rq, wi, cqe, cqe_bcnt); in mlx5i_handle_rx_cqe()
2666 mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); in mlx5i_handle_rx_cqe()
2671 napi_gro_receive(rq->cq.napi, skb); in mlx5i_handle_rx_cqe()
2683 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk) in mlx5e_rq_set_handlers() argument
2685 struct net_device *netdev = rq->netdev; in mlx5e_rq_set_handlers()
2686 struct mlx5_core_dev *mdev = rq->mdev; in mlx5e_rq_set_handlers()
2687 struct mlx5e_priv *priv = rq->priv; in mlx5e_rq_set_handlers()
2689 switch (rq->wq_type) { in mlx5e_rq_set_handlers()
2691 rq->mpwqe.skb_from_cqe_mpwrq = xsk ? in mlx5e_rq_set_handlers()
2696 rq->post_wqes = mlx5e_post_rx_mpwqes; in mlx5e_rq_set_handlers()
2697 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; in mlx5e_rq_set_handlers()
2700 rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo; in mlx5e_rq_set_handlers()
2701 if (!rq->handle_rx_cqe) { in mlx5e_rq_set_handlers()
2706 rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe; in mlx5e_rq_set_handlers()
2707 if (!rq->handle_rx_cqe) { in mlx5e_rq_set_handlers()
2715 rq->wqe.skb_from_cqe = xsk ? in mlx5e_rq_set_handlers()
2720 rq->post_wqes = mlx5e_post_rx_wqes; in mlx5e_rq_set_handlers()
2721 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; in mlx5e_rq_set_handlers()
2722 rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe; in mlx5e_rq_set_handlers()
2723 if (!rq->handle_rx_cqe) { in mlx5e_rq_set_handlers()
2732 static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) in mlx5e_trap_handle_rx_cqe() argument
2734 struct mlx5_wq_cyc *wq = &rq->wqe.wq; in mlx5e_trap_handle_rx_cqe()
2743 wi = get_frag(rq, ci); in mlx5e_trap_handle_rx_cqe()
2747 rq->stats->wqe_err++; in mlx5e_trap_handle_rx_cqe()
2751 skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt); in mlx5e_trap_handle_rx_cqe()
2755 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); in mlx5e_trap_handle_rx_cqe()
2758 mlx5_devlink_trap_report(rq->mdev, trap_id, skb, in mlx5e_trap_handle_rx_cqe()
2759 rq->netdev->devlink_port); in mlx5e_trap_handle_rx_cqe()
2766 void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params) in mlx5e_rq_set_trap_handlers() argument
2768 rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ? in mlx5e_rq_set_trap_handlers()
2771 rq->post_wqes = mlx5e_post_rx_wqes; in mlx5e_rq_set_trap_handlers()
2772 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; in mlx5e_rq_set_trap_handlers()
2773 rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe; in mlx5e_rq_set_trap_handlers()
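
A note on the recurring INDIRECT_CALL_3(rq->handle_rx_cqe, ...) matches above: the per-RQ CQE handler is chosen once in mlx5e_rq_set_handlers() / mlx5e_rq_set_trap_handlers() and then dispatched in the polling paths through the kernel's indirect-call wrappers, which compare the function pointer against the expected handlers so the common cases become direct calls. The stand-alone sketch below models only that dispatch technique with simplified macros and hypothetical names (struct rq, struct cqe, poll_one, handle_cqe_*); it is an illustration, not the kernel's indirect_call_wrapper.h or the mlx5e handlers themselves.

	#include <stdio.h>

	/* Simplified stand-ins for the kernel's likely()/INDIRECT_CALL_* helpers. */
	#define likely(x) __builtin_expect(!!(x), 1)

	#define INDIRECT_CALL_1(f, f1, ...) \
		(likely((f) == (f1)) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))
	#define INDIRECT_CALL_2(f, f2, f1, ...) \
		(likely((f) == (f2)) ? f2(__VA_ARGS__) : INDIRECT_CALL_1(f, f1, __VA_ARGS__))
	#define INDIRECT_CALL_3(f, f3, f2, f1, ...) \
		(likely((f) == (f3)) ? f3(__VA_ARGS__) : INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__))

	struct cqe { int id; };

	struct rq {
		/* Per-RQ handler, installed once at configuration time. */
		void (*handle_rx_cqe)(struct rq *rq, struct cqe *cqe);
	};

	/* Hypothetical handlers standing in for the cyclic, striding-RQ and
	 * SHAMPO CQE handlers referenced in the listing above.
	 */
	static void handle_cqe_cyclic(struct rq *rq, struct cqe *cqe)
	{
		(void)rq; printf("cyclic cqe %d\n", cqe->id);
	}

	static void handle_cqe_mpwrq(struct rq *rq, struct cqe *cqe)
	{
		(void)rq; printf("mpwrq  cqe %d\n", cqe->id);
	}

	static void handle_cqe_shampo(struct rq *rq, struct cqe *cqe)
	{
		(void)rq; printf("shampo cqe %d\n", cqe->id);
	}

	static void poll_one(struct rq *rq, struct cqe *cqe)
	{
		/* Compare against the known handlers first; fall back to a true
		 * indirect call only if an unexpected handler was installed.
		 */
		INDIRECT_CALL_3(rq->handle_rx_cqe, handle_cqe_mpwrq,
				handle_cqe_shampo, handle_cqe_cyclic, rq, cqe);
	}

	int main(void)
	{
		struct rq rq = { .handle_rx_cqe = handle_cqe_mpwrq };
		struct cqe cqe = { .id = 1 };

		poll_one(&rq, &cqe);   /* takes the direct-call fast path */
		return 0;
	}

In the matches above the same pattern appears both when replaying decompressed mini-CQEs (mlx5e_decompress_cqes_cont and friends) and in the main CQ polling loop, always listing the handlers that the configuration code can actually install for that RQ type.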