1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/ip.h>
34 #include <linux/ipv6.h>
35 #include <linux/tcp.h>
36 #include <linux/bitmap.h>
37 #include <linux/filter.h>
38 #include <net/ip6_checksum.h>
39 #include <net/page_pool.h>
40 #include <net/inet_ecn.h>
41 #include <net/gro.h>
42 #include <net/udp.h>
43 #include <net/tcp.h>
44 #include <net/xdp_sock_drv.h>
45 #include "en.h"
46 #include "en/txrx.h"
47 #include "en_tc.h"
48 #include "eswitch.h"
49 #include "en_rep.h"
50 #include "en/rep/tc.h"
51 #include "ipoib/ipoib.h"
52 #include "en_accel/ipsec.h"
53 #include "en_accel/macsec.h"
54 #include "en_accel/ipsec_rxtx.h"
55 #include "en_accel/ktls_txrx.h"
56 #include "en/xdp.h"
57 #include "en/xsk/rx.h"
58 #include "en/health.h"
59 #include "en/params.h"
60 #include "devlink.h"
61 #include "en/devlink.h"
62 
63 static struct sk_buff *
64 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
65 				struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
66 				u32 page_idx);
67 static struct sk_buff *
68 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
69 				   struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
70 				   u32 page_idx);
71 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
72 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
74 
75 const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
76 	.handle_rx_cqe       = mlx5e_handle_rx_cqe,
77 	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
78 	.handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
79 };
80 
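/* CQE compression: a compressed session starts with a "title" CQE carrying
 * the shared fields, followed by arrays of mini CQEs that hold only the
 * per-packet deltas (byte count, checksum, stride index). The helpers below
 * reconstruct each packet's full CQE in rq->cqd.title before invoking the
 * regular handler.
 */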
81 static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
82 				       u32 cqcc, void *data)
83 {
84 	u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
85 
86 	memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
87 }
88 
89 static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,
90 					   struct mlx5_cqe64 *cqe)
91 {
92 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
93 	struct mlx5_cqe64 *title = &cqd->title;
94 
95 	memcpy(title, cqe, sizeof(struct mlx5_cqe64));
96 
97 	if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))
98 		return;
99 
100 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
101 		cqd->wqe_counter = mpwrq_get_cqe_stride_index(title) +
102 			mpwrq_get_cqe_consumed_strides(title);
103 	else
104 		cqd->wqe_counter =
105 			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1);
106 }
107 
108 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
109 					 struct mlx5_cqwq *wq,
110 					 u32 cqcc)
111 {
112 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
113 	struct mlx5_cqe64 *title = &cqd->title;
114 
115 	mlx5e_read_cqe_slot(wq, cqcc, title);
116 	cqd->left        = be32_to_cpu(title->byte_cnt);
117 	cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
118 	rq->stats->cqe_compress_blks++;
119 }
120 
121 static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
122 					    struct mlx5e_cq_decomp *cqd,
123 					    u32 cqcc)
124 {
125 	mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
126 	cqd->mini_arr_idx = 0;
127 }
128 
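/* The ring slots consumed by a compressed session contain mini CQE arrays
 * rather than regular CQEs, so their op_own bytes hold payload data. Rewrite
 * the ownership bit of every consumed slot to the parity matching its wrap
 * count (flipping it for slots past the wraparound point) so that subsequent
 * CQE validity checks keep working.
 */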
129 static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
130 {
131 	u32 cqcc   = wq->cc;
132 	u8  op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
133 	u32 ci     = mlx5_cqwq_ctr2ix(wq, cqcc);
134 	u32 wq_sz  = mlx5_cqwq_get_size(wq);
135 	u32 ci_top = min_t(u32, wq_sz, ci + n);
136 
137 	for (; ci < ci_top; ci++, n--) {
138 		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
139 
140 		cqe->op_own = op_own;
141 	}
142 
143 	if (unlikely(ci == wq_sz)) {
144 		op_own = !op_own;
145 		for (ci = 0; ci < n; ci++) {
146 			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
147 
148 			cqe->op_own = op_own;
149 		}
150 	}
151 }
152 
153 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
154 					struct mlx5_cqwq *wq,
155 					u32 cqcc)
156 {
157 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
158 	struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
159 	struct mlx5_cqe64 *title = &cqd->title;
160 
161 	title->byte_cnt     = mini_cqe->byte_cnt;
162 	title->check_sum    = mini_cqe->checksum;
163 	title->op_own      &= 0xf0;
164 	title->op_own      |= 0x01 & (cqcc >> wq->fbc.log_sz);
165 
166 	/* state bit set implies linked-list striding RQ wq type and
167 	 * HW stride index capability supported
168 	 */
169 	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
170 		title->wqe_counter = mini_cqe->stridx;
171 		return;
172 	}
173 
174 	/* HW stride index capability not supported */
175 	title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
176 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
177 		cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
178 	else
179 		cqd->wqe_counter =
180 			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
181 }
182 
183 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
184 						struct mlx5_cqwq *wq,
185 						u32 cqcc)
186 {
187 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
188 
189 	mlx5e_decompress_cqe(rq, wq, cqcc);
190 	cqd->title.rss_hash_type   = 0;
191 	cqd->title.rss_hash_result = 0;
192 }
193 
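/* Enhanced CQE compression: the CQE itself reports how many mini CQEs the
 * session contains, and the mini CQE array is embedded in the same 64-byte
 * slot, which is copied into cqd->mini_arr below. Note: this relies on the
 * session fitting in a single slot's worth of mini CQEs.
 */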
194 static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq,
195 					 struct mlx5_cqwq *wq,
196 					 struct mlx5_cqe64 *cqe,
197 					 int budget_rem)
198 {
199 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
200 	u32 cqcc, left;
201 	u32 i;
202 
203 	left = get_cqe_enhanced_num_mini_cqes(cqe);
	/* Avoid breaking the CQE compression session in the middle if the
	 * budget is not sufficient to handle all of it: return
	 * work_done == budget_rem to give NAPI a 'busy' indication.
	 */
208 	if (unlikely(left > budget_rem))
209 		return budget_rem;
210 
211 	cqcc = wq->cc;
212 	cqd->mini_arr_idx = 0;
213 	memcpy(cqd->mini_arr, cqe, sizeof(struct mlx5_cqe64));
214 	for (i = 0; i < left; i++, cqd->mini_arr_idx++, cqcc++) {
215 		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
216 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
217 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
218 				rq, &cqd->title);
219 	}
220 	wq->cc = cqcc;
221 	rq->stats->cqe_compress_pkts += left;
222 
223 	return left;
224 }
225 
226 static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
227 					     struct mlx5_cqwq *wq,
228 					     int update_owner_only,
229 					     int budget_rem)
230 {
231 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
232 	u32 cqcc = wq->cc + update_owner_only;
233 	u32 cqe_count;
234 	u32 i;
235 
236 	cqe_count = min_t(u32, cqd->left, budget_rem);
237 
238 	for (i = update_owner_only; i < cqe_count;
239 	     i++, cqd->mini_arr_idx++, cqcc++) {
240 		if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
241 			mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
242 
243 		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
244 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
245 				mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
246 				rq, &cqd->title);
247 	}
248 	mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
249 	wq->cc = cqcc;
250 	cqd->left -= cqe_count;
251 	rq->stats->cqe_compress_pkts += cqe_count;
252 
253 	return cqe_count;
254 }
255 
256 static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
257 					      struct mlx5_cqwq *wq,
258 					      int budget_rem)
259 {
260 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
261 	u32 cc = wq->cc;
262 
263 	mlx5e_read_title_slot(rq, wq, cc);
264 	mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
265 	mlx5e_decompress_cqe(rq, wq, cc);
266 	INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
267 			mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
268 			rq, &cqd->title);
269 	cqd->mini_arr_idx++;
270 
271 	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
272 }
273 
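/* RX pages are handed to the pool pre-fragmented with a large pagecnt bias.
 * frag_page->frags counts the fragments actually given out (to skbs or XDP);
 * on release, the unused remainder of the bias is drained and the page goes
 * back to the page pool once the last user is done with it.
 */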
274 #define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)
275 
276 static int mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq,
277 				       struct mlx5e_frag_page *frag_page)
278 {
279 	struct page *page;
280 
281 	page = page_pool_dev_alloc_pages(rq->page_pool);
282 	if (unlikely(!page))
283 		return -ENOMEM;
284 
285 	page_pool_fragment_page(page, MLX5E_PAGECNT_BIAS_MAX);
286 
287 	*frag_page = (struct mlx5e_frag_page) {
288 		.page	= page,
289 		.frags	= 0,
290 	};
291 
292 	return 0;
293 }
294 
295 static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
296 					  struct mlx5e_frag_page *frag_page)
297 {
298 	u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
299 	struct page *page = frag_page->page;
300 
301 	if (page_pool_defrag_page(page, drain_count) == 0)
302 		page_pool_put_defragged_page(rq->page_pool, page, -1, true);
303 }
304 
305 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
306 				    struct mlx5e_wqe_frag_info *frag)
307 {
308 	int err = 0;
309 
310 	if (!frag->offset)
		/* On the first frag (offset == 0), replenish the page.
		 * Other frags that point into the same page (at a different
		 * offset) reuse it and must not replenish it again
		 * themselves.
		 */
316 		err = mlx5e_page_alloc_fragmented(rq, frag->frag_page);
317 
318 	return err;
319 }
320 
321 static bool mlx5e_frag_can_release(struct mlx5e_wqe_frag_info *frag)
322 {
323 #define CAN_RELEASE_MASK \
324 	(BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE) | BIT(MLX5E_WQE_FRAG_SKIP_RELEASE))
325 
326 #define CAN_RELEASE_VALUE BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE)
327 
328 	return (frag->flags & CAN_RELEASE_MASK) == CAN_RELEASE_VALUE;
329 }
330 
331 static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
332 				     struct mlx5e_wqe_frag_info *frag)
333 {
334 	if (mlx5e_frag_can_release(frag))
335 		mlx5e_page_release_fragmented(rq, frag->frag_page);
336 }
337 
338 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
339 {
340 	return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
341 }
342 
343 static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
344 			      u16 ix)
345 {
346 	struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
347 	int err;
348 	int i;
349 
350 	for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
351 		dma_addr_t addr;
352 		u16 headroom;
353 
354 		err = mlx5e_get_rx_frag(rq, frag);
355 		if (unlikely(err))
356 			goto free_frags;
357 
358 		frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
359 
360 		headroom = i == 0 ? rq->buff.headroom : 0;
361 		addr = page_pool_get_dma_addr(frag->frag_page->page);
362 		wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
363 	}
364 
365 	return 0;
366 
367 free_frags:
368 	while (--i >= 0)
369 		mlx5e_put_rx_frag(rq, --frag);
370 
371 	return err;
372 }
373 
374 static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
375 				     struct mlx5e_wqe_frag_info *wi)
376 {
377 	int i;
378 
379 	for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
380 		mlx5e_put_rx_frag(rq, wi);
381 }
382 
383 static void mlx5e_xsk_free_rx_wqe(struct mlx5e_wqe_frag_info *wi)
384 {
385 	if (!(wi->flags & BIT(MLX5E_WQE_FRAG_SKIP_RELEASE)))
386 		xsk_buff_free(*wi->xskp);
387 }
388 
389 static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
390 {
391 	struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
392 
393 	if (rq->xsk_pool)
394 		mlx5e_xsk_free_rx_wqe(wi);
395 	else
396 		mlx5e_free_rx_wqe(rq, wi);
397 }
398 
399 static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
400 {
401 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
402 	int i;
403 
404 	for (i = 0; i < wqe_bulk; i++) {
405 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
406 		struct mlx5e_wqe_frag_info *wi;
407 
408 		wi = get_frag(rq, j);
		/* The page is always put into the Reuse Ring, because there
		 * is no way to return the page to userspace when the
		 * interface goes down.
		 */
413 		mlx5e_xsk_free_rx_wqe(wi);
414 	}
415 }
416 
417 static void mlx5e_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
418 {
419 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
420 	int i;
421 
422 	for (i = 0; i < wqe_bulk; i++) {
423 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
424 		struct mlx5e_wqe_frag_info *wi;
425 
426 		wi = get_frag(rq, j);
427 		mlx5e_free_rx_wqe(rq, wi);
428 	}
429 }
430 
431 static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
432 {
433 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
434 	int i;
435 
436 	for (i = 0; i < wqe_bulk; i++) {
437 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
438 		struct mlx5e_rx_wqe_cyc *wqe;
439 
440 		wqe = mlx5_wq_cyc_get_wqe(wq, j);
441 
442 		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j)))
443 			break;
444 	}
445 
446 	return i;
447 }
448 
449 static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
450 {
451 	int remaining = wqe_bulk;
452 	int i = 0;
453 
454 	/* The WQE bulk is split into smaller bulks that are sized
455 	 * according to the page pool cache refill size to avoid overflowing
456 	 * the page pool cache due to too many page releases at once.
457 	 */
458 	do {
459 		int refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
460 		int alloc_count;
461 
462 		mlx5e_free_rx_wqes(rq, ix + i, refill);
463 		alloc_count = mlx5e_alloc_rx_wqes(rq, ix + i, refill);
464 		i += alloc_count;
465 		if (unlikely(alloc_count != refill))
466 			break;
467 
468 		remaining -= refill;
469 	} while (remaining);
470 
471 	return i;
472 }
473 
474 static void
475 mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinfo,
476 			       struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page,
477 			       u32 frag_offset, u32 len)
478 {
479 	skb_frag_t *frag;
480 
481 	dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
482 
483 	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
484 	if (!xdp_buff_has_frags(xdp)) {
485 		/* Init on the first fragment to avoid cold cache access
486 		 * when possible.
487 		 */
488 		sinfo->nr_frags = 0;
489 		sinfo->xdp_frags_size = 0;
490 		xdp_buff_set_frags_flag(xdp);
491 	}
492 
493 	frag = &sinfo->frags[sinfo->nr_frags++];
494 	__skb_frag_set_page(frag, frag_page->page);
495 	skb_frag_off_set(frag, frag_offset);
496 	skb_frag_size_set(frag, len);
497 
498 	if (page_is_pfmemalloc(frag_page->page))
499 		xdp_buff_set_frag_pfmemalloc(xdp);
500 	sinfo->xdp_frags_size += len;
501 }
502 
503 static inline void
504 mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
505 		   struct page *page, u32 frag_offset, u32 len,
506 		   unsigned int truesize)
507 {
508 	dma_addr_t addr = page_pool_get_dma_addr(page);
509 
510 	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
511 				rq->buff.map_dir);
512 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
513 			page, frag_offset, len, truesize);
514 }
515 
516 static inline void
517 mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
518 		      struct page *page, dma_addr_t addr,
519 		      int offset_from, int dma_offset, u32 headlen)
520 {
521 	const void *from = page_address(page) + offset_from;
522 	/* Aligning len to sizeof(long) optimizes memcpy performance */
523 	unsigned int len = ALIGN(headlen, sizeof(long));
524 
525 	dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
526 				rq->buff.map_dir);
527 	skb_copy_to_linear_data(skb, from, len);
528 }
529 
530 static void
531 mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
532 {
533 	bool no_xdp_xmit;
534 	int i;
535 
536 	/* A common case for AF_XDP. */
537 	if (bitmap_full(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe))
538 		return;
539 
540 	no_xdp_xmit = bitmap_empty(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
541 
542 	if (rq->xsk_pool) {
543 		struct xdp_buff **xsk_buffs = wi->alloc_units.xsk_buffs;
544 
545 		/* The page is always put into the Reuse Ring, because there
546 		 * is no way to return the page to userspace when the interface
547 		 * goes down.
548 		 */
549 		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
550 			if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap))
551 				xsk_buff_free(xsk_buffs[i]);
552 	} else {
553 		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) {
554 			if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap)) {
555 				struct mlx5e_frag_page *frag_page;
556 
557 				frag_page = &wi->alloc_units.frag_pages[i];
558 				mlx5e_page_release_fragmented(rq, frag_page);
559 			}
560 		}
561 	}
562 }
563 
564 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
565 {
566 	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
567 
568 	do {
569 		u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);
570 
571 		mlx5_wq_ll_push(wq, next_wqe_index);
572 	} while (--n);
573 
574 	/* ensure wqes are visible to device before updating doorbell record */
575 	dma_wmb();
576 
577 	mlx5_wq_ll_update_db_record(wq);
578 }
579 
/* This function returns the size of the contiguous free space inside a
 * bitmap, starting from 'first' and no longer than 'len', taking circular
 * wraparound into account.
 */
583 static int bitmap_find_window(unsigned long *bitmap, int len,
584 			      int bitmap_size, int first)
585 {
586 	int next_one, count;
587 
588 	next_one = find_next_bit(bitmap, bitmap_size, first);
589 	if (next_one == bitmap_size) {
590 		if (bitmap_size - first >= len)
591 			return len;
592 		next_one = find_next_bit(bitmap, bitmap_size, 0);
593 		count = next_one + bitmap_size - first;
594 	} else {
595 		count = next_one - first;
596 	}
597 
598 	return min(len, count);
599 }
600 
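/* Build a UMR WQE that rewrites 'klm_len' KLM entries of the SHAMPO header
 * mkey, starting at translation offset 'offset'. The inline KLM entries
 * themselves are filled in by the caller.
 */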
601 static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
602 			  __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs)
603 {
604 	memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms));
605 	umr_wqe->ctrl.opmod_idx_opcode =
606 		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
607 			     MLX5_OPCODE_UMR);
608 	umr_wqe->ctrl.umr_mkey = key;
609 	umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
610 					    | MLX5E_KLM_UMR_DS_CNT(klm_len));
611 	umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
612 	umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
613 	umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len);
614 	umr_wqe->uctrl.mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
615 }
616 
617 static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
618 				     struct mlx5e_icosq *sq,
619 				     u16 klm_entries, u16 index)
620 {
621 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
	u16 entries, pi, header_offset, wqe_bbs, new_entries;
623 	u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
624 	u16 page_index = shampo->curr_page_index;
625 	struct mlx5e_frag_page *frag_page;
626 	u64 addr = shampo->last_addr;
627 	struct mlx5e_dma_info *dma_info;
628 	struct mlx5e_umr_wqe *umr_wqe;
	int headroom, i, err;
630 
631 	headroom = rq->buff.headroom;
632 	new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
633 	entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
634 	wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
635 	pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
636 	umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
637 	build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs);
638 
639 	frag_page = &shampo->pages[page_index];
640 
641 	for (i = 0; i < entries; i++, index++) {
642 		dma_info = &shampo->info[index];
643 		if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
644 					 MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT))
645 			goto update_klm;
646 		header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
647 			MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
648 		if (!(header_offset & (PAGE_SIZE - 1))) {
649 			page_index = (page_index + 1) & (shampo->hd_per_wq - 1);
650 			frag_page = &shampo->pages[page_index];
651 
652 			err = mlx5e_page_alloc_fragmented(rq, frag_page);
653 			if (unlikely(err))
654 				goto err_unmap;
655 
656 			addr = page_pool_get_dma_addr(frag_page->page);
657 
658 			dma_info->addr = addr;
659 			dma_info->frag_page = frag_page;
660 		} else {
661 			dma_info->addr = addr + header_offset;
662 			dma_info->frag_page = frag_page;
663 		}
664 
665 update_klm:
666 		umr_wqe->inline_klms[i].bcount =
667 			cpu_to_be32(MLX5E_RX_MAX_HEAD);
668 		umr_wqe->inline_klms[i].key    = cpu_to_be32(lkey);
669 		umr_wqe->inline_klms[i].va     =
670 			cpu_to_be64(dma_info->addr + headroom);
671 	}
672 
673 	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
674 		.wqe_type	= MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
675 		.num_wqebbs	= wqe_bbs,
676 		.shampo.len	= new_entries,
677 	};
678 
679 	shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1);
680 	shampo->curr_page_index = page_index;
681 	shampo->last_addr = addr;
682 	sq->pc += wqe_bbs;
683 	sq->doorbell_cseg = &umr_wqe->ctrl;
684 
685 	return 0;
686 
687 err_unmap:
688 	while (--i >= 0) {
689 		dma_info = &shampo->info[--index];
690 		if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
691 			dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
692 			mlx5e_page_release_fragmented(rq, dma_info->frag_page);
693 		}
694 	}
695 	rq->stats->buff_alloc_err++;
696 	return err;
697 }
698 
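/* Allocate SHAMPO header entries for the next MPWQE: find a free window in
 * the header bitmap and post one or more KLM UMR WQEs to map it, splitting
 * the work on buffer wraparound and on the per-WQE KLM limit.
 */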
699 static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
700 {
701 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
702 	u16 klm_entries, num_wqe, index, entries_before;
703 	struct mlx5e_icosq *sq = rq->icosq;
704 	int i, err, max_klm_entries, len;
705 
706 	max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
707 	klm_entries = bitmap_find_window(shampo->bitmap,
708 					 shampo->hd_per_wqe,
709 					 shampo->hd_per_wq, shampo->pi);
710 	if (!klm_entries)
711 		return 0;
712 
713 	klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
714 	index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
715 	entries_before = shampo->hd_per_wq - index;
716 
717 	if (unlikely(entries_before < klm_entries))
718 		num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) +
719 			  DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries);
720 	else
721 		num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries);
722 
723 	for (i = 0; i < num_wqe; i++) {
724 		len = (klm_entries > max_klm_entries) ? max_klm_entries :
725 							klm_entries;
726 		if (unlikely(index + len > shampo->hd_per_wq))
727 			len = shampo->hd_per_wq - index;
728 		err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
729 		if (unlikely(err))
730 			return err;
731 		index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
732 		klm_entries -= len;
733 	}
734 
735 	return 0;
736 }
737 
738 static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
739 {
740 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
741 	struct mlx5e_icosq *sq = rq->icosq;
742 	struct mlx5e_frag_page *frag_page;
743 	struct mlx5_wq_cyc *wq = &sq->wq;
744 	struct mlx5e_umr_wqe *umr_wqe;
745 	u32 offset; /* 17-bit value with MTT. */
746 	u16 pi;
747 	int err;
748 	int i;
749 
750 	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
751 		err = mlx5e_alloc_rx_hd_mpwqe(rq);
752 		if (unlikely(err))
753 			goto err;
754 	}
755 
756 	pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
757 	umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
758 	memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
759 
760 	frag_page = &wi->alloc_units.frag_pages[0];
761 
762 	for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) {
763 		dma_addr_t addr;
764 
765 		err = mlx5e_page_alloc_fragmented(rq, frag_page);
766 		if (unlikely(err))
767 			goto err_unmap;
768 		addr = page_pool_get_dma_addr(frag_page->page);
769 		umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
770 			.ptag = cpu_to_be64(addr | MLX5_EN_WR),
771 		};
772 	}
773 
774 	/* Pad if needed, in case the value set to ucseg->xlt_octowords
775 	 * in mlx5e_build_umr_wqe() needed alignment.
776 	 */
777 	if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) {
778 		int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) -
779 			rq->mpwqe.pages_per_wqe;
780 
781 		memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0,
782 		       sizeof(*umr_wqe->inline_mtts) * pad);
783 	}
784 
785 	bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
786 	wi->consumed_strides = 0;
787 
788 	umr_wqe->ctrl.opmod_idx_opcode =
789 		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
790 			    MLX5_OPCODE_UMR);
791 
792 	offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
793 	umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
794 
795 	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
796 		.wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
797 		.num_wqebbs = rq->mpwqe.umr_wqebbs,
798 		.umr.rq     = rq,
799 	};
800 
801 	sq->pc += rq->mpwqe.umr_wqebbs;
802 
803 	sq->doorbell_cseg = &umr_wqe->ctrl;
804 
805 	return 0;
806 
807 err_unmap:
808 	while (--i >= 0) {
809 		frag_page--;
810 		mlx5e_page_release_fragmented(rq, frag_page);
811 	}
812 
813 err:
814 	rq->stats->buff_alloc_err++;
815 
816 	return err;
817 }
818 
/* This function deallocates the SHAMPO header buffer.
 * close == true means we are in the middle of closing the RQ, so the caller
 * passes the whole buffer and we free only the entries that are still in use
 * (their bits are set in shampo->bitmap); otherwise we free the given
 * [start, start + len) range unconditionally.
 */
825 void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close)
826 {
827 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
828 	struct mlx5e_frag_page *deleted_page = NULL;
829 	int hd_per_wq = shampo->hd_per_wq;
830 	struct mlx5e_dma_info *hd_info;
831 	int i, index = start;
832 
833 	for (i = 0; i < len; i++, index++) {
834 		if (index == hd_per_wq)
835 			index = 0;
836 
837 		if (close && !test_bit(index, shampo->bitmap))
838 			continue;
839 
840 		hd_info = &shampo->info[index];
841 		hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
842 		if (hd_info->frag_page && hd_info->frag_page != deleted_page) {
843 			deleted_page = hd_info->frag_page;
844 			mlx5e_page_release_fragmented(rq, hd_info->frag_page);
845 		}
846 
847 		hd_info->frag_page = NULL;
848 	}
849 
850 	if (start + len > hd_per_wq) {
851 		len -= hd_per_wq - start;
852 		bitmap_clear(shampo->bitmap, start, hd_per_wq - start);
853 		start = 0;
854 	}
855 
856 	bitmap_clear(shampo->bitmap, start, len);
857 }
858 
859 static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
860 {
861 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
862 	/* This function is called on rq/netdev close. */
863 	mlx5e_free_rx_mpwqe(rq, wi);
864 
865 	/* Avoid a second release of the wqe pages: dealloc is called also
866 	 * for missing wqes on an already flushed RQ.
867 	 */
868 	bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
869 }
870 
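/* NAPI refill hook for the legacy (cyclic) RQ: release the fragments of
 * completed WQEs, allocate fresh pages for the missing ones, and ring the
 * doorbell. Returns true ("busy") if the ring could not be fully refilled.
 */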
871 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
872 {
873 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
874 	int wqe_bulk, count;
875 	bool busy = false;
876 	u16 head;
877 
878 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
879 		return false;
880 
881 	if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
882 		return false;
883 
884 	if (rq->page_pool)
885 		page_pool_nid_changed(rq->page_pool, numa_mem_id());
886 
887 	wqe_bulk = mlx5_wq_cyc_missing(wq);
888 	head = mlx5_wq_cyc_get_head(wq);
889 
	/* Don't allow any newly allocated WQEs to share a page with old
	 * WQEs that aren't completed yet: stop the bulk early instead.
	 */
893 	wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
894 
895 	if (!rq->xsk_pool) {
896 		count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk);
897 	} else if (likely(!rq->xsk_pool->dma_need_sync)) {
898 		mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
899 		count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
900 	} else {
901 		mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
902 		/* If dma_need_sync is true, it's more efficient to call
903 		 * xsk_buff_alloc in a loop, rather than xsk_buff_alloc_batch,
904 		 * because the latter does the same check and returns only one
905 		 * frame.
906 		 */
907 		count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);
908 	}
909 
910 	mlx5_wq_cyc_push_n(wq, count);
911 	if (unlikely(count != wqe_bulk)) {
912 		rq->stats->buff_alloc_err++;
913 		busy = true;
914 	}
915 
916 	/* ensure wqes are visible to device before updating doorbell record */
917 	dma_wmb();
918 
919 	mlx5_wq_cyc_update_db_record(wq);
920 
921 	return busy;
922 }
923 
924 void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
925 {
926 	u16 sqcc;
927 
928 	sqcc = sq->cc;
929 
930 	while (sqcc != sq->pc) {
931 		struct mlx5e_icosq_wqe_info *wi;
932 		u16 ci;
933 
934 		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
935 		wi = &sq->db.wqe_info[ci];
936 		sqcc += wi->num_wqebbs;
937 #ifdef CONFIG_MLX5_EN_TLS
938 		switch (wi->wqe_type) {
939 		case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
940 			mlx5e_ktls_handle_ctx_completion(wi);
941 			break;
942 		case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
943 			mlx5e_ktls_handle_get_psv_completion(wi, sq);
944 			break;
945 		}
946 #endif
947 	}
948 	sq->cc = sqcc;
949 }
950 
951 static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
952 				       struct mlx5e_icosq *sq)
953 {
954 	struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
955 	struct mlx5e_shampo_hd *shampo;
956 	/* assume 1:1 relationship between RQ and icosq */
957 	struct mlx5e_rq *rq = &c->rq;
958 	int end, from, len = umr.len;
959 
960 	shampo = rq->mpwqe.shampo;
961 	end = shampo->hd_per_wq;
962 	from = shampo->ci;
963 	if (from + len > shampo->hd_per_wq) {
964 		len -= end - from;
965 		bitmap_set(shampo->bitmap, from, end - from);
966 		from = 0;
967 	}
968 
969 	bitmap_set(shampo->bitmap, from, len);
970 	shampo->ci = (shampo->ci + umr.len) & (shampo->hd_per_wq - 1);
971 }
972 
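/* Poll the internal control SQ's (ICOSQ) CQ: complete UMR and PSV WQEs
 * posted by the RX path (MPWQE buffer mapping, SHAMPO header UMRs, kTLS) and
 * trigger recovery when an error CQE is seen.
 */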
973 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
974 {
975 	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
976 	struct mlx5_cqe64 *cqe;
977 	u16 sqcc;
978 	int i;
979 
980 	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
981 		return 0;
982 
983 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
984 	if (likely(!cqe))
985 		return 0;
986 
987 	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
988 	 * otherwise a cq overrun may occur
989 	 */
990 	sqcc = sq->cc;
991 
992 	i = 0;
993 	do {
994 		u16 wqe_counter;
995 		bool last_wqe;
996 
997 		mlx5_cqwq_pop(&cq->wq);
998 
999 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
1000 
1001 		do {
1002 			struct mlx5e_icosq_wqe_info *wi;
1003 			u16 ci;
1004 
1005 			last_wqe = (sqcc == wqe_counter);
1006 
1007 			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
1008 			wi = &sq->db.wqe_info[ci];
1009 			sqcc += wi->num_wqebbs;
1010 
1011 			if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
1012 				netdev_WARN_ONCE(cq->netdev,
1013 						 "Bad OP in ICOSQ CQE: 0x%x\n",
1014 						 get_cqe_opcode(cqe));
1015 				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
1016 						     (struct mlx5_err_cqe *)cqe);
1017 				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
1018 				if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
1019 					queue_work(cq->priv->wq, &sq->recover_work);
1020 				break;
1021 			}
1022 
1023 			switch (wi->wqe_type) {
1024 			case MLX5E_ICOSQ_WQE_UMR_RX:
1025 				wi->umr.rq->mpwqe.umr_completed++;
1026 				break;
1027 			case MLX5E_ICOSQ_WQE_NOP:
1028 				break;
1029 			case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR:
1030 				mlx5e_handle_shampo_hd_umr(wi->shampo, sq);
1031 				break;
1032 #ifdef CONFIG_MLX5_EN_TLS
1033 			case MLX5E_ICOSQ_WQE_UMR_TLS:
1034 				break;
1035 			case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
1036 				mlx5e_ktls_handle_ctx_completion(wi);
1037 				break;
1038 			case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
1039 				mlx5e_ktls_handle_get_psv_completion(wi, sq);
1040 				break;
1041 #endif
1042 			default:
1043 				netdev_WARN_ONCE(cq->netdev,
1044 						 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
1045 						 wi->wqe_type);
1046 			}
1047 		} while (!last_wqe);
1048 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
1049 
1050 	sq->cc = sqcc;
1051 
1052 	mlx5_cqwq_update_db_record(&cq->wq);
1053 
1054 	return i;
1055 }
1056 
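/* NAPI refill hook for the striding (MPW) RQ: advance the WQ for WQEs whose
 * buffer-mapping UMRs have completed, then post new UMR WQEs on the ICOSQ
 * for the missing entries. A WQE is only made visible to HW after its UMR
 * completion arrives.
 */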
1057 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
1058 {
1059 	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
1060 	u8  umr_completed = rq->mpwqe.umr_completed;
1061 	struct mlx5e_icosq *sq = rq->icosq;
1062 	int alloc_err = 0;
1063 	u8  missing, i;
1064 	u16 head;
1065 
1066 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
1067 		return false;
1068 
1069 	if (umr_completed) {
1070 		mlx5e_post_rx_mpwqe(rq, umr_completed);
1071 		rq->mpwqe.umr_in_progress -= umr_completed;
1072 		rq->mpwqe.umr_completed = 0;
1073 	}
1074 
1075 	missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
1076 
1077 	if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
1078 		rq->stats->congst_umr++;
1079 
1080 	if (likely(missing < rq->mpwqe.min_wqe_bulk))
1081 		return false;
1082 
1083 	if (rq->page_pool)
1084 		page_pool_nid_changed(rq->page_pool, numa_mem_id());
1085 
1086 	head = rq->mpwqe.actual_wq_head;
1087 	i = missing;
1088 	do {
1089 		struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, head);
1090 
1091 		/* Deferred free for better page pool cache usage. */
1092 		mlx5e_free_rx_mpwqe(rq, wi);
1093 
1094 		alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
1095 					   mlx5e_alloc_rx_mpwqe(rq, head);
1096 
1097 		if (unlikely(alloc_err))
1098 			break;
1099 		head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
1100 	} while (--i);
1101 
1102 	rq->mpwqe.umr_last_bulk    = missing - i;
1103 	if (sq->doorbell_cseg) {
1104 		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
1105 		sq->doorbell_cseg = NULL;
1106 	}
1107 
1108 	rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
1109 	rq->mpwqe.actual_wq_head   = head;
1110 
1111 	/* If XSK Fill Ring doesn't have enough frames, report the error, so
1112 	 * that one of the actions can be performed:
1113 	 * 1. If need_wakeup is used, signal that the application has to kick
1114 	 * the driver when it refills the Fill Ring.
1115 	 * 2. Otherwise, busy poll by rescheduling the NAPI poll.
1116 	 */
1117 	if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
1118 		return true;
1119 
1120 	return false;
1121 }
1122 
1123 static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
1124 {
1125 	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
1126 	u8 tcp_ack     = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
1127 			 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
1128 
1129 	tcp->check                      = 0;
1130 	tcp->psh                        = get_cqe_lro_tcppsh(cqe);
1131 
1132 	if (tcp_ack) {
1133 		tcp->ack                = 1;
1134 		tcp->ack_seq            = cqe->lro.ack_seq_num;
1135 		tcp->window             = cqe->lro.tcp_win;
1136 	}
1137 }
1138 
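/* Patch the headers of an LRO aggregate so the stack sees one consistent
 * superframe: update the IP total length and TTL/hop limit from the CQE,
 * rebuild the TCP flags/ack/window, and recompute the TCP checksum,
 * including the pseudo-header.
 */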
1139 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
1140 				 u32 cqe_bcnt)
1141 {
1142 	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
1143 	struct tcphdr	*tcp;
1144 	int network_depth = 0;
1145 	__wsum check;
1146 	__be16 proto;
1147 	u16 tot_len;
1148 	void *ip_p;
1149 
1150 	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
1151 
1152 	tot_len = cqe_bcnt - network_depth;
1153 	ip_p = skb->data + network_depth;
1154 
1155 	if (proto == htons(ETH_P_IP)) {
1156 		struct iphdr *ipv4 = ip_p;
1157 
1158 		tcp = ip_p + sizeof(struct iphdr);
1159 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1160 
1161 		ipv4->ttl               = cqe->lro.min_ttl;
1162 		ipv4->tot_len           = cpu_to_be16(tot_len);
1163 		ipv4->check             = 0;
1164 		ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
1165 						       ipv4->ihl);
1166 
1167 		mlx5e_lro_update_tcp_hdr(cqe, tcp);
1168 		check = csum_partial(tcp, tcp->doff * 4,
1169 				     csum_unfold((__force __sum16)cqe->check_sum));
1170 		/* Almost done, don't forget the pseudo header */
1171 		tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
1172 					       tot_len - sizeof(struct iphdr),
1173 					       IPPROTO_TCP, check);
1174 	} else {
1175 		u16 payload_len = tot_len - sizeof(struct ipv6hdr);
1176 		struct ipv6hdr *ipv6 = ip_p;
1177 
1178 		tcp = ip_p + sizeof(struct ipv6hdr);
1179 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1180 
1181 		ipv6->hop_limit         = cqe->lro.min_ttl;
1182 		ipv6->payload_len       = cpu_to_be16(payload_len);
1183 
1184 		mlx5e_lro_update_tcp_hdr(cqe, tcp);
1185 		check = csum_partial(tcp, tcp->doff * 4,
1186 				     csum_unfold((__force __sum16)cqe->check_sum));
1187 		/* Almost done, don't forget the pseudo header */
1188 		tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
1189 					     IPPROTO_TCP, check);
1190 	}
1191 }
1192 
1193 static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
1194 {
1195 	struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index];
1196 	u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom;
1197 
1198 	return page_address(last_head->frag_page->page) + head_offset;
1199 }
1200 
1201 static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
1202 {
1203 	int udp_off = rq->hw_gro_data->fk.control.thoff;
1204 	struct sk_buff *skb = rq->hw_gro_data->skb;
1205 	struct udphdr *uh;
1206 
1207 	uh = (struct udphdr *)(skb->data + udp_off);
1208 	uh->len = htons(skb->len - udp_off);
1209 
1210 	if (uh->check)
1211 		uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr,
1212 					  ipv4->daddr, 0);
1213 
1214 	skb->csum_start = (unsigned char *)uh - skb->head;
1215 	skb->csum_offset = offsetof(struct udphdr, check);
1216 
1217 	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
1218 }
1219 
1220 static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)
1221 {
1222 	int udp_off = rq->hw_gro_data->fk.control.thoff;
1223 	struct sk_buff *skb = rq->hw_gro_data->skb;
1224 	struct udphdr *uh;
1225 
1226 	uh = (struct udphdr *)(skb->data + udp_off);
1227 	uh->len = htons(skb->len - udp_off);
1228 
1229 	if (uh->check)
1230 		uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr,
1231 					  &ipv6->daddr, 0);
1232 
1233 	skb->csum_start = (unsigned char *)uh - skb->head;
1234 	skb->csum_offset = offsetof(struct udphdr, check);
1235 
1236 	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
1237 }
1238 
1239 static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1240 					      struct tcphdr *skb_tcp_hd)
1241 {
1242 	u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
1243 	struct tcphdr *last_tcp_hd;
1244 	void *last_hd_addr;
1245 
1246 	last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
1247 	last_tcp_hd =  last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
1248 	tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH);
1249 }
1250 
1251 static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4,
1252 					     struct mlx5_cqe64 *cqe, bool match)
1253 {
1254 	int tcp_off = rq->hw_gro_data->fk.control.thoff;
1255 	struct sk_buff *skb = rq->hw_gro_data->skb;
1256 	struct tcphdr *tcp;
1257 
1258 	tcp = (struct tcphdr *)(skb->data + tcp_off);
1259 	if (match)
1260 		mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
1261 
1262 	tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
1263 				   ipv4->daddr, 0);
1264 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
1265 	if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id)
1266 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
1267 
1268 	skb->csum_start = (unsigned char *)tcp - skb->head;
1269 	skb->csum_offset = offsetof(struct tcphdr, check);
1270 
1271 	if (tcp->cwr)
1272 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1273 }
1274 
1275 static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6,
1276 					     struct mlx5_cqe64 *cqe, bool match)
1277 {
1278 	int tcp_off = rq->hw_gro_data->fk.control.thoff;
1279 	struct sk_buff *skb = rq->hw_gro_data->skb;
1280 	struct tcphdr *tcp;
1281 
1282 	tcp = (struct tcphdr *)(skb->data + tcp_off);
1283 	if (match)
1284 		mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
1285 
1286 	tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr,
1287 				   &ipv6->daddr, 0);
1288 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
1289 	skb->csum_start = (unsigned char *)tcp - skb->head;
1290 	skb->csum_offset = offsetof(struct tcphdr, check);
1291 
1292 	if (tcp->cwr)
1293 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1294 }
1295 
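/* Finalize the headers of a HW GRO (SHAMPO) aggregate, much like SW GRO's
 * gro_complete step: fix up the IP length fields, set the appropriate GSO
 * flags, and prepare CHECKSUM_PARTIAL offsets for TCP or UDP.
 */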
1296 static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
1297 {
1298 	bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP));
1299 	struct sk_buff *skb = rq->hw_gro_data->skb;
1300 
1301 	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
1302 	skb->ip_summed = CHECKSUM_PARTIAL;
1303 
1304 	if (is_ipv4) {
1305 		int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr);
1306 		struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff);
1307 		__be16 newlen = htons(skb->len - nhoff);
1308 
1309 		csum_replace2(&ipv4->check, ipv4->tot_len, newlen);
1310 		ipv4->tot_len = newlen;
1311 
1312 		if (ipv4->protocol == IPPROTO_TCP)
1313 			mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match);
1314 		else
1315 			mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4);
1316 	} else {
1317 		int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr);
1318 		struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff);
1319 
1320 		ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6));
1321 
1322 		if (ipv6->nexthdr == IPPROTO_TCP)
1323 			mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match);
1324 		else
1325 			mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6);
1326 	}
1327 }
1328 
1329 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
1330 				      struct sk_buff *skb)
1331 {
1332 	u8 cht = cqe->rss_hash_type;
1333 	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
1334 		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
1335 					    PKT_HASH_TYPE_NONE;
1336 	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
1337 }
1338 
1339 static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
1340 					__be16 *proto)
1341 {
1342 	*proto = ((struct ethhdr *)skb->data)->h_proto;
1343 	*proto = __vlan_get_protocol(skb, *proto, network_depth);
1344 
1345 	if (*proto == htons(ETH_P_IP))
1346 		return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
1347 
1348 	if (*proto == htons(ETH_P_IPV6))
1349 		return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
1350 
1351 	return false;
1352 }
1353 
1354 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
1355 {
1356 	int network_depth = 0;
1357 	__be16 proto;
1358 	void *ip;
1359 	int rc;
1360 
1361 	if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
1362 		return;
1363 
1364 	ip = skb->data + network_depth;
1365 	rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
1366 					 IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
1367 
1368 	rq->stats->ecn_mark += !!rc;
1369 }
1370 
1371 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
1372 {
1373 	void *ip_p = skb->data + network_depth;
1374 
1375 	return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
1376 					    ((struct ipv6hdr *)ip_p)->nexthdr;
1377 }
1378 
1379 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
1380 
1381 #define MAX_PADDING 8
1382 
1383 static void
1384 tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
1385 		       struct mlx5e_rq_stats *stats)
1386 {
1387 	stats->csum_complete_tail_slow++;
1388 	skb->csum = csum_block_add(skb->csum,
1389 				   skb_checksum(skb, offset, len, 0),
1390 				   offset);
1391 }
1392 
1393 static void
1394 tail_padding_csum(struct sk_buff *skb, int offset,
1395 		  struct mlx5e_rq_stats *stats)
1396 {
1397 	u8 tail_padding[MAX_PADDING];
1398 	int len = skb->len - offset;
1399 	void *tail;
1400 
1401 	if (unlikely(len > MAX_PADDING)) {
1402 		tail_padding_csum_slow(skb, offset, len, stats);
1403 		return;
1404 	}
1405 
1406 	tail = skb_header_pointer(skb, offset, len, tail_padding);
1407 	if (unlikely(!tail)) {
1408 		tail_padding_csum_slow(skb, offset, len, stats);
1409 		return;
1410 	}
1411 
1412 	stats->csum_complete_tail++;
1413 	skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
1414 }
1415 
1416 static void
1417 mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
1418 		     struct mlx5e_rq_stats *stats)
1419 {
1420 	struct ipv6hdr *ip6;
1421 	struct iphdr   *ip4;
1422 	int pkt_len;
1423 
1424 	/* Fixup vlan headers, if any */
1425 	if (network_depth > ETH_HLEN)
		/* CQE csum is calculated starting from the IP header and
		 * does not cover VLAN headers (if present), so add their
		 * contribution to the checksum manually.
		 */
1430 		skb->csum = csum_partial(skb->data + ETH_HLEN,
1431 					 network_depth - ETH_HLEN,
1432 					 skb->csum);
1433 
1434 	/* Fixup tail padding, if any */
1435 	switch (proto) {
1436 	case htons(ETH_P_IP):
1437 		ip4 = (struct iphdr *)(skb->data + network_depth);
1438 		pkt_len = network_depth + ntohs(ip4->tot_len);
1439 		break;
1440 	case htons(ETH_P_IPV6):
1441 		ip6 = (struct ipv6hdr *)(skb->data + network_depth);
1442 		pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
1443 		break;
1444 	default:
1445 		return;
1446 	}
1447 
1448 	if (likely(pkt_len >= skb->len))
1449 		return;
1450 
1451 	tail_padding_csum(skb, pkt_len, stats);
1452 }
1453 
1454 static inline void mlx5e_handle_csum(struct net_device *netdev,
1455 				     struct mlx5_cqe64 *cqe,
1456 				     struct mlx5e_rq *rq,
1457 				     struct sk_buff *skb,
1458 				     bool   lro)
1459 {
1460 	struct mlx5e_rq_stats *stats = rq->stats;
1461 	int network_depth = 0;
1462 	__be16 proto;
1463 
1464 	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
1465 		goto csum_none;
1466 
1467 	if (lro) {
1468 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1469 		stats->csum_unnecessary++;
1470 		return;
1471 	}
1472 
1473 	/* True when explicitly set via priv flag, or XDP prog is loaded */
1474 	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
1475 	    get_cqe_tls_offload(cqe))
1476 		goto csum_unnecessary;
1477 
	/* CQE csum doesn't cover padding octets in short ethernet
	 * frames, and the padding is appended before the FCS field is
	 * calculated and appended.
	 *
	 * Detecting such padded frames would require verifying and
	 * parsing IP headers, so we simply force all short frames to be
	 * CHECKSUM_UNNECESSARY even if they are not padded.
	 */
1486 	if (short_frame(skb->len))
1487 		goto csum_unnecessary;
1488 
1489 	if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
1490 		if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
1491 			goto csum_unnecessary;
1492 
1493 		stats->csum_complete++;
1494 		skb->ip_summed = CHECKSUM_COMPLETE;
1495 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1496 
1497 		if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
1498 			return; /* CQE csum covers all received bytes */
1499 
		/* csum might need some fixups ... */
1501 		mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
1502 		return;
1503 	}
1504 
1505 csum_unnecessary:
1506 	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
1507 		   (cqe->hds_ip_ext & CQE_L4_OK))) {
1508 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1509 		if (cqe_is_tunneled(cqe)) {
1510 			skb->csum_level = 1;
1511 			skb->encapsulation = 1;
1512 			stats->csum_unnecessary_inner++;
1513 			return;
1514 		}
1515 		stats->csum_unnecessary++;
1516 		return;
1517 	}
1518 csum_none:
1519 	skb->ip_summed = CHECKSUM_NONE;
1520 	stats->csum_none++;
1521 }
1522 
1523 #define MLX5E_CE_BIT_MASK 0x80
1524 
1525 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
1526 				      u32 cqe_bcnt,
1527 				      struct mlx5e_rq *rq,
1528 				      struct sk_buff *skb)
1529 {
1530 	u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
1531 	struct mlx5e_rq_stats *stats = rq->stats;
1532 	struct net_device *netdev = rq->netdev;
1533 
1534 	skb->mac_len = ETH_HLEN;
1535 
1536 	if (unlikely(get_cqe_tls_offload(cqe)))
1537 		mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
1538 
1539 	if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1540 		mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
1541 
1542 	if (unlikely(mlx5e_macsec_is_rx_flow(cqe)))
1543 		mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
1544 
1545 	if (lro_num_seg > 1) {
1546 		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
1547 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
1548 		/* Subtract one since we already counted this as one
1549 		 * "regular" packet in mlx5e_complete_rx_cqe()
1550 		 */
1551 		stats->packets += lro_num_seg - 1;
1552 		stats->lro_packets++;
1553 		stats->lro_bytes += cqe_bcnt;
1554 	}
1555 
1556 	if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
1557 		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
1558 								  rq->clock, get_cqe_ts(cqe));
1559 	skb_record_rx_queue(skb, rq->ix);
1560 
1561 	if (likely(netdev->features & NETIF_F_RXHASH))
1562 		mlx5e_skb_set_hash(cqe, skb);
1563 
1564 	if (cqe_has_vlan(cqe)) {
1565 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1566 				       be16_to_cpu(cqe->vlan_info));
1567 		stats->removed_vlan_packets++;
1568 	}
1569 
1570 	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
1571 
1572 	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
1573 	/* checking CE bit in cqe - MSB in ml_path field */
1574 	if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
1575 		mlx5e_enable_ecn(rq, skb);
1576 
1577 	skb->protocol = eth_type_trans(skb, netdev);
1578 
1579 	if (unlikely(mlx5e_skb_is_multicast(skb)))
1580 		stats->mcast_packets++;
1581 }
1582 
1583 static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
1584 					 struct mlx5_cqe64 *cqe,
1585 					 u32 cqe_bcnt,
1586 					 struct sk_buff *skb)
1587 {
1588 	struct mlx5e_rq_stats *stats = rq->stats;
1589 
1590 	stats->packets++;
1591 	stats->gro_packets++;
1592 	stats->bytes += cqe_bcnt;
1593 	stats->gro_bytes += cqe_bcnt;
1594 	if (NAPI_GRO_CB(skb)->count != 1)
1595 		return;
1596 	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1597 	skb_reset_network_header(skb);
1598 	if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
1599 		napi_gro_receive(rq->cq.napi, skb);
1600 		rq->hw_gro_data->skb = NULL;
1601 	}
1602 }
1603 
1604 static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
1605 					 struct mlx5_cqe64 *cqe,
1606 					 u32 cqe_bcnt,
1607 					 struct sk_buff *skb)
1608 {
1609 	struct mlx5e_rq_stats *stats = rq->stats;
1610 
1611 	stats->packets++;
1612 	stats->bytes += cqe_bcnt;
1613 	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1614 }
1615 
1616 static inline
1617 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
1618 				       u32 frag_size, u16 headroom,
1619 				       u32 cqe_bcnt, u32 metasize)
1620 {
1621 	struct sk_buff *skb = napi_build_skb(va, frag_size);
1622 
1623 	if (unlikely(!skb)) {
1624 		rq->stats->buff_alloc_err++;
1625 		return NULL;
1626 	}
1627 
1628 	skb_reserve(skb, headroom);
1629 	skb_put(skb, cqe_bcnt);
1630 
1631 	if (metasize)
1632 		skb_metadata_set(skb, metasize);
1633 
1634 	return skb;
1635 }
1636 
1637 static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1638 			     void *va, u16 headroom, u32 frame_sz, u32 len,
1639 			     struct mlx5e_xdp_buff *mxbuf)
1640 {
1641 	xdp_init_buff(&mxbuf->xdp, frame_sz, &rq->xdp_rxq);
1642 	xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
1643 	mxbuf->cqe = cqe;
1644 	mxbuf->rq = rq;
1645 }
1646 
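/* Linear RX path: the whole packet fits in a single fragment, so the skb is
 * built directly around the RX buffer without copying. An attached XDP
 * program runs first and may consume the packet.
 */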
1647 static struct sk_buff *
1648 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1649 			  struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
1650 {
1651 	struct mlx5e_frag_page *frag_page = wi->frag_page;
1652 	u16 rx_headroom = rq->buff.headroom;
1653 	struct bpf_prog *prog;
1654 	struct sk_buff *skb;
1655 	u32 metasize = 0;
1656 	void *va, *data;
1657 	dma_addr_t addr;
1658 	u32 frag_size;
1659 
1660 	va             = page_address(frag_page->page) + wi->offset;
1661 	data           = va + rx_headroom;
1662 	frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1663 
1664 	addr = page_pool_get_dma_addr(frag_page->page);
1665 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1666 				      frag_size, rq->buff.map_dir);
1667 	net_prefetch(data);
1668 
1669 	prog = rcu_dereference(rq->xdp_prog);
1670 	if (prog) {
1671 		struct mlx5e_xdp_buff mxbuf;
1672 
1673 		net_prefetchw(va); /* xdp_frame data area */
1674 		mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1675 				 cqe_bcnt, &mxbuf);
1676 		if (mlx5e_xdp_handle(rq, prog, &mxbuf))
1677 			return NULL; /* page/packet was consumed by XDP */
1678 
1679 		rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
1680 		metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
1681 		cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
1682 	}
1683 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1684 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
1685 	if (unlikely(!skb))
1686 		return NULL;
1687 
1688 	/* queue up for recycling/reuse */
1689 	skb_mark_for_recycle(skb);
1690 	frag_page->frags++;
1691 
1692 	return skb;
1693 }
1694 
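/* Non-linear RX path: the packet spans multiple WQE fragments. The first
 * fragment becomes the linear part and the rest are attached as xdp_buff
 * frags, so a multi-buffer XDP program can run before the skb is built.
 */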
1695 static struct sk_buff *
1696 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1697 			     struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
1698 {
1699 	struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
1700 	struct mlx5e_wqe_frag_info *head_wi = wi;
1701 	u16 rx_headroom = rq->buff.headroom;
1702 	struct mlx5e_frag_page *frag_page;
1703 	struct skb_shared_info *sinfo;
1704 	struct mlx5e_xdp_buff mxbuf;
1705 	u32 frag_consumed_bytes;
1706 	struct bpf_prog *prog;
1707 	struct sk_buff *skb;
1708 	dma_addr_t addr;
1709 	u32 truesize;
1710 	void *va;
1711 
1712 	frag_page = wi->frag_page;
1713 
1714 	va = page_address(frag_page->page) + wi->offset;
1715 	frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
1716 
1717 	addr = page_pool_get_dma_addr(frag_page->page);
1718 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1719 				      rq->buff.frame0_sz, rq->buff.map_dir);
1720 	net_prefetchw(va); /* xdp_frame data area */
1721 	net_prefetch(va + rx_headroom);
1722 
1723 	mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1724 			 frag_consumed_bytes, &mxbuf);
1725 	sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
1726 	truesize = 0;
1727 
1728 	cqe_bcnt -= frag_consumed_bytes;
1729 	frag_info++;
1730 	wi++;
1731 
1732 	while (cqe_bcnt) {
1733 		frag_page = wi->frag_page;
1734 
1735 		frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
1736 
1737 		mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page,
1738 					       wi->offset, frag_consumed_bytes);
1739 		truesize += frag_info->frag_stride;
1740 
1741 		cqe_bcnt -= frag_consumed_bytes;
1742 		frag_info++;
1743 		wi++;
1744 	}
1745 
1746 	prog = rcu_dereference(rq->xdp_prog);
1747 	if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
1748 		if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
1749 			int i;
1750 
1751 			for (i = wi - head_wi; i < rq->wqe.info.num_frags; i++)
1752 				mlx5e_put_rx_frag(rq, &head_wi[i]);
1753 		}
1754 		return NULL; /* page/packet was consumed by XDP */
1755 	}
1756 
1757 	skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
1758 				     mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
1759 				     mxbuf.xdp.data_end - mxbuf.xdp.data,
1760 				     mxbuf.xdp.data - mxbuf.xdp.data_meta);
1761 	if (unlikely(!skb))
1762 		return NULL;
1763 
1764 	skb_mark_for_recycle(skb);
1765 	head_wi->frag_page->frags++;
1766 
1767 	if (xdp_buff_has_frags(&mxbuf.xdp)) {
1768 		/* sinfo->nr_frags is reset by build_skb, calculate again. */
1769 		xdp_update_skb_shared_info(skb, wi - head_wi - 1,
1770 					   sinfo->xdp_frags_size, truesize,
1771 					   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
1772 
1773 		for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
1774 			pwi->frag_page->frags++;
1775 	}
1776 
1777 	return skb;
1778 }
1779 
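/* For error CQEs whose syndrome indicates a recoverable condition, dump the
 * CQE and schedule the RQ recovery work, at most once per recovery cycle.
 */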
1780 static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1781 {
1782 	struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
1783 	struct mlx5e_priv *priv = rq->priv;
1784 
1785 	if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
1786 	    !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
1787 		mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
1788 		queue_work(priv->wq, &rq->recover_work);
1789 	}
1790 }
1791 
1792 static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1793 {
1794 	trigger_report(rq, cqe);
1795 	rq->stats->wqe_err++;
1796 }
1797 
1798 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1799 {
1800 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1801 	struct mlx5e_wqe_frag_info *wi;
1802 	struct sk_buff *skb;
1803 	u32 cqe_bcnt;
1804 	u16 ci;
1805 
1806 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1807 	wi       = get_frag(rq, ci);
1808 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1809 
1810 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1811 		mlx5e_handle_rx_err_cqe(rq, cqe);
1812 		goto wq_cyc_pop;
1813 	}
1814 
1815 	skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe,
1816 			      mlx5e_skb_from_cqe_linear,
1817 			      mlx5e_skb_from_cqe_nonlinear,
1818 			      mlx5e_xsk_skb_from_cqe_linear,
1819 			      rq, wi, cqe, cqe_bcnt);
1820 	if (!skb) {
1821 		/* no SKB was built, the packet was probably consumed by XDP */
1822 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
1823 			/* do not return page to cache,
1824 			 * it will be returned on XDP_TX completion.
1825 			 */
1826 			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
1827 		}
1828 		goto wq_cyc_pop;
1829 	}
1830 
1831 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1832 
1833 	if (mlx5e_cqe_regb_chain(cqe))
1834 		if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
1835 			dev_kfree_skb_any(skb);
1836 			goto wq_cyc_pop;
1837 		}
1838 
1839 	napi_gro_receive(rq->cq.napi, skb);
1840 
1841 wq_cyc_pop:
1842 	mlx5_wq_cyc_pop(wq);
1843 }
1844 
1845 #ifdef CONFIG_MLX5_ESWITCH
1846 static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1847 {
1848 	struct net_device *netdev = rq->netdev;
1849 	struct mlx5e_priv *priv = netdev_priv(netdev);
1850 	struct mlx5e_rep_priv *rpriv  = priv->ppriv;
1851 	struct mlx5_eswitch_rep *rep = rpriv->rep;
1852 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1853 	struct mlx5e_wqe_frag_info *wi;
1854 	struct sk_buff *skb;
1855 	u32 cqe_bcnt;
1856 	u16 ci;
1857 
1858 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1859 	wi       = get_frag(rq, ci);
1860 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1861 
1862 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1863 		mlx5e_handle_rx_err_cqe(rq, cqe);
1864 		goto wq_cyc_pop;
1865 	}
1866 
1867 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1868 			      mlx5e_skb_from_cqe_linear,
1869 			      mlx5e_skb_from_cqe_nonlinear,
1870 			      rq, wi, cqe, cqe_bcnt);
1871 	if (!skb) {
1872 		/* no SKB was built, the packet was probably consumed by XDP */
1873 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
1874 			/* do not return page to cache,
1875 			 * it will be returned on XDP_TX completion.
1876 			 */
1877 			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
1878 		}
1879 		goto wq_cyc_pop;
1880 	}
1881 
1882 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1883 
1884 	if (rep->vlan && skb_vlan_tag_present(skb))
1885 		skb_vlan_pop(skb);
1886 
1887 	mlx5e_rep_tc_receive(cqe, rq, skb);
1888 
1889 wq_cyc_pop:
1890 	mlx5_wq_cyc_pop(wq);
1891 }
1892 
1893 static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1894 {
1895 	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
1896 	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
1897 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
1898 	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
1899 	u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
1900 	u32 head_offset    = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
1901 	u32 page_idx       = wqe_offset >> rq->mpwqe.page_shift;
1902 	struct mlx5e_rx_wqe_ll *wqe;
1903 	struct mlx5_wq_ll *wq;
1904 	struct sk_buff *skb;
1905 	u16 cqe_bcnt;
1906 
1907 	wi->consumed_strides += cstrides;
1908 
1909 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1910 		mlx5e_handle_rx_err_cqe(rq, cqe);
1911 		goto mpwrq_cqe_out;
1912 	}
1913 
1914 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1915 		struct mlx5e_rq_stats *stats = rq->stats;
1916 
1917 		stats->mpwqe_filler_cqes++;
1918 		stats->mpwqe_filler_strides += cstrides;
1919 		goto mpwrq_cqe_out;
1920 	}
1921 
1922 	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1923 
1924 	skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1925 			      mlx5e_skb_from_cqe_mpwrq_linear,
1926 			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
1927 			      rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
1928 	if (!skb)
1929 		goto mpwrq_cqe_out;
1930 
1931 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1932 
1933 	mlx5e_rep_tc_receive(cqe, rq, skb);
1934 
1935 mpwrq_cqe_out:
1936 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1937 		return;
1938 
1939 	wq  = &rq->mpwqe.wq;
1940 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
1941 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1942 }
1943 
1944 const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
1945 	.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
1946 	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
1947 };
1948 #endif
1949 
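/* Attach the packet payload to the SKB as page fragments, page by page,
 * starting at data_offset within the first frag_page.
 */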
1950 static void
1951 mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
1952 		    struct mlx5e_frag_page *frag_page,
1953 		    u32 data_bcnt, u32 data_offset)
1954 {
1955 	net_prefetchw(skb->data);
1956 
1957 	while (data_bcnt) {
1958 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
1959 		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
1960 		unsigned int truesize;
1961 
1962 		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
1963 			truesize = pg_consumed_bytes;
1964 		else
1965 			truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
1966 
1967 		frag_page->frags++;
1968 		mlx5e_add_skb_frag(rq, skb, frag_page->page, data_offset,
1969 				   pg_consumed_bytes, truesize);
1970 
1971 		data_bcnt -= pg_consumed_bytes;
1972 		data_offset = 0;
1973 		frag_page++;
1974 	}
1975 }
1976 
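/* MPWQE non-linear completion. Payload pages are attached as frags; with an
 * XDP program loaded, a dedicated linear page provides the xdp_buff linear
 * part, otherwise up to MLX5E_RX_MAX_HEAD bytes of headers are copied into
 * a freshly allocated SKB.
 */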
1977 static struct sk_buff *
1978 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1979 				   struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
1980 				   u32 page_idx)
1981 {
1982 	struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
1983 	u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
1984 	struct mlx5e_frag_page *head_page = frag_page;
1985 	u32 frag_offset    = head_offset;
1986 	u32 byte_cnt       = cqe_bcnt;
1987 	struct skb_shared_info *sinfo;
1988 	struct mlx5e_xdp_buff mxbuf;
1989 	unsigned int truesize = 0;
1990 	struct bpf_prog *prog;
1991 	struct sk_buff *skb;
1992 	u32 linear_frame_sz;
1993 	u16 linear_data_len;
1994 	u16 linear_hr;
1995 	void *va;
1996 
1997 	prog = rcu_dereference(rq->xdp_prog);
1998 
1999 	if (prog) {
2000 		/* area for bpf_xdp_[store|load]_bytes */
2001 		net_prefetchw(page_address(frag_page->page) + frag_offset);
2002 		if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) {
2003 			rq->stats->buff_alloc_err++;
2004 			return NULL;
2005 		}
2006 		va = page_address(wi->linear_page.page);
2007 		net_prefetchw(va); /* xdp_frame data area */
2008 		linear_hr = XDP_PACKET_HEADROOM;
2009 		linear_data_len = 0;
2010 		linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
2011 	} else {
2012 		skb = napi_alloc_skb(rq->cq.napi,
2013 				     ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
2014 		if (unlikely(!skb)) {
2015 			rq->stats->buff_alloc_err++;
2016 			return NULL;
2017 		}
2018 		skb_mark_for_recycle(skb);
2019 		va = skb->head;
2020 		net_prefetchw(va); /* xdp_frame data area */
2021 		net_prefetchw(skb->data);
2022 
2023 		frag_offset += headlen;
2024 		byte_cnt -= headlen;
2025 		linear_hr = skb_headroom(skb);
2026 		linear_data_len = headlen;
2027 		linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
2028 		if (unlikely(frag_offset >= PAGE_SIZE)) {
2029 			frag_page++;
2030 			frag_offset -= PAGE_SIZE;
2031 		}
2032 	}
2033 
2034 	mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
2035 
2036 	sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
2037 
2038 	while (byte_cnt) {
2039 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
2040 		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
2041 
2042 		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
2043 			truesize += pg_consumed_bytes;
2044 		else
2045 			truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
2046 
2047 		mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
2048 					       pg_consumed_bytes);
2049 		byte_cnt -= pg_consumed_bytes;
2050 		frag_offset = 0;
2051 		frag_page++;
2052 	}
2053 
2054 	if (prog) {
2055 		if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
2056 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
2057 				int i;
2058 
2059 				for (i = 0; i < sinfo->nr_frags; i++)
2060 					/* non-atomic */
2061 					__set_bit(page_idx + i, wi->skip_release_bitmap);
2062 				return NULL;
2063 			}
2064 			mlx5e_page_release_fragmented(rq, &wi->linear_page);
2065 			return NULL; /* page/packet was consumed by XDP */
2066 		}
2067 
2068 		skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
2069 					     linear_frame_sz,
2070 					     mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0,
2071 					     mxbuf.xdp.data - mxbuf.xdp.data_meta);
2072 		if (unlikely(!skb)) {
2073 			mlx5e_page_release_fragmented(rq, &wi->linear_page);
2074 			return NULL;
2075 		}
2076 
2077 		skb_mark_for_recycle(skb);
2078 		wi->linear_page.frags++;
2079 		mlx5e_page_release_fragmented(rq, &wi->linear_page);
2080 
2081 		if (xdp_buff_has_frags(&mxbuf.xdp)) {
2082 			struct mlx5e_frag_page *pagep;
2083 
2084 			/* sinfo->nr_frags is reset by build_skb, calculate again. */
2085 			xdp_update_skb_shared_info(skb, frag_page - head_page,
2086 						   sinfo->xdp_frags_size, truesize,
2087 						   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
2088 
2089 			pagep = head_page;
2090 			do
2091 				pagep->frags++;
2092 			while (++pagep < frag_page);
2093 		}
2094 		__pskb_pull_tail(skb, headlen);
2095 	} else {
2096 		dma_addr_t addr;
2097 
2098 		if (xdp_buff_has_frags(&mxbuf.xdp)) {
2099 			struct mlx5e_frag_page *pagep;
2100 
2101 			xdp_update_skb_shared_info(skb, sinfo->nr_frags,
2102 						   sinfo->xdp_frags_size, truesize,
2103 						   xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
2104 
2105 			pagep = frag_page - sinfo->nr_frags;
2106 			do
2107 				pagep->frags++;
2108 			while (++pagep < frag_page);
2109 		}
2110 		/* copy header */
2111 		addr = page_pool_get_dma_addr(head_page->page);
2112 		mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
2113 				      head_offset, head_offset, headlen);
2114 		/* skb linear part was long-aligned with MLX5E_RX_MAX_HEAD, so headlen fits */
2115 		skb->tail += headlen;
2116 		skb->len  += headlen;
2117 	}
2118 
2119 	return skb;
2120 }
2121 
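/* MPWQE linear completion: the whole packet fits in a single stride, so the
 * SKB can be built directly around the RX buffer after an optional XDP pass.
 */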
2122 static struct sk_buff *
2123 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2124 				struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
2125 				u32 page_idx)
2126 {
2127 	struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
2128 	u16 rx_headroom = rq->buff.headroom;
2129 	struct bpf_prog *prog;
2130 	struct sk_buff *skb;
2131 	u32 metasize = 0;
2132 	void *va, *data;
2133 	dma_addr_t addr;
2134 	u32 frag_size;
2135 
2136 	/* Check packet size. Note LRO doesn't use linear SKB */
2137 	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
2138 		rq->stats->oversize_pkts_sw_drop++;
2139 		return NULL;
2140 	}
2141 
2142 	va             = page_address(frag_page->page) + head_offset;
2143 	data           = va + rx_headroom;
2144 	frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2145 
2146 	addr = page_pool_get_dma_addr(frag_page->page);
2147 	dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
2148 				      frag_size, rq->buff.map_dir);
2149 	net_prefetch(data);
2150 
2151 	prog = rcu_dereference(rq->xdp_prog);
2152 	if (prog) {
2153 		struct mlx5e_xdp_buff mxbuf;
2154 
2155 		net_prefetchw(va); /* xdp_frame data area */
2156 		mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
2157 				 cqe_bcnt, &mxbuf);
2158 		if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
2159 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
2160 				__set_bit(page_idx, wi->skip_release_bitmap); /* non-atomic */
2161 			return NULL; /* page/packet was consumed by XDP */
2162 		}
2163 
2164 		rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
2165 		metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
2166 		cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
2167 	}
2168 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2169 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
2170 	if (unlikely(!skb))
2171 		return NULL;
2172 
2173 	/* queue up for recycling/reuse */
2174 	skb_mark_for_recycle(skb);
2175 	frag_page->frags++;
2176 
2177 	return skb;
2178 }
2179 
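/* Build the SKB head for a SHAMPO (HW GRO) packet from the split header.
 * Headers that fit in a header entry are wrapped in place; larger headers
 * are copied into a newly allocated SKB.
 */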
2180 static struct sk_buff *
2181 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2182 			  struct mlx5_cqe64 *cqe, u16 header_index)
2183 {
2184 	struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index];
2185 	u16 head_offset = head->addr & (PAGE_SIZE - 1);
2186 	u16 head_size = cqe->shampo.header_size;
2187 	u16 rx_headroom = rq->buff.headroom;
2188 	struct sk_buff *skb = NULL;
2189 	void *hdr, *data;
2190 	u32 frag_size;
2191 
2192 	hdr		= page_address(head->frag_page->page) + head_offset;
2193 	data		= hdr + rx_headroom;
2194 	frag_size	= MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
2195 
2196 	if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
2197 		/* build SKB around header */
2198 		dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
2199 		prefetchw(hdr);
2200 		prefetch(data);
2201 		skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
2202 
2203 		if (unlikely(!skb))
2204 			return NULL;
2205 
2206 		head->frag_page->frags++;
2207 	} else {
2208 		/* the header is too large to build the SKB around it: allocate and copy */
2209 		rq->stats->gro_large_hds++;
2210 		skb = napi_alloc_skb(rq->cq.napi,
2211 				     ALIGN(head_size, sizeof(long)));
2212 		if (unlikely(!skb)) {
2213 			rq->stats->buff_alloc_err++;
2214 			return NULL;
2215 		}
2216 
2217 		prefetchw(skb->data);
2218 		mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr,
2219 				      head_offset + rx_headroom,
2220 				      rx_headroom, head_size);
2221 		/* skb linear part was allocated with head_size and aligned to long */
2222 		skb->tail += head_size;
2223 		skb->len  += head_size;
2224 	}
2225 
2226 	/* queue up for recycling/reuse */
2227 	skb_mark_for_recycle(skb);
2228 
2229 	return skb;
2230 }
2231 
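/* Round the truesize of the last fragment up to the stride size, matching
 * the granularity at which the HW consumes RX buffer space.
 */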
2232 static void
2233 mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz)
2234 {
2235 	skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
2236 	unsigned int frag_size = skb_frag_size(last_frag);
2237 	unsigned int frag_truesize;
2238 
2239 	frag_truesize = ALIGN(frag_size, BIT(log_stride_sz));
2240 	skb->truesize += frag_truesize - frag_size;
2241 }
2242 
2243 static void
2244 mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
2245 {
2246 	struct sk_buff *skb = rq->hw_gro_data->skb;
2247 	struct mlx5e_rq_stats *stats = rq->stats;
2248 
2249 	stats->gro_skbs++;
2250 	if (likely(skb_shinfo(skb)->nr_frags))
2251 		mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
2252 	if (NAPI_GRO_CB(skb)->count > 1)
2253 		mlx5e_shampo_update_hdr(rq, cqe, match);
2254 	napi_gro_receive(rq->cq.napi, skb);
2255 	rq->hw_gro_data->skb = NULL;
2256 }
2257 
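/* HW GRO aggregates payload into page-sized frags; refuse to grow the SKB
 * beyond GRO_LEGACY_MAX_SIZE, assuming each existing frag may span a full
 * page.
 */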
2258 static bool
2259 mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
2260 {
2261 	int nr_frags = skb_shinfo(skb)->nr_frags;
2262 
2263 	return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
2264 }
2265 
2266 static void
2267 mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
2268 {
2269 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
2270 	u64 addr = shampo->info[header_index].addr;
2271 
2272 	if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
2273 		struct mlx5e_dma_info *dma_info = &shampo->info[header_index];
2274 
2275 		dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
2276 		mlx5e_page_release_fragmented(rq, dma_info->frag_page);
2277 	}
2278 	bitmap_clear(shampo->bitmap, header_index, 1);
2279 }
2280 
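/* SHAMPO (HW GRO) completion: headers and payload arrive split. A packet
 * that matches the current aggregation session is appended to the session
 * SKB; a mismatch or lack of space flushes the session first, and the flush
 * bit in the CQE terminates it.
 */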
2281 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2282 {
2283 	u16 data_bcnt		= mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
2284 	u16 header_index	= mlx5e_shampo_get_cqe_header_index(rq, cqe);
2285 	u32 wqe_offset		= be32_to_cpu(cqe->shampo.data_offset);
2286 	u16 cstrides		= mpwrq_get_cqe_consumed_strides(cqe);
2287 	u32 data_offset		= wqe_offset & (PAGE_SIZE - 1);
2288 	u32 cqe_bcnt		= mpwrq_get_cqe_byte_cnt(cqe);
2289 	u16 wqe_id		= be16_to_cpu(cqe->wqe_id);
2290 	u32 page_idx		= wqe_offset >> PAGE_SHIFT;
2291 	u16 head_size		= cqe->shampo.header_size;
2292 	struct sk_buff **skb	= &rq->hw_gro_data->skb;
2293 	bool flush		= cqe->shampo.flush;
2294 	bool match		= cqe->shampo.match;
2295 	struct mlx5e_rq_stats *stats = rq->stats;
2296 	struct mlx5e_rx_wqe_ll *wqe;
2297 	struct mlx5e_mpw_info *wi;
2298 	struct mlx5_wq_ll *wq;
2299 
2300 	wi = mlx5e_get_mpw_info(rq, wqe_id);
2301 	wi->consumed_strides += cstrides;
2302 
2303 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2304 		mlx5e_handle_rx_err_cqe(rq, cqe);
2305 		goto mpwrq_cqe_out;
2306 	}
2307 
2308 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
2309 		stats->mpwqe_filler_cqes++;
2310 		stats->mpwqe_filler_strides += cstrides;
2311 		goto mpwrq_cqe_out;
2312 	}
2313 
2314 	stats->gro_match_packets += match;
2315 
2316 	if (*skb && (!match || !mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt))) {
2317 		match = false;
2318 		mlx5e_shampo_flush_skb(rq, cqe, match);
2319 	}
2320 
2321 	if (!*skb) {
2322 		if (likely(head_size))
2323 			*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
2324 		else
2325 			*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt,
2326 								  data_offset, page_idx);
2327 		if (unlikely(!*skb))
2328 			goto free_hd_entry;
2329 
2330 		NAPI_GRO_CB(*skb)->count = 1;
2331 		skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
2332 	} else {
2333 		NAPI_GRO_CB(*skb)->count++;
2334 		if (NAPI_GRO_CB(*skb)->count == 2 &&
2335 		    rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
2336 			void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
2337 			int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
2338 				    sizeof(struct iphdr);
2339 			struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff);
2340 
2341 			rq->hw_gro_data->second_ip_id = ntohs(iph->id);
2342 		}
2343 	}
2344 
2345 	if (likely(head_size)) {
2346 		struct mlx5e_frag_page *frag_page;
2347 
2348 		frag_page = &wi->alloc_units.frag_pages[page_idx];
2349 		mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
2350 	}
2351 
2352 	mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
2353 	if (flush)
2354 		mlx5e_shampo_flush_skb(rq, cqe, match);
2355 free_hd_entry:
2356 	mlx5e_free_rx_shampo_hd_entry(rq, header_index);
2357 mpwrq_cqe_out:
2358 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2359 		return;
2360 
2361 	wq  = &rq->mpwqe.wq;
2362 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2363 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2364 }
2365 
2366 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2367 {
2368 	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
2369 	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
2370 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
2371 	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
2372 	u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
2373 	u32 head_offset    = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
2374 	u32 page_idx       = wqe_offset >> rq->mpwqe.page_shift;
2375 	struct mlx5e_rx_wqe_ll *wqe;
2376 	struct mlx5_wq_ll *wq;
2377 	struct sk_buff *skb;
2378 	u16 cqe_bcnt;
2379 
2380 	wi->consumed_strides += cstrides;
2381 
2382 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2383 		mlx5e_handle_rx_err_cqe(rq, cqe);
2384 		goto mpwrq_cqe_out;
2385 	}
2386 
2387 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
2388 		struct mlx5e_rq_stats *stats = rq->stats;
2389 
2390 		stats->mpwqe_filler_cqes++;
2391 		stats->mpwqe_filler_strides += cstrides;
2392 		goto mpwrq_cqe_out;
2393 	}
2394 
2395 	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
2396 
2397 	skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
2398 			      mlx5e_skb_from_cqe_mpwrq_linear,
2399 			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
2400 			      mlx5e_xsk_skb_from_cqe_mpwrq_linear,
2401 			      rq, wi, cqe, cqe_bcnt, head_offset,
2402 			      page_idx);
2403 	if (!skb)
2404 		goto mpwrq_cqe_out;
2405 
2406 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2407 
2408 	if (mlx5e_cqe_regb_chain(cqe))
2409 		if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
2410 			dev_kfree_skb_any(skb);
2411 			goto mpwrq_cqe_out;
2412 		}
2413 
2414 	napi_gro_receive(rq->cq.napi, skb);
2415 
2416 mpwrq_cqe_out:
2417 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2418 		return;
2419 
2420 	wq  = &rq->mpwqe.wq;
2421 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2422 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2423 }
2424 
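/* Enhanced CQE compression: a compressed mini-CQE block inherits its
 * context from the preceding regular CQE (the "title"). Regular CQEs are
 * handled normally and remembered as potential titles; the title slot is
 * loaded once a compressed block follows, or at the end of the poll in
 * case the block shows up in the next one.
 */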
2425 static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
2426 						 struct mlx5_cqwq *cqwq,
2427 						 int budget_rem)
2428 {
2429 	struct mlx5_cqe64 *cqe, *title_cqe = NULL;
2430 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
2431 	int work_done = 0;
2432 
2433 	cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq);
2434 	if (!cqe)
2435 		return work_done;
2436 
2437 	if (cqd->last_cqe_title &&
2438 	    (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)) {
2439 		rq->stats->cqe_compress_blks++;
2440 		cqd->last_cqe_title = false;
2441 	}
2442 
2443 	do {
2444 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2445 			if (title_cqe) {
2446 				mlx5e_read_enhanced_title_slot(rq, title_cqe);
2447 				title_cqe = NULL;
2448 				rq->stats->cqe_compress_blks++;
2449 			}
2450 			work_done +=
2451 				mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe,
2452 							      budget_rem - work_done);
2453 			continue;
2454 		}
2455 		title_cqe = cqe;
2456 		mlx5_cqwq_pop(cqwq);
2457 
2458 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2459 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
2460 				rq, cqe);
2461 		work_done++;
2462 	} while (work_done < budget_rem &&
2463 		 (cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)));
2464 
2465 	/* the last CQE might be the title of the next poll bulk */
2466 	if (title_cqe) {
2467 		mlx5e_read_enhanced_title_slot(rq, title_cqe);
2468 		cqd->last_cqe_title = true;
2469 	}
2470 
2471 	return work_done;
2472 }
2473 
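/* Basic CQE compression: first finish any decompression session left over
 * from the previous poll, then handle regular CQEs, starting a new session
 * whenever a compressed CQE is met.
 */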
2474 static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
2475 					      struct mlx5_cqwq *cqwq,
2476 					      int budget_rem)
2477 {
2478 	struct mlx5_cqe64 *cqe;
2479 	int work_done = 0;
2480 
2481 	if (rq->cqd.left)
2482 		work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem);
2483 
2484 	while (work_done < budget_rem && (cqe = mlx5_cqwq_get_cqe(cqwq))) {
2485 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2486 			work_done +=
2487 				mlx5e_decompress_cqes_start(rq, cqwq,
2488 							    budget_rem - work_done);
2489 			continue;
2490 		}
2491 
2492 		mlx5_cqwq_pop(cqwq);
2493 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2494 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
2495 				rq, cqe);
2496 		work_done++;
2497 	}
2498 
2499 	return work_done;
2500 }
2501 
2502 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
2503 {
2504 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
2505 	struct mlx5_cqwq *cqwq = &cq->wq;
2506 	int work_done;
2507 
2508 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
2509 		return 0;
2510 
2511 	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state))
2512 		work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq,
2513 								  budget);
2514 	else
2515 		work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq,
2516 							       budget);
2517 
2518 	if (work_done == 0)
2519 		return 0;
2520 
2521 	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
2522 		mlx5e_shampo_flush_skb(rq, NULL, false);
2523 
2524 	if (rcu_access_pointer(rq->xdp_prog))
2525 		mlx5e_xdp_rx_poll_complete(rq);
2526 
2527 	mlx5_cqwq_update_db_record(cqwq);
2528 
2529 	/* ensure cq space is freed before enabling more cqes */
2530 	wmb();
2531 
2532 	return work_done;
2533 }
2534 
2535 #ifdef CONFIG_MLX5_CORE_IPOIB
2536 
2537 #define MLX5_IB_GRH_SGID_OFFSET 8
2538 #define MLX5_IB_GRH_DGID_OFFSET 24
2539 #define MLX5_GID_SIZE           16
2540 
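/* IPoIB RX completion: resolve the (possibly child) netdev from the QPN,
 * classify the packet from the GRH DGID, drop self-sent multicast echoes,
 * and strip the GRH before handing the SKB up the stack.
 */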
2541 static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
2542 					 struct mlx5_cqe64 *cqe,
2543 					 u32 cqe_bcnt,
2544 					 struct sk_buff *skb)
2545 {
2546 	struct hwtstamp_config *tstamp;
2547 	struct mlx5e_rq_stats *stats;
2548 	struct net_device *netdev;
2549 	struct mlx5e_priv *priv;
2550 	char *pseudo_header;
2551 	u32 flags_rqpn;
2552 	u32 qpn;
2553 	u8 *dgid;
2554 	u8 g;
2555 
2556 	qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
2557 	netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
2558 
2559 	/* No mapping present, cannot process the SKB. This might happen if a
2560 	 * child interface goes down while unprocessed CQEs remain on the parent RQ.
2561 	 */
2562 	if (unlikely(!netdev)) {
2563 		/* TODO: add drop counters support */
2564 		skb->dev = NULL;
2565 		pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
2566 		return;
2567 	}
2568 
2569 	priv = mlx5i_epriv(netdev);
2570 	tstamp = &priv->tstamp;
2571 	stats = &priv->channel_stats[rq->ix]->rq;
2572 
2573 	flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
2574 	g = (flags_rqpn >> 28) & 3;
2575 	dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
2576 	if ((!g) || dgid[0] != 0xff)
2577 		skb->pkt_type = PACKET_HOST;
2578 	else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
2579 		skb->pkt_type = PACKET_BROADCAST;
2580 	else
2581 		skb->pkt_type = PACKET_MULTICAST;
2582 
2583 	/* Drop packets that this interface itself sent, i.e. multicast packets
2584 	 * that the HCA has replicated back to the sending QP.
2585 	 */
2586 	if (g && (qpn == (flags_rqpn & 0xffffff)) &&
2587 	    (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
2588 		    MLX5_GID_SIZE) == 0)) {
2589 		skb->dev = NULL;
2590 		return;
2591 	}
2592 
2593 	skb_pull(skb, MLX5_IB_GRH_BYTES);
2594 
2595 	skb->protocol = *((__be16 *)(skb->data));
2596 
2597 	if (netdev->features & NETIF_F_RXCSUM) {
2598 		skb->ip_summed = CHECKSUM_COMPLETE;
2599 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
2600 		stats->csum_complete++;
2601 	} else {
2602 		skb->ip_summed = CHECKSUM_NONE;
2603 		stats->csum_none++;
2604 	}
2605 
2606 	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
2607 		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
2608 								  rq->clock, get_cqe_ts(cqe));
2609 	skb_record_rx_queue(skb, rq->ix);
2610 
2611 	if (likely(netdev->features & NETIF_F_RXHASH))
2612 		mlx5e_skb_set_hash(cqe, skb);
2613 
2614 	/* push a zeroed 20-byte IPoIB pseudo header; 4 bytes of encap already exist */
2615 	pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
2616 	memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
2617 	skb_reset_mac_header(skb);
2618 	skb_pull(skb, MLX5_IPOIB_HARD_LEN);
2619 
2620 	skb->dev = netdev;
2621 
2622 	stats->packets++;
2623 	stats->bytes += cqe_bcnt;
2624 }
2625 
2626 static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2627 {
2628 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2629 	struct mlx5e_wqe_frag_info *wi;
2630 	struct sk_buff *skb;
2631 	u32 cqe_bcnt;
2632 	u16 ci;
2633 
2634 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
2635 	wi       = get_frag(rq, ci);
2636 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
2637 
2638 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2639 		rq->stats->wqe_err++;
2640 		goto wq_cyc_pop;
2641 	}
2642 
2643 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
2644 			      mlx5e_skb_from_cqe_linear,
2645 			      mlx5e_skb_from_cqe_nonlinear,
2646 			      rq, wi, cqe, cqe_bcnt);
2647 	if (!skb)
2648 		goto wq_cyc_pop;
2649 
2650 	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2651 	if (unlikely(!skb->dev)) {
2652 		dev_kfree_skb_any(skb);
2653 		goto wq_cyc_pop;
2654 	}
2655 	napi_gro_receive(rq->cq.napi, skb);
2656 
2657 wq_cyc_pop:
2658 	mlx5_wq_cyc_pop(wq);
2659 }
2660 
2661 const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
2662 	.handle_rx_cqe       = mlx5i_handle_rx_cqe,
2663 	.handle_rx_cqe_mpwqe = NULL, /* Not supported */
2664 };
2665 #endif /* CONFIG_MLX5_CORE_IPOIB */
2666 
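/* Wire up the RQ datapath callbacks according to the WQ type (striding vs.
 * cyclic), XSK mode and packet merge (SHAMPO) configuration.
 */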
2667 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
2668 {
2669 	struct net_device *netdev = rq->netdev;
2670 	struct mlx5_core_dev *mdev = rq->mdev;
2671 	struct mlx5e_priv *priv = rq->priv;
2672 
2673 	switch (rq->wq_type) {
2674 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2675 		rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
2676 			mlx5e_xsk_skb_from_cqe_mpwrq_linear :
2677 			mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
2678 				mlx5e_skb_from_cqe_mpwrq_linear :
2679 				mlx5e_skb_from_cqe_mpwrq_nonlinear;
2680 		rq->post_wqes = mlx5e_post_rx_mpwqes;
2681 		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
2682 
2683 		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
2684 			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
2685 			if (!rq->handle_rx_cqe) {
2686 				netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n");
2687 				return -EINVAL;
2688 			}
2689 		} else {
2690 			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
2691 			if (!rq->handle_rx_cqe) {
2692 				netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
2693 				return -EINVAL;
2694 			}
2695 		}
2696 
2697 		break;
2698 	default: /* MLX5_WQ_TYPE_CYCLIC */
2699 		rq->wqe.skb_from_cqe = xsk ?
2700 			mlx5e_xsk_skb_from_cqe_linear :
2701 			mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
2702 				mlx5e_skb_from_cqe_linear :
2703 				mlx5e_skb_from_cqe_nonlinear;
2704 		rq->post_wqes = mlx5e_post_rx_wqes;
2705 		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2706 		rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
2707 		if (!rq->handle_rx_cqe) {
2708 			netdev_err(netdev, "RX handler of RQ is not set\n");
2709 			return -EINVAL;
2710 		}
2711 	}
2712 
2713 	return 0;
2714 }
2715 
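/* Trap RQ completion: the packet is not delivered to the stack; it is
 * reported to devlink together with the trap id carried in the CQE flow
 * tag, then freed.
 */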
2716 static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2717 {
2718 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2719 	struct mlx5e_wqe_frag_info *wi;
2720 	struct sk_buff *skb;
2721 	u32 cqe_bcnt;
2722 	u16 trap_id;
2723 	u16 ci;
2724 
2725 	trap_id  = get_cqe_flow_tag(cqe);
2726 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
2727 	wi       = get_frag(rq, ci);
2728 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
2729 
2730 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2731 		rq->stats->wqe_err++;
2732 		goto wq_cyc_pop;
2733 	}
2734 
2735 	skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
2736 	if (!skb)
2737 		goto wq_cyc_pop;
2738 
2739 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2740 	skb_push(skb, ETH_HLEN);
2741 
2742 	mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
2743 				 rq->netdev->devlink_port);
2744 	dev_kfree_skb_any(skb);
2745 
2746 wq_cyc_pop:
2747 	mlx5_wq_cyc_pop(wq);
2748 }
2749 
2750 void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
2751 {
2752 	rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
2753 			       mlx5e_skb_from_cqe_linear :
2754 			       mlx5e_skb_from_cqe_nonlinear;
2755 	rq->post_wqes = mlx5e_post_rx_wqes;
2756 	rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2757 	rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
2758 }
2759