/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"

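/* Stop-room: WQEBBs reserved at the tail of an SQ so that, after the queue
 * is stopped, there is still space to post a maximal WQE plus the NOPs that
 * may be needed to pad to the end of a WQ fragment.
 */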
#define MLX5E_SQ_NOPS_ROOM (MLX5_SEND_WQE_MAX_WQEBBS - 1)
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)

#ifndef CONFIG_MLX5_EN_TLS
#define MLX5E_SQ_TLS_ROOM (0)
#else
/* TLS offload requires additional stop_room for:
 *  - a resync SKB.
 * kTLS offload requires a fixed additional stop_room for:
 *  - a static params WQE and a progress params WQE.
 * The additional MTU-dependent room for the resync DUMP WQEs
 * is calculated and added at runtime.
 */
#define MLX5E_SQ_TLS_ROOM  \
	(MLX5_SEND_WQE_MAX_WQEBBS + \
	 MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS)
#endif

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

enum mlx5e_icosq_wqe_type {
	MLX5E_ICOSQ_WQE_NOP,
	MLX5E_ICOSQ_WQE_UMR_RX,
};

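/* True when at least @n WQEBBs are free between the consumer (cc) and
 * producer (pc) counters of the cyclic WQ; cc == pc means the queue is empty.
 */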
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}

static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
	void *wqe;

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memset(wqe, 0, wqe_size);

	return wqe;
}

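/* Zero-initialized WQE fetch for the TX path. Typical usage (a sketch):
 *
 *	pi  = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
 *	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
 */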
#define MLX5E_TX_FETCH_WQE(sq, pi) \
	((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))

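/* Post a single-WQEBB NOP at the current producer position and advance the
 * producer counter.
 */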
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

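/* Same as mlx5e_post_nop(), but with the initiator small fence set in the
 * control segment.
 */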
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se         = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8 num_wqebbs;
	u8 num_dma;
#ifdef CONFIG_MLX5_EN_TLS
	struct page *resync_dump_frag_page;
#endif
};

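/* Return the producer index for a WQE of @size WQEBBs. If it does not fit in
 * the contiguous WQEBBs left in the current WQ fragment, pad the fragment
 * with NOPs first so that the WQE never wraps.
 */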
static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_tx_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_tx_wqe_info) {
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nop += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

struct mlx5e_icosq_wqe_info {
	u8 wqe_type;
	u8 num_wqebbs;

	/* Auxiliary data for different wqe types. */
	union {
		struct {
			struct mlx5e_rq *rq;
		} umr;
	};
};

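/* ICOSQ counterpart of mlx5e_txqsq_get_next_pi(). */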
static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

static inline void
mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
			u16 pi, u16 nnops)
{
	struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];

	edge_wi = wi + nnops;

	/* fill sq frag edge with nops to avoid wqe wrapping two pages */
	for (; wi < edge_wi; wi++) {
		memset(wi, 0, sizeof(*wi));
		wi->num_wqebbs = 1;
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	}
	sq->stats->nop += nnops;
}

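/* Ring the SQ doorbell: publish the new producer counter in the doorbell
 * record, then write the control segment to the device's UAR register.
 */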
static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}

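/* A control segment that carries a TIS number belongs to a transport-offload
 * (e.g. TLS) WQE; such WQEs are sent with TCP/UDP inline mode.
 */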
static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
{
	return cseg && !!cseg->tisn;
}

static inline u8
mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
			 struct sk_buff *skb)
{
	u8 mode;

	if (mlx5e_transport_inline_tx_wqe(cseg))
		return MLX5_INLINE_MODE_TCP_UDP;

	mode = sq->min_inline_mode;

	if (skb_vlan_tag_present(skb) &&
	    test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
		mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);

	return mode;
}

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

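/* The per-SQ DMA fifo records each mapping taken while building WQEs so that
 * TX completions can release it via mlx5e_tx_dma_unmap().
 */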
static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}

static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

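/* Reset the RQ's work queue to its initial state; for striding
 * (linked-list) RQs, also rewind the software WQ head.
 */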
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		mlx5_wq_ll_reset(&rq->mpwqe.wq);
		rq->mpwqe.actual_wq_head = 0;
	} else {
		mlx5_wq_cyc_reset(&rq->wqe.wq);
	}
}

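/* Log the opcode and syndromes of an error CQE, then dump its raw contents. */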
static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 sqn,
					struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(cq->channel->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   cq->mcq.cqn, ci, sqn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(cq->mdev, err_cqe);
}

/* SW parser related functions */

struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

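/* Program the SW parser (SWP) offsets and flags in the eth segment from
 * @swp_spec, used when the HW cannot parse the headers on its own
 * (e.g. IPsec or tunneled traffic).
 */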
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-byte words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		/* fall through */
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

#endif