/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"
#include <linux/indirect_call_wrapper.h>

#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)

/* The product of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
 * (16 * 4 == 64) does not fit in the 6-bit DS field of the Ctrl Segment.
 * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
 * full-session WQE be cache-aligned.
 */
#if L1_CACHE_BYTES < 128
#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
#else
#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
#endif

#define MLX5E_TX_MPW_MAX_NUM_DS (MLX5E_TX_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)

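/* Translate a CQE timestamp to nanoseconds, dispatching to either the
 * real-time or the free-running clock conversion routine through an
 * indirect-call wrapper.
 */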
static inline
ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
{
	return INDIRECT_CALL_2(func, mlx5_real_time_cyc2time, mlx5_timecounter_cyc2time,
			       clock, cqe_ts);
}

enum mlx5e_icosq_wqe_type {
	MLX5E_ICOSQ_WQE_NOP,
	MLX5E_ICOSQ_WQE_UMR_RX,
#ifdef CONFIG_MLX5_EN_TLS
	MLX5E_ICOSQ_WQE_UMR_TLS,
	MLX5E_ICOSQ_WQE_SET_PSV_TLS,
	MLX5E_ICOSQ_WQE_GET_PSV_TLS,
#endif
};

/* General */
static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb)
{
	return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST;
}

void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);

/* RX */
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
				struct mlx5e_dma_info *dma_info,
				bool recycle);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);

/* TX */
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

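/* Return true if the cyclic WQ has room for @n more WQEBBs, given the
 * current consumer (@cc) and producer (@pc) counters.
 */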
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}

static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
	void *wqe;

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memset(wqe, 0, wqe_size);

	return wqe;
}

#define MLX5E_TX_FETCH_WQE(sq, pi) \
	((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))

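/* Post a single-WQEBB NOP WQE at the current producer position and advance
 * the producer counter.
 */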
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

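/* Same as mlx5e_post_nop(), but also sets the initiator small fence bit in
 * the control segment.
 */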
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se         = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8 num_wqebbs;
	u8 num_dma;
	u8 num_fifo_pkts;
#ifdef CONFIG_MLX5_EN_TLS
	struct page *resync_dump_frag_page;
#endif
};

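/* Return the producer index at which a WQE of @size WQEBBs can be posted
 * contiguously, padding the rest of the current WQ fragment with NOPs when
 * needed.
 */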
static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_tx_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid a WQE spanning two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_tx_wqe_info) {
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nop += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}


struct mlx5e_icosq_wqe_info {
	u8 wqe_type;
	u8 num_wqebbs;

	/* Auxiliary data for different wqe types. */
	union {
		struct {
			struct mlx5e_rq *rq;
		} umr;
#ifdef CONFIG_MLX5_EN_TLS
		struct {
			struct mlx5e_ktls_offload_context_rx *priv_rx;
		} tls_set_params;
		struct {
			struct mlx5e_ktls_rx_resync_buf *buf;
		} tls_get_params;
#endif
	};
};

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);

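/* ICOSQ counterpart of mlx5e_txqsq_get_next_pi(): return a producer index
 * with @size contiguous WQEBBs available, posting NOP WQEs to skip over the
 * frag edge when necessary.
 */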
static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid a WQE spanning two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

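/* Update the doorbell record with the given producer counter and ring the SQ
 * doorbell with @ctrl, requesting a CQE for this WQE.
 */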
static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}

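/* SKB FIFO helpers: push stores an skb at the producer position, pop returns
 * (and consumes) the skb at the consumer position.
 */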
static inline
struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i)
{
	return &fifo->fifo[i & fifo->mask];
}

static inline
void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
{
	struct sk_buff **skb_item = mlx5e_skb_fifo_get(fifo, (*fifo->pc)++);

	*skb_item = skb;
}

static inline
struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
{
	return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
}

static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);

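/* Return true when the TX MPWQE session has accumulated the maximum number of
 * data segments (MLX5E_TX_MPW_MAX_NUM_DS).
 */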
static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
	return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS;
}

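/* Reset the RQ work queue of either type (striding or cyclic) back to its
 * initial state.
 */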
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		mlx5_wq_ll_reset(&rq->mpwqe.wq);
		rq->mpwqe.actual_wq_head = 0;
	} else {
		mlx5_wq_cyc_reset(&rq->wqe.wq);
	}
}

static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
					struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(cq->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   cq->mcq.cqn, ci, qn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(cq->mdev, err_cqe);
}

static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_head(&rq->wqe.wq);
	}
}

static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
	}
}

/* SW parser related functions */

struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
{
	/* SWP offsets are in 2-byte words */
	eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
	eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
}

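/* Fill the software parser (SWP) offsets and flags in the Ethernet segment
 * according to @swp_spec, covering the inner headers when a tunnel is used.
 */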
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-byte words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		fallthrough;
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
{
	BUILD_BUG_ON(PAGE_SIZE / MLX5_SEND_WQE_BB < MLX5_SEND_WQE_MAX_WQEBBS);

	/* A WQE must not cross the page boundary, hence two conditions:
	 * 1. Its size must not exceed the page size.
	 * 2. If the WQE size is X, and the space remaining in a page is less
	 *    than X, this space needs to be padded with NOPs. So, one WQE of
	 *    size X may require up to X - 1 WQEBBs of padding, which makes the
	 *    stop room X - 1 + X.
	 * WQE size is also limited by the hardware limit.
	 */

	if (__builtin_constant_p(wqe_size))
		BUILD_BUG_ON(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
	else
		WARN_ON_ONCE(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);

	return wqe_size * 2 - 1;
}

#endif