/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"
#include <linux/indirect_call_wrapper.h>

#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)

/* The product MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
 * (16 * 4 == 64) does not fit in the 6-bit DS field of the Ctrl Segment.
 * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
 * full-session WQE be cache-aligned.
 */
#if L1_CACHE_BYTES < 128
#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
#else
#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
#endif

#define MLX5E_TX_MPW_MAX_NUM_DS (MLX5E_TX_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
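/* Note: with 64B cachelines the bound above is 16 - 1 = 15 WQEBBs, i.e. 60 DS;
 * with 128B cachelines it is 16 - 2 = 14 WQEBBs, i.e. 56 DS. Both values fit
 * the 6-bit DS field.
 */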

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

enum mlx5e_icosq_wqe_type {
	MLX5E_ICOSQ_WQE_NOP,
	MLX5E_ICOSQ_WQE_UMR_RX,
#ifdef CONFIG_MLX5_EN_TLS
	MLX5E_ICOSQ_WQE_UMR_TLS,
	MLX5E_ICOSQ_WQE_SET_PSV_TLS,
	MLX5E_ICOSQ_WQE_GET_PSV_TLS,
#endif
};

/* General */
static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb)
{
	return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST;
}

void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);

/* RX */
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
				struct mlx5e_dma_info *dma_info,
				bool recycle);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);

/* TX */
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

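/* The free room in a cyclic WQ is (cc - pc) modulo the WQ size; cc == pc is
 * the empty-queue case, for which the modulo expression would yield 0.
 */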
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}

static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
	void *wqe;

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memset(wqe, 0, wqe_size);

	return wqe;
}

#define MLX5E_TX_FETCH_WQE(sq, pi) \
	((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se         = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8 num_wqebbs;
	u8 num_dma;
	u8 num_fifo_pkts;
#ifdef CONFIG_MLX5_EN_TLS
	struct page *resync_dump_frag_page;
#endif
};

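/* Return the WQ producer index for a WQE of @size WQEBBs, first padding the
 * remainder of the current WQ fragment with NOP WQEs if the requested WQE
 * would not fit in it contiguously.
 */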
static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_tx_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_tx_wqe_info) {
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nop += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

struct mlx5e_icosq_wqe_info {
	u8 wqe_type;
	u8 num_wqebbs;

	/* Auxiliary data for different wqe types. */
	union {
		struct {
			struct mlx5e_rq *rq;
		} umr;
#ifdef CONFIG_MLX5_EN_TLS
		struct {
			struct mlx5e_ktls_offload_context_rx *priv_rx;
		} tls_set_params;
		struct {
			struct mlx5e_ktls_rx_resync_buf *buf;
		} tls_get_params;
#endif
	};
};

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);

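/* ICOSQ counterpart of mlx5e_txqsq_get_next_pi(): pad the current WQ fragment
 * with NOP WQEs when the requested WQE would not fit in it contiguously.
 */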
static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

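/* Request a completion for the last posted WQE (ctrl), publish the new
 * producer counter in the doorbell record, then ring the doorbell by writing
 * the ctrl segment to the UAR.
 */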
static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

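/* DMA fifo accessors: the index is masked on access, so the producer counter
 * (sq->dma_fifo_pc) can be used as a free-running counter.
 */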
static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}

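/* SKB fifo: as with the DMA fifo, indices are masked on access, so the pc/cc
 * counters are free-running.
 */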
static inline
struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i)
{
	return &fifo->fifo[i & fifo->mask];
}

static inline
void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
{
	struct sk_buff **skb_item = mlx5e_skb_fifo_get(fifo, (*fifo->pc)++);

	*skb_item = skb;
}

static inline
struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
{
	return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
}

static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);

static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
	return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS;
}

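/* The mlx5e_rqwq_* helpers dispatch on the RQ type: a striding RQ uses the
 * linked-list WQ (rq->mpwqe.wq), a legacy RQ uses the cyclic WQ (rq->wqe.wq).
 */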
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		mlx5_wq_ll_reset(&rq->mpwqe.wq);
		rq->mpwqe.actual_wq_head = 0;
	} else {
		mlx5_wq_cyc_reset(&rq->wqe.wq);
	}
}

static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
					struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(cq->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   cq->mcq.cqn, ci, qn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(cq->mdev, err_cqe);
}

static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_head(&rq->wqe.wq);
	}
}

static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
	}
}

/* SW parser related functions */

struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

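/* Fill the SWP (software parser) fields of the Ethernet segment: L3/L4 header
 * offsets and flags for the outer and inner headers, as described by
 * @swp_spec.
 */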
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-byte words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		fallthrough;
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

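/* Stop room (in WQEBBs) to reserve for posting a WQE of the given size. For
 * example, a maximal 16-WQEBB WQE may need up to 15 WQEBBs of NOP padding, so
 * its stop room is 31 WQEBBs.
 */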
static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
{
	BUILD_BUG_ON(PAGE_SIZE / MLX5_SEND_WQE_BB < MLX5_SEND_WQE_MAX_WQEBBS);

	/* A WQE must not cross the page boundary, hence two conditions:
	 * 1. Its size must not exceed the page size.
	 * 2. If the WQE size is X, and the space remaining in a page is less
	 *    than X, this space needs to be padded with NOPs. So, one WQE of
	 *    size X may require up to X - 1 WQEBBs of padding, which makes the
	 *    stop room X - 1 + X.
	 * The WQE size is also limited by the hardware maximum.
	 */

	if (__builtin_constant_p(wqe_size))
		BUILD_BUG_ON(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
	else
		WARN_ON_ONCE(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);

	return wqe_size * 2 - 1;
}

#endif