/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"
#include <linux/indirect_call_wrapper.h>

#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

/* IPSEC inline data includes:
 * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
 *    next header.
 * 2. ESP authentication data: 16 bytes for ICV.
 */
#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
					   255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)

/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
 * encapsulations.
 */
#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
					    MLX5_SEND_WQE_DS)

/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
					 MLX5E_MAX_TX_INLINE_DS + \
					 MLX5E_MAX_TX_IPSEC_DS + \
					 MAX_SKB_FRAGS + 1, \
					 MLX5_SEND_WQEBB_NUM_DS)

#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)

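/* Convert a CQE timestamp to nanoseconds. INDIRECT_CALL_2() lets the two
 * expected converters (mlx5_real_time_cyc2time / mlx5_timecounter_cyc2time)
 * be called directly instead of through an indirect call.
 */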
static inline
ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
{
	return INDIRECT_CALL_2(func, mlx5_real_time_cyc2time, mlx5_timecounter_cyc2time,
			       clock, cqe_ts);
}

enum mlx5e_icosq_wqe_type {
	MLX5E_ICOSQ_WQE_NOP,
	MLX5E_ICOSQ_WQE_UMR_RX,
	MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
#ifdef CONFIG_MLX5_EN_TLS
	MLX5E_ICOSQ_WQE_UMR_TLS,
	MLX5E_ICOSQ_WQE_SET_PSV_TLS,
	MLX5E_ICOSQ_WQE_GET_PSV_TLS,
#endif
};

/* General */
static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb)
{
	return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST;
}

void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);

/* RX */
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page);
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);

/* TX */
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

static inline bool
mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo)
{
	return (*fifo->pc - *fifo->cc) < fifo->mask;
}

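/* Free space in the cyclic WQ is (cc - pc) modulo the ring size. This wraps to
 * zero when the queue is completely empty, hence the explicit cc == pc check.
 */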
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}

static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
	void *wqe;

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memset(wqe, 0, wqe_size);

	return wqe;
}

#define MLX5E_TX_FETCH_WQE(sq, pi) \
	((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))

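/* Post a single-WQEBB NOP at the current producer counter. The DS count of 1
 * is encoded in the low byte of qpn_ds.
 */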
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se         = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8 num_wqebbs;
	u8 num_dma;
	u8 num_fifo_pkts;
#ifdef CONFIG_MLX5_EN_TLS
	struct page *resync_dump_frag_page;
#endif
};

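/* Return a producer index with at least size contiguous WQEBBs available,
 * padding the end of the current WQ fragment with NOPs when the requested WQE
 * would otherwise wrap. A rough producer-side sketch (details vary per caller):
 *
 *	pi  = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
 *	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
 *	... fill the ctrl/eth/data segments and advance sq->pc ...
 *	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
 */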
static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_tx_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_tx_wqe_info) {
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nop += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

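/* hd_per_wq - 1 is used as a mask, i.e. the SHAMPO header ring size is assumed
 * to be a power of two.
 */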
static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
}

struct mlx5e_shampo_umr {
	u16 len;
};

struct mlx5e_icosq_wqe_info {
	u8 wqe_type;
	u8 num_wqebbs;

	/* Auxiliary data for different wqe types. */
	union {
		struct {
			struct mlx5e_rq *rq;
		} umr;
		struct mlx5e_shampo_umr shampo;
#ifdef CONFIG_MLX5_EN_TLS
		struct {
			struct mlx5e_ktls_offload_context_rx *priv_rx;
		} tls_set_params;
		struct {
			struct mlx5e_ktls_rx_resync_buf *buf;
		} tls_get_params;
#endif
	};
};

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);

static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type   = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

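/* Ring the SQ doorbell: publish the new producer counter in the doorbell
 * record and write the WQE control segment to the UAR register. The two
 * barriers keep the WQE write, the doorbell record update and the UAR write
 * ordered.
 */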
static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

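/* Record a DMA mapping in the SQ's dma_fifo so that the completion path can
 * unmap it later via mlx5e_tx_dma_unmap().
 */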
static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}

static inline
struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i)
{
	return &fifo->fifo[i & fifo->mask];
}

static inline
void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
{
	struct sk_buff **skb_item = mlx5e_skb_fifo_get(fifo, (*fifo->pc)++);

	*skb_item = skb;
}

static inline
struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
{
	return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
}

static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);

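/* The MPWQE session is full once its accumulated DS count reaches the size of
 * the largest multi-packet WQE the SQ may post.
 */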
static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
	return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
}

static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		mlx5_wq_ll_reset(&rq->mpwqe.wq);
		rq->mpwqe.actual_wq_head = 0;
	} else {
		mlx5_wq_cyc_reset(&rq->wqe.wq);
	}
}

static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
					struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &cq->wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(cq->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   cq->mcq.cqn, ci, qn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(cq->mdev, err_cqe);
}

static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_head(&rq->wqe.wq);
	}
}

static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
	}
}

/* SW parser related functions */

struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
{
	/* SWP offsets are in 2-byte words */
	eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
	eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
}

static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-byte words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		fallthrough;
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

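/* Worst-case space that must remain free in the SQ for one WQE of wqebbs
 * WQEBBs: up to wqebbs - 1 NOPs of padding at the fragment edge plus the WQE
 * itself.
 */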
#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)

static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
{
	WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev));

	/* A WQE must not cross the page boundary, hence two conditions:
	 * 1. Its size must not exceed the page size.
	 * 2. If the WQE size is X, and the space remaining in a page is less
	 *    than X, this space needs to be padded with NOPs. So, one WQE of
	 *    size X may require up to X-1 WQEBBs of padding, which makes the
	 *    stop room of X-1 + X.
	 * WQE size is also limited by the hardware limit.
	 */
	WARN_ONCE(wqe_size > mlx5e_get_max_sq_wqebbs(mdev),
		  "wqe_size %u is greater than max SQ WQEBBs %u",
		  wqe_size, mlx5e_get_max_sq_wqebbs(mdev));

	return MLX5E_STOP_ROOM(wqe_size);
}

static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
{
	return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
}

static inline u16 mlx5e_stop_room_for_mpwqe(struct mlx5_core_dev *mdev)
{
	u8 mpwqe_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);

	return mlx5e_stop_room_for_wqe(mdev, mpwqe_wqebbs);
}

static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
{
	u16 room = sq->reserved_room + MLX5E_STOP_ROOM(wqe_size);

	return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
}

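/* mpwqe.info entries are variable-sized (each ends in a flexible array of
 * alloc_units sized by pages_per_wqe), so entry i is located with explicit
 * byte arithmetic rather than plain array indexing.
 */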
static inline struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i)
{
	size_t isz = struct_size(rq->mpwqe.info, alloc_units, rq->mpwqe.pages_per_wqe);

	return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));
}
#endif