xref: /openbmc/linux/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c (revision c496daeb863093a046e0bb8db7265bf45d91775a)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Ethernet driver
3  *
4  * Copyright (C) 2020 Marvell.
5  *
6  */
7 
8 #include <linux/etherdevice.h>
9 #include <net/ip.h>
10 #include <net/tso.h>
11 #include <linux/bpf.h>
12 #include <linux/bpf_trace.h>
13 #include <net/ip6_checksum.h>
14 
15 #include "otx2_reg.h"
16 #include "otx2_common.h"
17 #include "otx2_struct.h"
18 #include "otx2_txrx.h"
19 #include "otx2_ptp.h"
20 #include "cn10k.h"
21 
22 #define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
23 #define PTP_PORT	        0x13F
24 /* The PTPv2 header's Original Timestamp field starts at byte offset 34 and
25  * contains a 6-byte seconds field and a 4-byte nanoseconds field.
26  */
27 #define PTP_SYNC_SEC_OFFSET	34
28 
29 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
30 				     struct bpf_prog *prog,
31 				     struct nix_cqe_rx_s *cqe,
32 				     struct otx2_cq_queue *cq);
33 
34 static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
35 				 struct otx2_cq_queue *cq)
36 {
37 	u64 incr = (u64)(cq->cq_idx) << 32;
38 	u64 status;
39 
40 	status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);
41 
42 	if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
43 		     status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
44 		dev_err(pfvf->dev, "CQ stopped due to error");
45 		return -EINVAL;
46 	}
47 
48 	cq->cq_tail = status & 0xFFFFF;
49 	cq->cq_head = (status >> 20) & 0xFFFFF;
50 	if (cq->cq_tail < cq->cq_head)
51 		cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
52 				cq->cq_tail;
53 	else
54 		cq->pend_cqe = cq->cq_tail - cq->cq_head;
55 
56 	return 0;
57 }
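
/* A rough sketch of the arithmetic above: the atomic fetch-add selects the
 * CQ via bits 63:32 of the operand and returns a status word with the tail
 * pointer in bits 19:0 and the head pointer in bits 39:20. For example,
 * with cqe_cnt = 1024, cq_head = 1020 and cq_tail = 4 the ring has wrapped,
 * so pend_cqe = (1024 - 1020) + 4 = 8 CQEs are pending; without a wrap
 * (head = 4, tail = 20) it is simply 20 - 4 = 16.
 */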
58 
59 static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
60 {
61 	struct nix_cqe_hdr_s *cqe_hdr;
62 
63 	cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
64 	if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
65 		return NULL;
66 
67 	cq->cq_head++;
68 	cq->cq_head &= (cq->cqe_cnt - 1);
69 
70 	return cqe_hdr;
71 }
72 
73 static unsigned int frag_num(unsigned int i)
74 {
75 #ifdef __BIG_ENDIAN
76 	return (i & ~3) + 3 - (i & 3);
77 #else
78 	return i;
79 #endif
80 }
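
/* Illustrative note on the remap above: per-segment lengths are written as
 * 16-bit slots within the 64-bit SG subdescriptor header word, and on
 * big-endian hosts the 16-bit slot order within that word is reversed, so
 * frag_num() flips each group of four indices, e.g. 0,1,2,3 -> 3,2,1,0.
 * On little-endian hosts it is the identity mapping.
 */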
81 
82 static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
83 					struct sk_buff *skb, int seg, int *len)
84 {
85 	const skb_frag_t *frag;
86 	struct page *page;
87 	int offset;
88 
89 	/* First segment is always skb->data */
90 	if (!seg) {
91 		page = virt_to_page(skb->data);
92 		offset = offset_in_page(skb->data);
93 		*len = skb_headlen(skb);
94 	} else {
95 		frag = &skb_shinfo(skb)->frags[seg - 1];
96 		page = skb_frag_page(frag);
97 		offset = skb_frag_off(frag);
98 		*len = skb_frag_size(frag);
99 	}
100 	return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
101 }
102 
103 static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
104 {
105 	int seg;
106 
107 	for (seg = 0; seg < sg->num_segs; seg++) {
108 		otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
109 				    sg->size[seg], DMA_TO_DEVICE);
110 	}
111 	sg->num_segs = 0;
112 }
113 
114 static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
115 				     struct otx2_snd_queue *sq,
116 				     struct nix_cqe_tx_s *cqe)
117 {
118 	struct nix_send_comp_s *snd_comp = &cqe->comp;
119 	struct sg_list *sg;
120 	struct page *page;
121 	u64 pa;
122 
123 	sg = &sq->sg[snd_comp->sqe_id];
124 
125 	pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
126 	otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
127 			    sg->size[0], DMA_TO_DEVICE);
128 	page = virt_to_page(phys_to_virt(pa));
129 	put_page(page);
130 }
131 
132 static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
133 				 struct otx2_cq_queue *cq,
134 				 struct otx2_snd_queue *sq,
135 				 struct nix_cqe_tx_s *cqe,
136 				 int budget, int *tx_pkts, int *tx_bytes)
137 {
138 	struct nix_send_comp_s *snd_comp = &cqe->comp;
139 	struct skb_shared_hwtstamps ts;
140 	struct sk_buff *skb = NULL;
141 	u64 timestamp, tsns;
142 	struct sg_list *sg;
143 	int err;
144 
145 	if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
146 		net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
147 				    pfvf->netdev->name, cq->cint_idx,
148 				    snd_comp->status);
149 
150 	sg = &sq->sg[snd_comp->sqe_id];
151 	skb = (struct sk_buff *)sg->skb;
152 	if (unlikely(!skb))
153 		return;
154 
155 	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
156 		timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
157 		if (timestamp != 1) {
158 			timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
159 			err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
160 			if (!err) {
161 				memset(&ts, 0, sizeof(ts));
162 				ts.hwtstamp = ns_to_ktime(tsns);
163 				skb_tstamp_tx(skb, &ts);
164 			}
165 		}
166 	}
167 
168 	*tx_bytes += skb->len;
169 	(*tx_pkts)++;
170 	otx2_dma_unmap_skb_frags(pfvf, sg);
171 	napi_consume_skb(skb, budget);
172 	sg->skb = (u64)NULL;
173 }
174 
175 static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
176 			      struct sk_buff *skb, void *data)
177 {
178 	u64 timestamp, tsns;
179 	int err;
180 
181 	if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
182 		return;
183 
184 	timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
185 	/* The first 8 bytes are the timestamp */
186 	err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
187 	if (err)
188 		return;
189 
190 	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
191 }
192 
193 static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
194 			      u64 iova, int len, struct nix_rx_parse_s *parse,
195 			      int qidx)
196 {
197 	struct page *page;
198 	int off = 0;
199 	void *va;
200 
201 	va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
202 
203 	if (likely(!skb_shinfo(skb)->nr_frags)) {
204 		/* Check if data starts at some nonzero offset
205 		 * from the start of the buffer. For now the
206 		 * only possible offset is 8 bytes, in the case
207 		 * where the packet is prepended with a timestamp.
208 		 */
209 		if (parse->laptr) {
210 			otx2_set_rxtstamp(pfvf, skb, va);
211 			off = OTX2_HW_TIMESTAMP_LEN;
212 		}
213 	}
214 
215 	page = virt_to_page(va);
216 	if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
217 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
218 				va - page_address(page) + off,
219 				len - off, pfvf->rbsize);
220 		return true;
221 	}
222 
223 	/* If more than MAX_SKB_FRAGS fragments are received, then
224 	 * give those buffer pointers back to the hardware for reuse.
225 	 */
226 	pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
227 
228 	return false;
229 }
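
/* For example (assuming OTX2_HW_TIMESTAMP_LEN is the 8-byte stamp noted
 * above): if the first receive segment is 136 bytes long and the CQE's
 * parse.laptr flag is set, the first 8 bytes are consumed as the RX
 * timestamp and only the remaining 128 bytes are attached to the skb as
 * page fragment data, at an 8-byte offset into the buffer.
 */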
230 
231 static void otx2_set_rxhash(struct otx2_nic *pfvf,
232 			    struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
233 {
234 	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
235 	struct otx2_rss_info *rss;
236 	u32 hash = 0;
237 
238 	if (!(pfvf->netdev->features & NETIF_F_RXHASH))
239 		return;
240 
241 	rss = &pfvf->hw.rss_info;
242 	if (rss->flowkey_cfg) {
243 		if (rss->flowkey_cfg &
244 		    ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
245 			hash_type = PKT_HASH_TYPE_L4;
246 		else
247 			hash_type = PKT_HASH_TYPE_L3;
248 		hash = cqe->hdr.flow_tag;
249 	}
250 	skb_set_hash(skb, hash, hash_type);
251 }
252 
253 static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
254 			      int qidx)
255 {
256 	struct nix_rx_sg_s *sg = &cqe->sg;
257 	void *end, *start;
258 	u64 *seg_addr;
259 	int seg;
260 
261 	start = (void *)sg;
262 	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
263 	while (start < end) {
264 		sg = (struct nix_rx_sg_s *)start;
265 		seg_addr = &sg->seg_addr;
266 		for (seg = 0; seg < sg->segs; seg++, seg_addr++)
267 			pfvf->hw_ops->aura_freeptr(pfvf, qidx,
268 						   *seg_addr & ~0x07ULL);
269 		start += sizeof(*sg);
270 	}
271 }
272 
273 static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
274 				  struct nix_cqe_rx_s *cqe, int qidx)
275 {
276 	struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
277 	struct nix_rx_parse_s *parse = &cqe->parse;
278 
279 	if (netif_msg_rx_err(pfvf))
280 		netdev_err(pfvf->netdev,
281 			   "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
282 			   qidx, parse->errlev, parse->errcode);
283 
284 	if (parse->errlev == NPC_ERRLVL_RE) {
285 		switch (parse->errcode) {
286 		case ERRCODE_FCS:
287 		case ERRCODE_FCS_RCV:
288 			atomic_inc(&stats->rx_fcs_errs);
289 			break;
290 		case ERRCODE_UNDERSIZE:
291 			atomic_inc(&stats->rx_undersize_errs);
292 			break;
293 		case ERRCODE_OVERSIZE:
294 			atomic_inc(&stats->rx_oversize_errs);
295 			break;
296 		case ERRCODE_OL2_LEN_MISMATCH:
297 			atomic_inc(&stats->rx_len_errs);
298 			break;
299 		default:
300 			atomic_inc(&stats->rx_other_errs);
301 			break;
302 		}
303 	} else if (parse->errlev == NPC_ERRLVL_NIX) {
304 		switch (parse->errcode) {
305 		case ERRCODE_OL3_LEN:
306 		case ERRCODE_OL4_LEN:
307 		case ERRCODE_IL3_LEN:
308 		case ERRCODE_IL4_LEN:
309 			atomic_inc(&stats->rx_len_errs);
310 			break;
311 		case ERRCODE_OL4_CSUM:
312 		case ERRCODE_IL4_CSUM:
313 			atomic_inc(&stats->rx_csum_errs);
314 			break;
315 		default:
316 			atomic_inc(&stats->rx_other_errs);
317 			break;
318 		}
319 	} else {
320 		atomic_inc(&stats->rx_other_errs);
321 		/* For now ignore all the NPC parser errors and
322 		 * pass the packets to the stack.
323 		 */
324 		return false;
325 	}
326 
327 	/* If RXALL is enabled, pass packets on to the stack. */
328 	if (pfvf->netdev->features & NETIF_F_RXALL)
329 		return false;
330 
331 	/* Free buffer back to pool */
332 	if (cqe->sg.segs)
333 		otx2_free_rcv_seg(pfvf, cqe, qidx);
334 	return true;
335 }
336 
337 static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
338 				 struct napi_struct *napi,
339 				 struct otx2_cq_queue *cq,
340 				 struct nix_cqe_rx_s *cqe)
341 {
342 	struct nix_rx_parse_s *parse = &cqe->parse;
343 	struct nix_rx_sg_s *sg = &cqe->sg;
344 	struct sk_buff *skb = NULL;
345 	void *end, *start;
346 	u64 *seg_addr;
347 	u16 *seg_size;
348 	int seg;
349 
350 	if (unlikely(parse->errlev || parse->errcode)) {
351 		if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
352 			return;
353 	}
354 
355 	if (pfvf->xdp_prog)
356 		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
357 			return;
358 
359 	skb = napi_get_frags(napi);
360 	if (unlikely(!skb))
361 		return;
362 
363 	start = (void *)sg;
364 	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
365 	while (start < end) {
366 		sg = (struct nix_rx_sg_s *)start;
367 		seg_addr = &sg->seg_addr;
368 		seg_size = (void *)sg;
369 		for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
370 			if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
371 					      seg_size[seg], parse, cq->cq_idx))
372 				cq->pool_ptrs++;
373 		}
374 		start += sizeof(*sg);
375 	}
376 	otx2_set_rxhash(pfvf, cqe, skb);
377 
378 	skb_record_rx_queue(skb, cq->cq_idx);
379 	if (pfvf->netdev->features & NETIF_F_RXCSUM)
380 		skb->ip_summed = CHECKSUM_UNNECESSARY;
381 
382 	skb_mark_for_recycle(skb);
383 
384 	napi_gro_frags(napi);
385 }
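
/* A rough picture of the multi-segment walk above: parse.desc_sizem1 + 1
 * gives the CQE descriptor size in 16-byte units, so the loop may cover
 * more than one NIX_RX_SG_S, each of which describes up to three receive
 * segments.  The seg_size[] view simply re-reads the first word of the SG
 * structure as packed 16-bit lengths, one per segment address.
 */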
386 
387 static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
388 				struct napi_struct *napi,
389 				struct otx2_cq_queue *cq, int budget)
390 {
391 	struct nix_cqe_rx_s *cqe;
392 	int processed_cqe = 0;
393 
394 	if (cq->pend_cqe >= budget)
395 		goto process_cqe;
396 
397 	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
398 		return 0;
399 
400 process_cqe:
401 	while (likely(processed_cqe < budget) && cq->pend_cqe) {
402 		cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
403 		if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
404 		    !cqe->sg.seg_addr) {
405 			if (!processed_cqe)
406 				return 0;
407 			break;
408 		}
409 		cq->cq_head++;
410 		cq->cq_head &= (cq->cqe_cnt - 1);
411 
412 		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);
413 
414 		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
415 		cqe->sg.seg_addr = 0x00;
416 		processed_cqe++;
417 		cq->pend_cqe--;
418 	}
419 
420 	/* Free CQEs to HW */
421 	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
422 		     ((u64)cq->cq_idx << 32) | processed_cqe);
423 
424 	return processed_cqe;
425 }
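
/* Doorbell format sketch: the NIX_LF_CQ_OP_DOOR write encodes the CQ index
 * in the upper 32 bits and the number of CQEs being returned to hardware in
 * the lower bits, e.g. freeing 12 processed CQEs on CQ 3 writes
 * ((u64)3 << 32) | 12.
 */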
426 
427 void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
428 {
429 	struct otx2_nic *pfvf = dev;
430 	dma_addr_t bufptr;
431 
432 	while (cq->pool_ptrs) {
433 		if (otx2_alloc_buffer(pfvf, cq, &bufptr))
434 			break;
435 		otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
436 		cq->pool_ptrs--;
437 	}
438 }
439 
440 static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
441 				struct otx2_cq_queue *cq, int budget)
442 {
443 	int tx_pkts = 0, tx_bytes = 0, qidx;
444 	struct otx2_snd_queue *sq;
445 	struct nix_cqe_tx_s *cqe;
446 	int processed_cqe = 0;
447 
448 	if (cq->pend_cqe >= budget)
449 		goto process_cqe;
450 
451 	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
452 		return 0;
453 
454 process_cqe:
455 	qidx = cq->cq_idx - pfvf->hw.rx_queues;
456 	sq = &pfvf->qset.sq[qidx];
457 
458 	while (likely(processed_cqe < budget) && cq->pend_cqe) {
459 		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
460 		if (unlikely(!cqe)) {
461 			if (!processed_cqe)
462 				return 0;
463 			break;
464 		}
465 
466 		qidx = cq->cq_idx - pfvf->hw.rx_queues;
467 
468 		if (cq->cq_type == CQ_XDP)
469 			otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
470 		else
471 			otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
472 					     cqe, budget, &tx_pkts, &tx_bytes);
473 
474 		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
475 		processed_cqe++;
476 		cq->pend_cqe--;
477 
478 		sq->cons_head++;
479 		sq->cons_head &= (sq->sqe_cnt - 1);
480 	}
481 
482 	/* Free CQEs to HW */
483 	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
484 		     ((u64)cq->cq_idx << 32) | processed_cqe);
485 
486 	if (likely(tx_pkts)) {
487 		struct netdev_queue *txq;
488 
489 		qidx = cq->cq_idx - pfvf->hw.rx_queues;
490 
491 		if (qidx >= pfvf->hw.tx_queues)
492 			qidx -= pfvf->hw.xdp_queues;
493 		txq = netdev_get_tx_queue(pfvf->netdev, qidx);
494 		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
495 		/* Check if the queue was stopped earlier due to the ring being full */
496 		smp_mb();
497 		if (netif_tx_queue_stopped(txq) &&
498 		    netif_carrier_ok(pfvf->netdev))
499 			netif_tx_wake_queue(txq);
500 	}
501 	return 0;
502 }
503 
504 static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
505 {
506 	struct dim_sample dim_sample;
507 	u64 rx_frames, rx_bytes;
508 
509 	rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
510 		OTX2_GET_RX_STATS(RX_UCAST);
511 	rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
512 	dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
513 	net_dim(&cq_poll->dim, dim_sample);
514 }
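
/* Roughly how this sample is consumed: dim_update_sample() records the
 * cumulative frame and byte counters together with the NAPI event count,
 * and net_dim() works on the deltas between successive samples to decide
 * whether a different interrupt moderation profile should be scheduled.
 * The CINT registers themselves are reprogrammed via
 * otx2_config_irq_coalescing() from the NAPI handler below.
 */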
515 
516 int otx2_napi_handler(struct napi_struct *napi, int budget)
517 {
518 	struct otx2_cq_queue *rx_cq = NULL;
519 	struct otx2_cq_poll *cq_poll;
520 	int workdone = 0, cq_idx, i;
521 	struct otx2_cq_queue *cq;
522 	struct otx2_qset *qset;
523 	struct otx2_nic *pfvf;
524 
525 	cq_poll = container_of(napi, struct otx2_cq_poll, napi);
526 	pfvf = (struct otx2_nic *)cq_poll->dev;
527 	qset = &pfvf->qset;
528 
529 	for (i = 0; i < CQS_PER_CINT; i++) {
530 		cq_idx = cq_poll->cq_ids[i];
531 		if (unlikely(cq_idx == CINT_INVALID_CQ))
532 			continue;
533 		cq = &qset->cq[cq_idx];
534 		if (cq->cq_type == CQ_RX) {
535 			rx_cq = cq;
536 			workdone += otx2_rx_napi_handler(pfvf, napi,
537 							 cq, budget);
538 		} else {
539 			workdone += otx2_tx_napi_handler(pfvf, cq, budget);
540 		}
541 	}
542 
543 	if (rx_cq && rx_cq->pool_ptrs)
544 		pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
545 	/* Clear the IRQ */
546 	otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
547 
548 	if (workdone < budget && napi_complete_done(napi, workdone)) {
549 		/* If interface is going down, don't re-enable IRQ */
550 		if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
551 			return workdone;
552 
553 		/* Check for adaptive interrupt coalescing */
554 		if (workdone != 0 &&
555 		    ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
556 		     OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
557 			/* Adjust irq coalescing using net_dim */
558 			otx2_adjust_adaptive_coalese(pfvf, cq_poll);
559 			/* Update irq coalescing */
560 			for (i = 0; i < pfvf->hw.cint_cnt; i++)
561 				otx2_config_irq_coalescing(pfvf, i);
562 		}
563 
564 		/* Re-enable interrupts */
565 		otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
566 			     BIT_ULL(0));
567 	}
568 	return workdone;
569 }
570 
571 void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
572 		    int size, int qidx)
573 {
574 	u64 status;
575 
576 	/* Packet data stores should finish before SQE is flushed to HW */
577 	dma_wmb();
578 
579 	do {
580 		memcpy(sq->lmt_addr, sq->sqe_base, size);
581 		status = otx2_lmt_flush(sq->io_addr);
582 	} while (status == 0);
583 
584 	sq->head++;
585 	sq->head &= (sq->sqe_cnt - 1);
586 }
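
/* LMTST retry sketch: otx2_lmt_flush() returning 0 indicates the LMTST
 * submission did not take effect, so the SQE is copied to the LMT region
 * and flushed again until it does.  sq->head then advances modulo sqe_cnt,
 * e.g. with sqe_cnt = 4096 a head of 4095 wraps back to 0.
 */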
587 
588 #define MAX_SEGS_PER_SG	3
589 /* Add SQE scatter/gather subdescriptor structure */
590 static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
591 			    struct sk_buff *skb, int num_segs, int *offset)
592 {
593 	struct nix_sqe_sg_s *sg = NULL;
594 	u64 dma_addr, *iova = NULL;
595 	u16 *sg_lens = NULL;
596 	int seg, len;
597 
598 	sq->sg[sq->head].num_segs = 0;
599 
600 	for (seg = 0; seg < num_segs; seg++) {
601 		if ((seg % MAX_SEGS_PER_SG) == 0) {
602 			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
603 			sg->ld_type = NIX_SEND_LDTYPE_LDD;
604 			sg->subdc = NIX_SUBDC_SG;
605 			sg->segs = 0;
606 			sg_lens = (void *)sg;
607 			iova = (void *)sg + sizeof(*sg);
608 			/* The next subdc always starts at a 16-byte boundary, so
609 			 * whether sg->segs ends up as 2 or 3, reserve room for all 3 IOVAs.
610 			 */
611 			if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
612 				*offset += sizeof(*sg) + (3 * sizeof(u64));
613 			else
614 				*offset += sizeof(*sg) + sizeof(u64);
615 		}
616 		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
617 		if (dma_mapping_error(pfvf->dev, dma_addr))
618 			return false;
619 
620 		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
621 		sg->segs++;
622 		*iova++ = dma_addr;
623 
624 		/* Save DMA mapping info for later unmapping */
625 		sq->sg[sq->head].dma_addr[seg] = dma_addr;
626 		sq->sg[sq->head].size[seg] = len;
627 		sq->sg[sq->head].num_segs++;
628 	}
629 
630 	sq->sg[sq->head].skb = (u64)skb;
631 	return true;
632 }
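
/* Worked example of the offset arithmetic above: for an skb with
 * num_segs = 4, the first NIX_SQE_SG_S holds segments 0-2 and advances
 * *offset by sizeof(*sg) + 3 * sizeof(u64), i.e. 32 bytes assuming the
 * 8-byte SG header, while the second holds the remaining segment and
 * advances it by 16 bytes, so every following subdescriptor stays on a
 * 16-byte boundary.
 */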
633 
634 /* Add SQE extended header subdescriptor */
635 static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
636 			     struct sk_buff *skb, int *offset)
637 {
638 	struct nix_sqe_ext_s *ext;
639 
640 	ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
641 	ext->subdc = NIX_SUBDC_EXT;
642 	if (skb_shinfo(skb)->gso_size) {
643 		ext->lso = 1;
644 		ext->lso_sb = skb_tcp_all_headers(skb);
645 		ext->lso_mps = skb_shinfo(skb)->gso_size;
646 
647 		/* Only TSOv4, TSOv6 and UDP (USO) GSO offloads are supported */
648 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
649 			ext->lso_format = pfvf->hw.lso_tsov4_idx;
650 
651 			/* HW adds payload size to 'ip_hdr->tot_len' while
652 			 * sending each TSO segment, hence set the payload length
653 			 * in the IP header of the packet to just the header length.
654 			 */
655 			ip_hdr(skb)->tot_len =
656 				htons(ext->lso_sb - skb_network_offset(skb));
657 		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
658 			ext->lso_format = pfvf->hw.lso_tsov6_idx;
659 
660 			ipv6_hdr(skb)->payload_len =
661 				htons(ext->lso_sb - skb_network_offset(skb));
662 		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
663 			__be16 l3_proto = vlan_get_protocol(skb);
664 			struct udphdr *udph = udp_hdr(skb);
665 			u16 iplen;
666 
667 			ext->lso_sb = skb_transport_offset(skb) +
668 					sizeof(struct udphdr);
669 
670 			/* HW adds payload size to length fields in IP and
671 			 * UDP headers during segmentation, hence adjust the
672 			 * lengths to just header sizes.
673 			 */
674 			iplen = htons(ext->lso_sb - skb_network_offset(skb));
675 			if (l3_proto == htons(ETH_P_IP)) {
676 				ip_hdr(skb)->tot_len = iplen;
677 				ext->lso_format = pfvf->hw.lso_udpv4_idx;
678 			} else {
679 				ipv6_hdr(skb)->payload_len = iplen;
680 				ext->lso_format = pfvf->hw.lso_udpv6_idx;
681 			}
682 
683 			udph->len = htons(sizeof(struct udphdr));
684 		}
685 	} else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
686 		ext->tstmp = 1;
687 	}
688 
689 #define OTX2_VLAN_PTR_OFFSET     (ETH_HLEN - ETH_TLEN)
690 	if (skb_vlan_tag_present(skb)) {
691 		if (skb->vlan_proto == htons(ETH_P_8021Q)) {
692 			ext->vlan1_ins_ena = 1;
693 			ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
694 			ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
695 		} else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
696 			ext->vlan0_ins_ena = 1;
697 			ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
698 			ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
699 		}
700 	}
701 
702 	*offset += sizeof(*ext);
703 }
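
/* LSO length fixup, by way of example: for a TCPv4 TSO skb with a 14-byte
 * Ethernet header, 20-byte IP header and 20-byte TCP header, lso_sb is 54
 * and ip_hdr->tot_len is rewritten to 54 - 14 = 40, i.e. just the IP + TCP
 * header length; hardware then adds each segment's payload size back while
 * generating the individual segments.  The UDP (USO) branch does the same
 * for the IP length field and resets udph->len to sizeof(struct udphdr).
 */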
704 
705 static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
706 			     int alg, u64 iova, int ptp_offset,
707 			     u64 base_ns, bool udp_csum_crt)
708 {
709 	struct nix_sqe_mem_s *mem;
710 
711 	mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
712 	mem->subdc = NIX_SUBDC_MEM;
713 	mem->alg = alg;
714 	mem->wmem = 1; /* wait for the memory operation */
715 	mem->addr = iova;
716 
717 	if (ptp_offset) {
718 		mem->start_offset = ptp_offset;
719 		mem->udp_csum_crt = !!udp_csum_crt;
720 		mem->base_ns = base_ns;
721 		mem->step_type = 1;
722 	}
723 
724 	*offset += sizeof(*mem);
725 }
726 
727 /* Add SQE header subdescriptor structure */
728 static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
729 			     struct nix_sqe_hdr_s *sqe_hdr,
730 			     struct sk_buff *skb, u16 qidx)
731 {
732 	int proto = 0;
733 
734 	/* Check if the SQE was framed before; if yes, then there is no need
735 	 * to set these constants again and again.
736 	 */
737 	if (!sqe_hdr->total) {
738 		/* Don't free Tx buffers to Aura */
739 		sqe_hdr->df = 1;
740 		sqe_hdr->aura = sq->aura_id;
741 		/* Post a CQE Tx after pkt transmission */
742 		sqe_hdr->pnc = 1;
743 		sqe_hdr->sq = (qidx >=  pfvf->hw.tx_queues) ?
744 			       qidx + pfvf->hw.xdp_queues : qidx;
745 	}
746 	sqe_hdr->total = skb->len;
747 	/* Set SQE identifier which will be used later for freeing SKB */
748 	sqe_hdr->sqe_id = sq->head;
749 
750 	/* Offload TCP/UDP checksum to HW */
751 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
752 		sqe_hdr->ol3ptr = skb_network_offset(skb);
753 		sqe_hdr->ol4ptr = skb_transport_offset(skb);
754 		/* get vlan protocol Ethertype */
755 		if (eth_type_vlan(skb->protocol))
756 			skb->protocol = vlan_get_protocol(skb);
757 
758 		if (skb->protocol == htons(ETH_P_IP)) {
759 			proto = ip_hdr(skb)->protocol;
760 			/* In case of TSO, HW needs this to be explicitly set.
761 			 * So set this always, instead of adding a check.
762 			 */
763 			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
764 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
765 			proto = ipv6_hdr(skb)->nexthdr;
766 			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6;
767 		}
768 
769 		if (proto == IPPROTO_TCP)
770 			sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
771 		else if (proto == IPPROTO_UDP)
772 			sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
773 	}
774 }
775 
776 static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
777 				struct otx2_snd_queue *sq,
778 				struct sk_buff *skb, int sqe, int hdr_len)
779 {
780 	int num_segs = skb_shinfo(skb)->nr_frags + 1;
781 	struct sg_list *sg = &sq->sg[sqe];
782 	u64 dma_addr;
783 	int seg, len;
784 
785 	sg->num_segs = 0;
786 
787 	/* Get payload length at skb->data */
788 	len = skb_headlen(skb) - hdr_len;
789 
790 	for (seg = 0; seg < num_segs; seg++) {
791 		/* Skip skb->data, if there is no payload */
792 		if (!seg && !len)
793 			continue;
794 		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
795 		if (dma_mapping_error(pfvf->dev, dma_addr))
796 			goto unmap;
797 
798 		/* Save DMA mapping info for later unmapping */
799 		sg->dma_addr[sg->num_segs] = dma_addr;
800 		sg->size[sg->num_segs] = len;
801 		sg->num_segs++;
802 	}
803 	return 0;
804 unmap:
805 	otx2_dma_unmap_skb_frags(pfvf, sg);
806 	return -EINVAL;
807 }
808 
809 static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
810 				  struct sk_buff *skb, int seg,
811 				  u64 seg_addr, int hdr_len, int sqe)
812 {
813 	struct sg_list *sg = &sq->sg[sqe];
814 	const skb_frag_t *frag;
815 	int offset;
816 
817 	if (seg < 0)
818 		return sg->dma_addr[0] + (seg_addr - (u64)skb->data);
819 
820 	frag = &skb_shinfo(skb)->frags[seg];
821 	offset = seg_addr - (u64)skb_frag_address(frag);
822 	if (skb_headlen(skb) - hdr_len)
823 		seg++;
824 	return sg->dma_addr[seg] + offset;
825 }
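
/* Rough address math: a negative seg means tso.data points into the skb
 * linear area, so the DMA address is sg->dma_addr[0] plus the offset of
 * that pointer from skb->data.  For paged fragments the offset is taken
 * from the fragment's virtual base address, and the index is shifted by
 * one when the linear area carried payload (and therefore occupies
 * dma_addr[0]).
 */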
826 
827 static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
828 				struct sg_list *list, int *offset)
829 {
830 	struct nix_sqe_sg_s *sg = NULL;
831 	u16 *sg_lens = NULL;
832 	u64 *iova = NULL;
833 	int seg;
834 
835 	/* Add SG descriptors with buffer addresses */
836 	for (seg = 0; seg < list->num_segs; seg++) {
837 		if ((seg % MAX_SEGS_PER_SG) == 0) {
838 			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
839 			sg->ld_type = NIX_SEND_LDTYPE_LDD;
840 			sg->subdc = NIX_SUBDC_SG;
841 			sg->segs = 0;
842 			sg_lens = (void *)sg;
843 			iova = (void *)sg + sizeof(*sg);
844 			/* The next subdc always starts at a 16-byte boundary, so
845 			 * whether sg->segs ends up as 2 or 3, reserve room for all 3 IOVAs.
846 			 */
847 			if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
848 				*offset += sizeof(*sg) + (3 * sizeof(u64));
849 			else
850 				*offset += sizeof(*sg) + sizeof(u64);
851 		}
852 		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
853 		*iova++ = list->dma_addr[seg];
854 		sg->segs++;
855 	}
856 }
857 
858 static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
859 			       struct sk_buff *skb, u16 qidx)
860 {
861 	struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
862 	int hdr_len, tcp_data, seg_len, pkt_len, offset;
863 	struct nix_sqe_hdr_s *sqe_hdr;
864 	int first_sqe = sq->head;
865 	struct sg_list list;
866 	struct tso_t tso;
867 
868 	hdr_len = tso_start(skb, &tso);
869 
870 	/* Map SKB's fragments to DMA.
871 	 * It's done here to avoid re-mapping buffers for every TSO segment.
872 	 */
873 	if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
874 		dev_kfree_skb_any(skb);
875 		return;
876 	}
877 
878 	netdev_tx_sent_queue(txq, skb->len);
879 
880 	tcp_data = skb->len - hdr_len;
881 	while (tcp_data > 0) {
882 		char *hdr;
883 
884 		seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
885 		tcp_data -= seg_len;
886 
887 		/* Set SQE's SEND_HDR */
888 		memset(sq->sqe_base, 0, sq->sqe_size);
889 		sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
890 		otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
891 		offset = sizeof(*sqe_hdr);
892 
893 		/* Add TSO segment's pkt header */
894 		hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
895 		tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
896 		list.dma_addr[0] =
897 			sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
898 		list.size[0] = hdr_len;
899 		list.num_segs = 1;
900 
901 		/* Add TSO segment's payload data fragments */
902 		pkt_len = hdr_len;
903 		while (seg_len > 0) {
904 			int size;
905 
906 			size = min_t(int, tso.size, seg_len);
907 
908 			list.size[list.num_segs] = size;
909 			list.dma_addr[list.num_segs] =
910 				otx2_tso_frag_dma_addr(sq, skb,
911 						       tso.next_frag_idx - 1,
912 						       (u64)tso.data, hdr_len,
913 						       first_sqe);
914 			list.num_segs++;
915 			pkt_len += size;
916 			seg_len -= size;
917 			tso_build_data(skb, &tso, size);
918 		}
919 		sqe_hdr->total = pkt_len;
920 		otx2_sqe_tso_add_sg(sq, &list, &offset);
921 
922 		/* The DMA mappings and skb need to be freed only after the last
923 		 * TSO segment is transmitted. So set 'PNC' only for the last
924 		 * segment. Also point the last segment's sqe_id to the first
925 		 * segment's SQE index, where the skb address and DMA mappings
926 		 * are saved.
927 		 */
928 		if (!tcp_data) {
929 			sqe_hdr->pnc = 1;
930 			sqe_hdr->sqe_id = first_sqe;
931 			sq->sg[first_sqe].skb = (u64)skb;
932 		} else {
933 			sqe_hdr->pnc = 0;
934 		}
935 
936 		sqe_hdr->sizem1 = (offset / 16) - 1;
937 
938 		/* Flush SQE to HW */
939 		pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
940 	}
941 }
942 
943 static bool is_hw_tso_supported(struct otx2_nic *pfvf,
944 				struct sk_buff *skb)
945 {
946 	int payload_len, last_seg_size;
947 
948 	if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
949 		return true;
950 
951 	/* On 96xx A0, HW TSO is not supported */
952 	if (!is_96xx_B0(pfvf->pdev))
953 		return false;
954 
955 	/* HW has an issue due to which when the payload of the last LSO
956 	 * segment is shorter than 16 bytes, some header fields may not
957 	 * be correctly modified, hence don't offload such TSO segments.
958 	 */
959 
960 	payload_len = skb->len - skb_tcp_all_headers(skb);
961 	last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
962 	if (last_seg_size && last_seg_size < 16)
963 		return false;
964 
965 	return true;
966 }
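
/* Example of the short-last-segment check (illustrative numbers): with a
 * TCP payload of 3001 bytes and gso_size = 1000, the final segment would
 * carry only 3001 % 1000 = 1 byte, which is below the 16-byte limit, so
 * the skb is segmented in software instead of being offloaded.
 */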
967 
968 static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
969 {
970 	if (!skb_shinfo(skb)->gso_size)
971 		return 1;
972 
973 	/* HW TSO */
974 	if (is_hw_tso_supported(pfvf, skb))
975 		return 1;
976 
977 	/* SW TSO */
978 	return skb_shinfo(skb)->gso_segs;
979 }
980 
981 static bool otx2_validate_network_transport(struct sk_buff *skb)
982 {
983 	if ((ip_hdr(skb)->protocol == IPPROTO_UDP) ||
984 	    (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
985 		struct udphdr *udph = udp_hdr(skb);
986 
987 		if (udph->source == htons(PTP_PORT) &&
988 		    udph->dest == htons(PTP_PORT))
989 			return true;
990 	}
991 
992 	return false;
993 }
994 
995 static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, bool *udp_csum_crt)
996 {
997 	struct ethhdr *eth = (struct ethhdr *)(skb->data);
998 	u16 nix_offload_hlen = 0, inner_vhlen = 0;
999 	bool udp_hdr_present = false, is_sync;
1000 	u8 *data = skb->data, *msgtype;
1001 	__be16 proto = eth->h_proto;
1002 	int network_depth = 0;
1003 
1004 	/* NIX is programmed to offload the outer VLAN header.
1005 	 * With a single VLAN the protocol field holds the network type (IP/IPv6);
1006 	 * with stacked VLANs it holds the inner VLAN type (0x8100).
1007 	 */
1008 	if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
1009 	    skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) {
1010 		if (skb->vlan_proto == htons(ETH_P_8021AD)) {
1011 			/* Get vlan protocol */
1012 			proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
1013 			/* SKB APIs like skb_transport_offset does not include
1014 			/* SKB APIs like skb_transport_offset() do not include the
1015 			 * offloaded VLAN header length, so it needs to be added
1016 			 * explicitly.
1017 			nix_offload_hlen = VLAN_HLEN;
1018 			inner_vhlen = VLAN_HLEN;
1019 		} else if (skb->vlan_proto == htons(ETH_P_8021Q)) {
1020 			nix_offload_hlen = VLAN_HLEN;
1021 		}
1022 	} else if (eth_type_vlan(eth->h_proto)) {
1023 		proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
1024 	}
1025 
1026 	switch (ntohs(proto)) {
1027 	case ETH_P_1588:
1028 		if (network_depth)
1029 			*offset = network_depth;
1030 		else
1031 			*offset = ETH_HLEN + nix_offload_hlen +
1032 				  inner_vhlen;
1033 		break;
1034 	case ETH_P_IP:
1035 	case ETH_P_IPV6:
1036 		if (!otx2_validate_network_transport(skb))
1037 			return false;
1038 
1039 		*offset = nix_offload_hlen + skb_transport_offset(skb) +
1040 			  sizeof(struct udphdr);
1041 		udp_hdr_present = true;
1042 
1043 	}
1044 
1045 	msgtype = data + *offset;
1046 	/* Check whether the PTP messageId is SYNC or not */
1047 	is_sync = !(*msgtype & 0xf);
1048 	if (is_sync)
1049 		*udp_csum_crt = udp_hdr_present;
1050 	else
1051 		*offset = 0;
1052 
1053 	return is_sync;
1054 }
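
/* Offset example (assuming an untagged PTP-over-UDP/IPv4 event packet):
 * skb_transport_offset() is 14 + 20 = 34, so *offset becomes 34 + 8 = 42,
 * the start of the PTPv2 header.  The low nibble of the byte at that
 * offset is the messageType, and only messageType 0 (SYNC) is treated as
 * a one-step sync candidate.
 */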
1055 
1056 static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
1057 			      struct otx2_snd_queue *sq, int *offset)
1058 {
1059 	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
1060 	struct ptpv2_tstamp *origin_tstamp;
1061 	bool udp_csum_crt = false;
1062 	unsigned int udphoff;
1063 	struct timespec64 ts;
1064 	int ptp_offset = 0;
1065 	__wsum skb_csum;
1066 	u64 iova;
1067 
1068 	if (unlikely(!skb_shinfo(skb)->gso_size &&
1069 		     (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
1070 		if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC &&
1071 			     otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum_crt))) {
1072 			origin_tstamp = (struct ptpv2_tstamp *)
1073 					((u8 *)skb->data + ptp_offset +
1074 					 PTP_SYNC_SEC_OFFSET);
1075 			ts = ns_to_timespec64(pfvf->ptp->tstamp);
1076 			origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
1077 			origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
1078 			origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
1079 			/* Point to correction field in PTP packet */
1080 			ptp_offset += 8;
1081 
1082 			/* When the user disables HW checksum, the stack calculates the csum,
1083 			 * but it does not cover the PTP timestamp, which is added later.
1084 			 * Recalculate the checksum manually, taking the timestamp into account.
1085 			 */
1086 			if (udp_csum_crt) {
1087 				struct udphdr *uh = udp_hdr(skb);
1088 
1089 				if (skb->ip_summed != CHECKSUM_PARTIAL && uh->check != 0) {
1090 					udphoff = skb_transport_offset(skb);
1091 					uh->check = 0;
1092 					skb_csum = skb_checksum(skb, udphoff, skb->len - udphoff,
1093 								0);
1094 					if (ntohs(eth->h_proto) == ETH_P_IPV6)
1095 						uh->check = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1096 									    &ipv6_hdr(skb)->daddr,
1097 									    skb->len - udphoff,
1098 									    ipv6_hdr(skb)->nexthdr,
1099 									    skb_csum);
1100 					else
1101 						uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
1102 									      ip_hdr(skb)->daddr,
1103 									      skb->len - udphoff,
1104 									      IPPROTO_UDP,
1105 									      skb_csum);
1106 				}
1107 			}
1108 		} else {
1109 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1110 		}
1111 		iova = sq->timestamps->iova + (sq->head * sizeof(u64));
1112 		otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
1113 				 ptp_offset, pfvf->ptp->base_ns, udp_csum_crt);
1114 	} else {
1115 		skb_tx_timestamp(skb);
1116 	}
1117 }
1118 
1119 bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
1120 			struct sk_buff *skb, u16 qidx)
1121 {
1122 	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
1123 	struct otx2_nic *pfvf = netdev_priv(netdev);
1124 	int offset, num_segs, free_desc;
1125 	struct nix_sqe_hdr_s *sqe_hdr;
1126 
1127 	/* Check if there is enough room between producer
1128 	 * and consumer index.
1129 	 */
1130 	free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
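	/* Ring-room example (illustrative numbers): with sqe_cnt = 4096,
	 * head = 10 and cons_head = 5, free_desc works out to
	 * (5 - 10 - 1 + 4096) & 4095 = 4090 descriptors still available.
	 */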
1131 	if (free_desc < sq->sqe_thresh)
1132 		return false;
1133 
1134 	if (free_desc < otx2_get_sqe_count(pfvf, skb))
1135 		return false;
1136 
1137 	num_segs = skb_shinfo(skb)->nr_frags + 1;
1138 
1139 	/* If SKB doesn't fit in a single SQE, linearize it.
1140 	 * TODO: Consider adding JUMP descriptor instead.
1141 	 */
1142 	if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
1143 		if (__skb_linearize(skb)) {
1144 			dev_kfree_skb_any(skb);
1145 			return true;
1146 		}
1147 		num_segs = skb_shinfo(skb)->nr_frags + 1;
1148 	}
1149 
1150 	if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
1151 		/* Insert vlan tag before giving pkt to tso */
1152 		if (skb_vlan_tag_present(skb))
1153 			skb = __vlan_hwaccel_push_inside(skb);
1154 		otx2_sq_append_tso(pfvf, sq, skb, qidx);
1155 		return true;
1156 	}
1157 
1158 	/* Set SQE's SEND_HDR.
1159 	 * Do not clear the first 64 bits as they contain constant info.
1160 	 */
1161 	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
1162 	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
1163 	otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
1164 	offset = sizeof(*sqe_hdr);
1165 
1166 	/* Add extended header if needed */
1167 	otx2_sqe_add_ext(pfvf, sq, skb, &offset);
1168 
1169 	/* Add SG subdesc with data frags */
1170 	if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
1171 		otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
1172 		return false;
1173 	}
1174 
1175 	otx2_set_txtstamp(pfvf, skb, sq, &offset);
1176 
1177 	sqe_hdr->sizem1 = (offset / 16) - 1;
1178 
1179 	netdev_tx_sent_queue(txq, skb->len);
1180 
1181 	/* Flush SQE to HW */
1182 	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
1183 
1184 	return true;
1185 }
1186 EXPORT_SYMBOL(otx2_sq_append_skb);
1187 
1188 void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx)
1189 {
1190 	struct nix_cqe_rx_s *cqe;
1191 	struct otx2_pool *pool;
1192 	int processed_cqe = 0;
1193 	u16 pool_id;
1194 	u64 iova;
1195 
1196 	if (pfvf->xdp_prog)
1197 		xdp_rxq_info_unreg(&cq->xdp_rxq);
1198 
1199 	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
1200 		return;
1201 
1202 	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
1203 	pool = &pfvf->qset.pool[pool_id];
1204 
1205 	while (cq->pend_cqe) {
1206 		cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
1207 		processed_cqe++;
1208 		cq->pend_cqe--;
1209 
1210 		if (!cqe)
1211 			continue;
1212 		if (cqe->sg.segs > 1) {
1213 			otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
1214 			continue;
1215 		}
1216 		iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
1217 
1218 		otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
1219 	}
1220 
1221 	/* Free CQEs to HW */
1222 	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
1223 		     ((u64)cq->cq_idx << 32) | processed_cqe);
1224 }
1225 
1226 void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
1227 {
1228 	struct sk_buff *skb = NULL;
1229 	struct otx2_snd_queue *sq;
1230 	struct nix_cqe_tx_s *cqe;
1231 	int processed_cqe = 0;
1232 	struct sg_list *sg;
1233 	int qidx;
1234 
1235 	qidx = cq->cq_idx - pfvf->hw.rx_queues;
1236 	sq = &pfvf->qset.sq[qidx];
1237 
1238 	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
1239 		return;
1240 
1241 	while (cq->pend_cqe) {
1242 		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
1243 		processed_cqe++;
1244 		cq->pend_cqe--;
1245 
1246 		if (!cqe)
1247 			continue;
1248 		sg = &sq->sg[cqe->comp.sqe_id];
1249 		skb = (struct sk_buff *)sg->skb;
1250 		if (skb) {
1251 			otx2_dma_unmap_skb_frags(pfvf, sg);
1252 			dev_kfree_skb_any(skb);
1253 			sg->skb = (u64)NULL;
1254 		}
1255 	}
1256 
1257 	/* Free CQEs to HW */
1258 	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
1259 		     ((u64)cq->cq_idx << 32) | processed_cqe);
1260 }
1261 
1262 int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
1263 {
1264 	struct msg_req *msg;
1265 	int err;
1266 
1267 	mutex_lock(&pfvf->mbox.lock);
1268 	if (enable)
1269 		msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
1270 	else
1271 		msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
1272 
1273 	if (!msg) {
1274 		mutex_unlock(&pfvf->mbox.lock);
1275 		return -ENOMEM;
1276 	}
1277 
1278 	err = otx2_sync_mbox_msg(&pfvf->mbox);
1279 	mutex_unlock(&pfvf->mbox.lock);
1280 	return err;
1281 }
1282 
1283 static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
1284 				int len, int *offset)
1285 {
1286 	struct nix_sqe_sg_s *sg = NULL;
1287 	u64 *iova = NULL;
1288 
1289 	sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
1290 	sg->ld_type = NIX_SEND_LDTYPE_LDD;
1291 	sg->subdc = NIX_SUBDC_SG;
1292 	sg->segs = 1;
1293 	sg->seg1_size = len;
1294 	iova = (void *)sg + sizeof(*sg);
1295 	*iova = dma_addr;
1296 	*offset += sizeof(*sg) + sizeof(u64);
1297 
1298 	sq->sg[sq->head].dma_addr[0] = dma_addr;
1299 	sq->sg[sq->head].size[0] = len;
1300 	sq->sg[sq->head].num_segs = 1;
1301 }
1302 
1303 bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
1304 {
1305 	struct nix_sqe_hdr_s *sqe_hdr;
1306 	struct otx2_snd_queue *sq;
1307 	int offset, free_sqe;
1308 
1309 	sq = &pfvf->qset.sq[qidx];
1310 	free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
1311 	if (free_sqe < sq->sqe_thresh)
1312 		return false;
1313 
1314 	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
1315 
1316 	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
1317 
1318 	if (!sqe_hdr->total) {
1319 		sqe_hdr->aura = sq->aura_id;
1320 		sqe_hdr->df = 1;
1321 		sqe_hdr->sq = qidx;
1322 		sqe_hdr->pnc = 1;
1323 	}
1324 	sqe_hdr->total = len;
1325 	sqe_hdr->sqe_id = sq->head;
1326 
1327 	offset = sizeof(*sqe_hdr);
1328 
1329 	otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
1330 	sqe_hdr->sizem1 = (offset / 16) - 1;
1331 	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
1332 
1333 	return true;
1334 }
1335 
1336 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
1337 				     struct bpf_prog *prog,
1338 				     struct nix_cqe_rx_s *cqe,
1339 				     struct otx2_cq_queue *cq)
1340 {
1341 	unsigned char *hard_start, *data;
1342 	int qidx = cq->cq_idx;
1343 	struct xdp_buff xdp;
1344 	struct page *page;
1345 	u64 iova, pa;
1346 	u32 act;
1347 	int err;
1348 
1349 	iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
1350 	pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
1351 	page = virt_to_page(phys_to_virt(pa));
1352 
1353 	xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
1354 
1355 	data = (unsigned char *)phys_to_virt(pa);
1356 	hard_start = page_address(page);
1357 	xdp_prepare_buff(&xdp, hard_start, data - hard_start,
1358 			 cqe->sg.seg_size, false);
1359 
1360 	act = bpf_prog_run_xdp(prog, &xdp);
1361 
1362 	switch (act) {
1363 	case XDP_PASS:
1364 		break;
1365 	case XDP_TX:
1366 		qidx += pfvf->hw.tx_queues;
1367 		cq->pool_ptrs++;
1368 		return otx2_xdp_sq_append_pkt(pfvf, iova,
1369 					      cqe->sg.seg_size, qidx);
1370 	case XDP_REDIRECT:
1371 		cq->pool_ptrs++;
1372 		err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
1373 
1374 		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
1375 				    DMA_FROM_DEVICE);
1376 		if (!err)
1377 			return true;
1378 		put_page(page);
1379 		break;
1380 	default:
1381 		bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
1382 		break;
1383 	case XDP_ABORTED:
1384 		trace_xdp_exception(pfvf->netdev, prog, act);
1385 		break;
1386 	case XDP_DROP:
1387 		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
1388 				    DMA_FROM_DEVICE);
1389 		put_page(page);
1390 		cq->pool_ptrs++;
1391 		return true;
1392 	}
1393 	return false;
1394 }
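
/* Rough XDP disposition summary for the handler above: XDP_TX re-posts the
 * buffer on an XDP send queue (the receive queue index shifted past the
 * regular TX queues), XDP_REDIRECT hands the frame to xdp_do_redirect(),
 * and XDP_DROP unmaps and releases the page; in those cases cq->pool_ptrs
 * is bumped so the RX pool gets refilled.  Returning false (e.g. for
 * XDP_PASS) lets the caller continue and build an skb for the packet as
 * usual.
 */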
1395