// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"

#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))

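/* Return the next valid CQE from the completion queue, or NULL if the ring
 * is empty. On success the queue head is advanced with wrap-around.
 */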
static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
	struct nix_cqe_hdr_s *cqe_hdr;

	cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
	if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
		return NULL;

	cq->cq_head++;
	cq->cq_head &= (cq->cqe_cnt - 1);

	return cqe_hdr;
}

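/* Up to three 16-bit segment lengths share a single 64-bit word of an SG
 * sub-descriptor; frag_num() mirrors the index within each 64-bit word so
 * the lengths land in the slots HW expects on big-endian CPUs as well.
 */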
static unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
					struct sk_buff *skb, int seg, int *len)
{
	const skb_frag_t *frag;
	struct page *page;
	int offset;

	/* First segment is always skb->data */
	if (!seg) {
		page = virt_to_page(skb->data);
		offset = offset_in_page(skb->data);
		*len = skb_headlen(skb);
	} else {
		frag = &skb_shinfo(skb)->frags[seg - 1];
		page = skb_frag_page(frag);
		offset = skb_frag_off(frag);
		*len = skb_frag_size(frag);
	}
	return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
}

static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{
	int seg;

	for (seg = 0; seg < sg->num_segs; seg++) {
		otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
				    sg->size[seg], DMA_TO_DEVICE);
	}
	sg->num_segs = 0;
}

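/* Handle one TX completion: report any error status, unmap the DMA buffers
 * saved when the SQE was submitted, account packet/byte counts and free the
 * skb.
 */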
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq,
				 struct otx2_snd_queue *sq,
				 struct nix_cqe_tx_s *cqe,
				 int budget, int *tx_pkts, int *tx_bytes)
{
	struct nix_send_comp_s *snd_comp = &cqe->comp;
	struct sk_buff *skb = NULL;
	struct sg_list *sg;

	if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
		net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
				    pfvf->netdev->name, cq->cint_idx,
				    snd_comp->status);

	sg = &sq->sg[snd_comp->sqe_id];
	skb = (struct sk_buff *)sg->skb;
	if (unlikely(!skb))
		return;

	*tx_bytes += skb->len;
	(*tx_pkts)++;
	otx2_dma_unmap_skb_frags(pfvf, sg);
	napi_consume_skb(skb, budget);
	sg->skb = (u64)NULL;
}

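/* Attach a received buffer to the skb as a page fragment and unmap it from
 * the device; the buffer was mapped with OTX2_HEAD_ROOM ahead of 'iova'.
 */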
static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
			      u64 iova, int len)
{
	struct page *page;
	void *va;

	va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
	page = virt_to_page(va);
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			va - page_address(page), len, pfvf->rbsize);

	otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
			    pfvf->rbsize, DMA_FROM_DEVICE);
}

static void otx2_set_rxhash(struct otx2_nic *pfvf,
			    struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	struct otx2_rss_info *rss;
	u32 hash = 0;

	if (!(pfvf->netdev->features & NETIF_F_RXHASH))
		return;

	rss = &pfvf->hw.rss_info;
	if (rss->flowkey_cfg) {
		if (rss->flowkey_cfg &
		    ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_L3;
		hash = cqe->hdr.flow_tag;
	}
	skb_set_hash(skb, hash, hash_type);
}

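/* Walk all SG sub-descriptors of a received CQE and return every segment's
 * buffer pointer back to the Aura pool.
 */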
static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
			      int qidx)
{
	struct nix_rx_sg_s *sg = &cqe->sg;
	void *end, *start;
	u64 *seg_addr;
	int seg;

	start = (void *)sg;
	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
	while (start < end) {
		sg = (struct nix_rx_sg_s *)start;
		seg_addr = &sg->seg_addr;
		for (seg = 0; seg < sg->segs; seg++, seg_addr++)
			otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL);
		start += sizeof(*sg);
	}
}

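/* Classify a receive error reported in the CQE parse result and bump the
 * matching driver statistic. Returns true if the packet must be dropped
 * (its buffers are freed back to the pool here), false if it may still be
 * passed up to the stack.
 */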
static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
				  struct nix_cqe_rx_s *cqe, int qidx)
{
	struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
	struct nix_rx_parse_s *parse = &cqe->parse;

	if (netif_msg_rx_err(pfvf))
		netdev_err(pfvf->netdev,
			   "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
			   qidx, parse->errlev, parse->errcode);

	if (parse->errlev == NPC_ERRLVL_RE) {
		switch (parse->errcode) {
		case ERRCODE_FCS:
		case ERRCODE_FCS_RCV:
			atomic_inc(&stats->rx_fcs_errs);
			break;
		case ERRCODE_UNDERSIZE:
			atomic_inc(&stats->rx_undersize_errs);
			break;
		case ERRCODE_OVERSIZE:
			atomic_inc(&stats->rx_oversize_errs);
			break;
		case ERRCODE_OL2_LEN_MISMATCH:
			atomic_inc(&stats->rx_len_errs);
			break;
		default:
			atomic_inc(&stats->rx_other_errs);
			break;
		}
	} else if (parse->errlev == NPC_ERRLVL_NIX) {
		switch (parse->errcode) {
		case ERRCODE_OL3_LEN:
		case ERRCODE_OL4_LEN:
		case ERRCODE_IL3_LEN:
		case ERRCODE_IL4_LEN:
			atomic_inc(&stats->rx_len_errs);
			break;
		case ERRCODE_OL4_CSUM:
		case ERRCODE_IL4_CSUM:
			atomic_inc(&stats->rx_csum_errs);
			break;
		default:
			atomic_inc(&stats->rx_other_errs);
			break;
		}
	} else {
		atomic_inc(&stats->rx_other_errs);
		/* For now ignore all the NPC parser errors and
		 * pass the packets to the stack.
		 */
		if (cqe->sg.segs == 1)
			return false;
	}

	/* If RXALL is enabled, pass packets on to the stack. */
	if (cqe->sg.segs == 1 && (pfvf->netdev->features & NETIF_F_RXALL))
		return false;

	/* Free buffer back to pool */
	if (cqe->sg.segs)
		otx2_free_rcv_seg(pfvf, cqe, qidx);
	return true;
}

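/* Process one RX CQE: drop errored packets, otherwise build an skb from the
 * receive buffer, fill in hash/checksum metadata and hand it to GRO.
 */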
static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
				 struct napi_struct *napi,
				 struct otx2_cq_queue *cq,
				 struct nix_cqe_rx_s *cqe)
{
	struct nix_rx_parse_s *parse = &cqe->parse;
	struct sk_buff *skb = NULL;

	if (unlikely(parse->errlev || parse->errcode || cqe->sg.segs > 1)) {
		if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
			return;
	}

	skb = napi_get_frags(napi);
	if (unlikely(!skb))
		return;

	otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size);
	cq->pool_ptrs++;

	otx2_set_rxhash(pfvf, cqe, skb);

	skb_record_rx_queue(skb, cq->cq_idx);
	if (pfvf->netdev->features & NETIF_F_RXCSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_frags(napi);
}

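/* RX NAPI poll for one CQ: process up to 'budget' CQEs, acknowledge them to
 * HW via the CQ doorbell and refill the Aura pool with fresh receive
 * buffers. If buffer allocation fails, refilling is deferred to a delayed
 * workqueue.
 */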
static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
				struct napi_struct *napi,
				struct otx2_cq_queue *cq, int budget)
{
	struct nix_cqe_rx_s *cqe;
	int processed_cqe = 0;
	s64 bufptr;

	while (likely(processed_cqe < budget)) {
		cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
		if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
		    !cqe->sg.seg_addr) {
			if (!processed_cqe)
				return 0;
			break;
		}
		cq->cq_head++;
		cq->cq_head &= (cq->cqe_cnt - 1);

		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);

		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
		cqe->sg.seg_addr = 0x00;
		processed_cqe++;
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);

	if (unlikely(!cq->pool_ptrs))
		return 0;

	/* Refill pool with new buffers */
	while (cq->pool_ptrs) {
		bufptr = otx2_alloc_rbuf(pfvf, cq->rbpool, GFP_ATOMIC);
		if (unlikely(bufptr <= 0)) {
			struct refill_work *work;
			struct delayed_work *dwork;

			work = &pfvf->refill_wrk[cq->cq_idx];
			dwork = &work->pool_refill_work;
			/* Schedule a task if no other task is running */
			if (!cq->refill_task_sched) {
				cq->refill_task_sched = true;
				schedule_delayed_work(dwork,
						      msecs_to_jiffies(100));
			}
			break;
		}
		otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
		cq->pool_ptrs--;
	}
	otx2_get_page(cq->rbpool);

	return processed_cqe;
}

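/* TX completion poll for one CQ: reclaim up to 'budget' completed SQEs,
 * acknowledge them to HW and wake the netdev TX queue if it was stopped.
 */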
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
				struct otx2_cq_queue *cq, int budget)
{
	int tx_pkts = 0, tx_bytes = 0;
	struct nix_cqe_tx_s *cqe;
	int processed_cqe = 0;

	while (likely(processed_cqe < budget)) {
		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
		if (unlikely(!cqe)) {
			if (!processed_cqe)
				return 0;
			break;
		}
		otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx],
				     cqe, budget, &tx_pkts, &tx_bytes);

		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
		processed_cqe++;
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);

	if (likely(tx_pkts)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
		/* Check if the queue was stopped earlier due to a full ring */
		smp_mb();
		if (netif_tx_queue_stopped(txq) &&
		    netif_carrier_ok(pfvf->netdev))
			netif_tx_wake_queue(txq);
	}
	return 0;
}

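/* NAPI handler for one completion interrupt: poll every CQ mapped to this
 * CINT, clear the interrupt and re-enable it once the budget was not
 * exhausted and the interface is still up.
 */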
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
	struct otx2_cq_poll *cq_poll;
	int workdone = 0, cq_idx, i;
	struct otx2_cq_queue *cq;
	struct otx2_qset *qset;
	struct otx2_nic *pfvf;

	cq_poll = container_of(napi, struct otx2_cq_poll, napi);
	pfvf = (struct otx2_nic *)cq_poll->dev;
	qset = &pfvf->qset;

	for (i = CQS_PER_CINT - 1; i >= 0; i--) {
		cq_idx = cq_poll->cq_ids[i];
		if (unlikely(cq_idx == CINT_INVALID_CQ))
			continue;
		cq = &qset->cq[cq_idx];
		if (cq->cq_type == CQ_RX) {
			/* If the RQ refill WQ task is running, skip NAPI
			 * processing for this queue.
			 */
			if (cq->refill_task_sched)
				continue;
			workdone += otx2_rx_napi_handler(pfvf, napi,
							 cq, budget);
		} else {
			workdone += otx2_tx_napi_handler(pfvf, cq, budget);
		}
	}

	/* Clear the IRQ */
	otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));

	if (workdone < budget && napi_complete_done(napi, workdone)) {
		/* If the interface is going down, don't re-enable the IRQ */
		if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
			return workdone;

		/* Re-enable interrupts */
		otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
			     BIT_ULL(0));
	}
	return workdone;
}

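/* Flush a fully framed SQE to HW through the LMT region; the LMTST is
 * retried until the store is confirmed, then the SQ head is advanced.
 */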
static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
{
	u64 status;

	/* Packet data stores should finish before SQE is flushed to HW */
	dma_wmb();

	do {
		memcpy(sq->lmt_addr, sq->sqe_base, size);
		status = otx2_lmt_flush(sq->io_addr);
	} while (status == 0);

	sq->head++;
	sq->head &= (sq->sqe_cnt - 1);
}

#define MAX_SEGS_PER_SG	3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			    struct sk_buff *skb, int num_segs, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u64 dma_addr, *iova = NULL;
	u16 *sg_lens = NULL;
	int seg, len;

	sq->sg[sq->head].num_segs = 0;

	for (seg = 0; seg < num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* The next subdc always starts at a 16-byte boundary,
			 * so if this SG carries 2 or 3 segments, reserve room
			 * for all three IOVAs; otherwise reserve just one.
			 */
			if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			return false;

		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
		sg->segs++;
		*iova++ = dma_addr;

		/* Save DMA mapping info for later unmapping */
		sq->sg[sq->head].dma_addr[seg] = dma_addr;
		sq->sg[sq->head].size[seg] = len;
		sq->sg[sq->head].num_segs++;
	}

	sq->sg[sq->head].skb = (u64)skb;
	return true;
}

/* Add SQE extended header subdescriptor */
static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct sk_buff *skb, int *offset)
{
	struct nix_sqe_ext_s *ext;

	ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
	ext->subdc = NIX_SUBDC_EXT;
	if (skb_shinfo(skb)->gso_size) {
		ext->lso = 1;
		ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
		ext->lso_mps = skb_shinfo(skb)->gso_size;

		/* Only TSOv4 and TSOv6 GSO offloads are supported */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			ext->lso_format = pfvf->hw.lso_tsov4_idx;

			/* HW adds the payload size to 'ip_hdr->tot_len' while
			 * sending each TSO segment, so set the IP header's
			 * total length to just the header length.
			 */
			ip_hdr(skb)->tot_len =
				htons(ext->lso_sb - skb_network_offset(skb));
		} else {
			ext->lso_format = pfvf->hw.lso_tsov6_idx;
			ipv6_hdr(skb)->payload_len =
				htons(ext->lso_sb - skb_network_offset(skb));
		}
	}
	*offset += sizeof(*ext);
}

/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct nix_sqe_hdr_s *sqe_hdr,
			     struct sk_buff *skb, u16 qidx)
{
	int proto = 0;

	/* If the SQE was framed before, there is no need to set these
	 * constant fields again.
	 */
	if (!sqe_hdr->total) {
		/* Don't free Tx buffers to Aura */
		sqe_hdr->df = 1;
		sqe_hdr->aura = sq->aura_id;
		/* Post a CQE Tx after pkt transmission */
		sqe_hdr->pnc = 1;
		sqe_hdr->sq = qidx;
	}
	sqe_hdr->total = skb->len;
	/* Set the SQE identifier, used later for freeing the SKB */
	sqe_hdr->sqe_id = sq->head;

	/* Offload TCP/UDP checksum to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		sqe_hdr->ol3ptr = skb_network_offset(skb);
		sqe_hdr->ol4ptr = skb_transport_offset(skb);
		/* Get the VLAN-encapsulated protocol Ethertype */
		if (eth_type_vlan(skb->protocol))
			skb->protocol = vlan_get_protocol(skb);

		if (skb->protocol == htons(ETH_P_IP)) {
			proto = ip_hdr(skb)->protocol;
			/* In case of TSO, HW needs this to be explicitly set,
			 * so set it always instead of adding a check.
			 */
			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			proto = ipv6_hdr(skb)->nexthdr;
		}

		if (proto == IPPROTO_TCP)
			sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
		else if (proto == IPPROTO_UDP)
			sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
	}
}

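/* DMA map the payload of a TSO skb (linear data past the headers plus all
 * page fragments) and record the mappings in the SQE's sg_list so they can
 * be unmapped once the last segment completes.
 */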
static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
				struct otx2_snd_queue *sq,
				struct sk_buff *skb, int sqe, int hdr_len)
{
	int num_segs = skb_shinfo(skb)->nr_frags + 1;
	struct sg_list *sg = &sq->sg[sqe];
	u64 dma_addr;
	int seg, len;

	sg->num_segs = 0;

	/* Get payload length at skb->data */
	len = skb_headlen(skb) - hdr_len;

	for (seg = 0; seg < num_segs; seg++) {
		/* Skip skb->data, if there is no payload */
		if (!seg && !len)
			continue;
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			goto unmap;

		/* Save DMA mapping info for later unmapping */
		sg->dma_addr[sg->num_segs] = dma_addr;
		sg->size[sg->num_segs] = len;
		sg->num_segs++;
	}
	return 0;
unmap:
	otx2_dma_unmap_skb_frags(pfvf, sg);
	return -EINVAL;
}

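/* Translate a TSO segment's payload address (from the tso_t iterator) into
 * the DMA address recorded by otx2_dma_map_tso_skb(); 'seg' < 0 refers to
 * the linear skb->data area.
 */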
static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
				  struct sk_buff *skb, int seg,
				  u64 seg_addr, int hdr_len, int sqe)
{
	struct sg_list *sg = &sq->sg[sqe];
	const skb_frag_t *frag;
	int offset;

	if (seg < 0)
		return sg->dma_addr[0] + (seg_addr - (u64)skb->data);

	frag = &skb_shinfo(skb)->frags[seg];
	offset = seg_addr - (u64)skb_frag_address(frag);
	if (skb_headlen(skb) - hdr_len)
		seg++;
	return sg->dma_addr[seg] + offset;
}

static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
				struct sg_list *list, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u16 *sg_lens = NULL;
	u64 *iova = NULL;
	int seg;

	/* Add SG descriptors with buffer addresses */
	for (seg = 0; seg < list->num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* The next subdc always starts at a 16-byte boundary,
			 * so if this SG carries 2 or 3 segments, reserve room
			 * for all three IOVAs; otherwise reserve just one.
			 */
			if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
		*iova++ = list->dma_addr[seg];
		sg->segs++;
	}
}

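/* Software TSO: segment the skb in the driver, building one SQE per TCP
 * segment. Headers are rebuilt in the queue's tso_hdrs buffer, payload
 * fragments reuse the DMA mappings of the original skb, and the skb is
 * freed only when the last segment's completion arrives.
 */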
static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			       struct sk_buff *skb, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int tcp_data, seg_len, pkt_len, offset;
	struct nix_sqe_hdr_s *sqe_hdr;
	int first_sqe = sq->head;
	struct sg_list list;
	struct tso_t tso;

	/* Map the SKB's fragments to DMA here, once, to avoid remapping
	 * them for every TSO segment's packet.
	 */
	if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
		dev_kfree_skb_any(skb);
		return;
	}

	netdev_tx_sent_queue(txq, skb->len);

	tso_start(skb, &tso);
	tcp_data = skb->len - hdr_len;
	while (tcp_data > 0) {
		char *hdr;

		seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
		tcp_data -= seg_len;

		/* Set SQE's SEND_HDR */
		memset(sq->sqe_base, 0, sq->sqe_size);
		sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
		otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
		offset = sizeof(*sqe_hdr);

		/* Add TSO segment's pkt header */
		hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
		tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
		list.dma_addr[0] =
			sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
		list.size[0] = hdr_len;
		list.num_segs = 1;

		/* Add TSO segment's payload data fragments */
		pkt_len = hdr_len;
		while (seg_len > 0) {
			int size;

			size = min_t(int, tso.size, seg_len);

			list.size[list.num_segs] = size;
			list.dma_addr[list.num_segs] =
				otx2_tso_frag_dma_addr(sq, skb,
						       tso.next_frag_idx - 1,
						       (u64)tso.data, hdr_len,
						       first_sqe);
			list.num_segs++;
			pkt_len += size;
			seg_len -= size;
			tso_build_data(skb, &tso, size);
		}
		sqe_hdr->total = pkt_len;
		otx2_sqe_tso_add_sg(sq, &list, &offset);

		/* DMA mappings and the skb need to be freed only after the
		 * last TSO segment has been transmitted, so set 'PNC' only
		 * for the last segment. Also point the last segment's sqe_id
		 * to the first segment's SQE index, where the skb address and
		 * DMA mappings are saved.
		 */
		if (!tcp_data) {
			sqe_hdr->pnc = 1;
			sqe_hdr->sqe_id = first_sqe;
			sq->sg[first_sqe].skb = (u64)skb;
		} else {
			sqe_hdr->pnc = 0;
		}

		sqe_hdr->sizem1 = (offset / 16) - 1;

		/* Flush SQE to HW */
		otx2_sqe_flush(sq, offset);
	}
}

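/* Return true if this skb's TSO can be offloaded to HW. On 96xx B0
 * silicon, skbs whose last segment carries less than 16 bytes of payload
 * are excluded due to the erratum described below.
 */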
static bool is_hw_tso_supported(struct otx2_nic *pfvf,
				struct sk_buff *skb)
{
	int payload_len, last_seg_size;

	if (!pfvf->hw.hw_tso)
		return false;

	/* HW has an issue where, if the payload of the last LSO segment is
	 * shorter than 16 bytes, some header fields may not be modified
	 * correctly; hence don't offload such TSO segments.
	 */
	if (!is_96xx_B0(pfvf->pdev))
		return true;

	payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
	if (last_seg_size && last_seg_size < 16)
		return false;

	return true;
}

static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
{
	if (!skb_shinfo(skb)->gso_size)
		return 1;

	/* HW TSO */
	if (is_hw_tso_supported(pfvf, skb))
		return 1;

	/* SW TSO */
	return skb_shinfo(skb)->gso_segs;
}

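/* Frame and submit an SQE for the given skb. Returns false if the SQ has no
 * room for it (the caller is expected to stop the queue and retry later);
 * returns true if the skb was consumed, i.e. either queued to HW or dropped.
 */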
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
			struct sk_buff *skb, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int offset, num_segs, free_sqe;
	struct nix_sqe_hdr_s *sqe_hdr;

	/* Check if there is room for a new SQE:
	 * 'number of SQBs freed to the SQ's pool - SQ's Aura count' gives
	 * the free SQE count.
	 */
	free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;

	if (free_sqe < sq->sqe_thresh ||
	    free_sqe < otx2_get_sqe_count(pfvf, skb))
		return false;

	num_segs = skb_shinfo(skb)->nr_frags + 1;

	/* If the SKB doesn't fit in a single SQE, linearize it.
	 * TODO: Consider adding a JUMP descriptor instead.
	 */
	if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return true;
		}
		num_segs = skb_shinfo(skb)->nr_frags + 1;
	}

	if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
		otx2_sq_append_tso(pfvf, sq, skb, qidx);
		return true;
	}

	/* Set SQE's SEND_HDR.
	 * Do not clear the first 64 bits, as they contain constant info.
	 */
	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
	otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
	offset = sizeof(*sqe_hdr);

	/* Add extended header if needed */
	otx2_sqe_add_ext(pfvf, sq, skb, &offset);

	/* Add SG subdesc with data frags */
	if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
		otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
		return false;
	}

	sqe_hdr->sizem1 = (offset / 16) - 1;

	netdev_tx_sent_queue(txq, skb->len);

	/* Flush SQE to HW */
	otx2_sqe_flush(sq, offset);

	return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);

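/* Drain an RX CQ at teardown: free every pending receive buffer back to the
 * page allocator (or to the pool for multi-segment CQEs) and acknowledge
 * the CQEs to HW.
 */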
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
	struct nix_cqe_rx_s *cqe;
	int processed_cqe = 0;
	u64 iova, pa;

	while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
		if (!cqe->sg.subdc)
			continue;
		processed_cqe++;
		if (cqe->sg.segs > 1) {
			otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
			continue;
		}
		iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
		put_page(virt_to_page(phys_to_virt(pa)));
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);
}

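/* Drain a TX CQ at teardown: unmap and free any skbs still awaiting
 * completion and acknowledge the CQEs to HW.
 */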
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
	struct sk_buff *skb = NULL;
	struct otx2_snd_queue *sq;
	struct nix_cqe_tx_s *cqe;
	int processed_cqe = 0;
	struct sg_list *sg;

	sq = &pfvf->qset.sq[cq->cint_idx];

	while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) {
		sg = &sq->sg[cqe->comp.sqe_id];
		skb = (struct sk_buff *)sg->skb;
		if (skb) {
			otx2_dma_unmap_skb_frags(pfvf, sg);
			dev_kfree_skb_any(skb);
			sg->skb = (u64)NULL;
		}
		processed_cqe++;
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);
}

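/* Ask the admin function over the mailbox to start or stop packet I/O
 * (NIX_LF_START_RX / NIX_LF_STOP_RX) for this LF.
 */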
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *msg;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	if (enable)
		msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
	else
		msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);

	if (!msg) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}