// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg);

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);

	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);

	DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
					  unsigned int len, bool frags)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	netdev = lif->netdev;
	stats = q_to_rx_stats(q);

	if (frags)
		skb = napi_get_frags(&q_to_qcq(q)->napi);
	else
		skb = netdev_alloc_skb_ip_align(netdev, len);

	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	return skb;
}

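/* Build a non-linear skb from the posted receive page(s): each filled
 * page becomes one skb frag and is unmapped as it is handed over.
 * comp->num_sg_elems counts the SG elements beyond the base
 * descriptor, hence the "+ 1".
 */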
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	prefetch(page_address(page_info->page) + NET_IP_ALIGN);

	skb = ionic_rx_skb_alloc(q, len, true);
	if (unlikely(!skb))
		return NULL;

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!page_info->page)) {
			struct napi_struct *napi = &q_to_qcq(q)->napi;

			napi->skb = NULL;
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min(len, (u16)PAGE_SIZE);
		len -= frag_len;

		dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
			       PAGE_SIZE, DMA_FROM_DEVICE);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page_info->page, 0, frag_len, PAGE_SIZE);
		page_info->page = NULL;
		page_info++;
		i--;
	} while (i > 0);

	return skb;
}

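/* Copybreak path for small packets: copy the payload into a freshly
 * allocated linear skb so the still-mapped receive page stays attached
 * to the descriptor and can be re-posted by ionic_rx_fill().
 */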
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	skb = ionic_rx_skb_alloc(q, len, false);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(!page_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(page_info->page), len);
	dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

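/* Per-completion receive processing: drop on error or while the queue
 * is being reset, build the skb (copybreak or frags), then apply the
 * RSS hash, checksum offload and VLAN tag results before handing the
 * packet to GRO.
 */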
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	stats = q_to_rx_stats(q);
	netdev = q->lif->netdev;

	if (comp->status) {
		stats->dropped++;
		return;
	}

	/* no packet processing while resetting */
	if (unlikely(test_bit(IONIC_LIF_F_QUEUE_RESET, q->lif->state))) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, cq_info);
	else
		skb = ionic_rx_frags(q, desc_info, cq_info);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM)) {
		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__wsum)le16_to_cpu(comp->csum);
			stats->csum_complete++;
		}
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

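/* Service one Rx completion if one is available: the completion is
 * valid only when its color bit matches the CQ's current done_color,
 * and it must refer to the descriptor at the queue tail.  Returns true
 * when a completion was consumed.
 */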
static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail->index == q->head->index)
		return false;

	desc_info = q->tail;
	if (desc_info->index != le16_to_cpu(comp->comp_index))
		return false;

	q->tail = desc_info->next;

	/* clean the related q entry, only one per cq completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

static u32 ionic_rx_walk_cq(struct ionic_cq *rxcq, u32 limit)
{
	u32 work_done = 0;

	while (ionic_rx_service(rxcq, rxcq->tail)) {
		if (rxcq->tail->last)
			rxcq->done_color = !rxcq->done_color;
		rxcq->tail = rxcq->tail->next;
		DEBUG_STATS_CQE_CNT(rxcq);

		if (++work_done >= limit)
			break;
	}

	return work_done;
}

void ionic_rx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_rx_walk_cq(cq, cq->num_descs);

	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

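/* Allocate and DMA-map a full page for the Rx ring.  GFP_ATOMIC is
 * used since this may be called from NAPI (atomic) context.
 */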
static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
					dma_addr_t *dma_addr)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct device *dev;
	struct page *page;

	netdev = lif->netdev;
	dev = lif->ionic->dev;
	stats = q_to_rx_stats(q);
	page = alloc_page(GFP_ATOMIC);
	if (unlikely(!page)) {
		net_err_ratelimited("%s: Page alloc failed on %s!\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	*dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, *dma_addr))) {
		__free_page(page);
		net_err_ratelimited("%s: DMA single map failed on %s!\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return NULL;
	}

	return page;
}

static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
			       dma_addr_t dma_addr)
{
	struct ionic_lif *lif = q->lif;
	struct net_device *netdev;
	struct device *dev;

	netdev = lif->netdev;
	dev = lif->ionic->dev;

	if (unlikely(!page)) {
		net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n",
				    netdev->name, q->name);
		return;
	}

	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

	__free_page(page);
}

#define IONIC_RX_RING_DOORBELL_STRIDE		((1 << 5) - 1)
#define IONIC_RX_RING_HEAD_BUF_SZ		2048

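/* Refill the Rx ring with page buffers.  Descriptors whose page is
 * still attached (e.g. after the copybreak path) are simply re-posted;
 * otherwise fresh pages are allocated for the main descriptor and any
 * SG elements needed to cover the MTU.  The doorbell is rung only once
 * every IONIC_RX_RING_DOORBELL_STRIDE + 1 (32) descriptors rather than
 * per descriptor.  For example, with a 1500-byte MTU and 4K pages,
 * nfrags is 1 and only the SIMPLE opcode is used.
 */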
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_page_info *page_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int seg_len;
	unsigned int nfrags;
	bool ring_doorbell;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN;
	nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;

	for (i = ionic_q_space_avail(q); i; i--) {
		remain_len = len;
		desc_info = q->head;
		desc = desc_info->desc;
		sg_desc = desc_info->sg_desc;
		page_info = &desc_info->pages[0];

		if (page_info->page) { /* recycle the buffer */
			ring_doorbell = ((q->head->index + 1) &
					IONIC_RX_RING_DOORBELL_STRIDE) == 0;
			ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
			continue;
		}

		/* fill main descriptor - pages[0] */
		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->npages = nfrags;
		page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
		if (unlikely(!page_info->page)) {
			desc->addr = 0;
			desc->len = 0;
			return;
		}
		desc->addr = cpu_to_le64(page_info->dma_addr);
		seg_len = min_t(unsigned int, PAGE_SIZE, len);
		desc->len = cpu_to_le16(seg_len);
		remain_len -= seg_len;
		page_info++;

		/* fill sg descriptors - pages[1..n] */
		for (j = 0; j < nfrags - 1; j++) {
			if (page_info->page) /* recycle the sg buffer */
				continue;

			sg_elem = &sg_desc->elems[j];
			page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
			if (unlikely(!page_info->page)) {
				sg_elem->addr = 0;
				sg_elem->len = 0;
				return;
			}
			sg_elem->addr = cpu_to_le64(page_info->dma_addr);
			seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);
			sg_elem->len = cpu_to_le16(seg_len);
			remain_len -= seg_len;
			page_info++;
		}

		ring_doorbell = ((q->head->index + 1) &
				IONIC_RX_RING_DOORBELL_STRIDE) == 0;
		ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
	}
}

static void ionic_rx_fill_cb(void *arg)
{
	ionic_rx_fill(arg);
}

void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *cur;
	struct ionic_rxq_desc *desc;
	unsigned int i;

	for (cur = q->tail; cur != q->head; cur = cur->next) {
		desc = cur->desc;
		desc->addr = 0;
		desc->len = 0;

		for (i = 0; i < cur->npages; i++) {
			if (likely(cur->pages[i].page)) {
				ionic_rx_page_free(q, cur->pages[i].page,
						   cur->pages[i].dma_addr);
				cur->pages[i].page = NULL;
				cur->pages[i].dma_addr = 0;
			}
		}

		cur->cb_arg = NULL;
	}
}

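/* NAPI poll handler shared by the Tx and Rx rings of a queue pair:
 * Tx completions are flushed first, then up to 'budget' Rx completions
 * are processed, the Rx ring is refilled, and interrupt credits are
 * returned (unmasking the interrupt once polling is complete).
 */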
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u32 work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi].qcq->cq;

	ionic_tx_flush(txcq);

	work_done = ionic_rx_walk_cq(rxcq, budget);

	if (work_done)
		ionic_rx_fill_cb(rxcq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

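/* Tx descriptor cleanup: unmap the buffers referenced by the
 * descriptor and its SG list, free the skb (the cb_arg) if one was
 * attached, account the completion, and wake the subqueue if it had
 * been stopped for lack of descriptors.
 */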
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = desc_info->desc;
	struct device *dev = q->lif->ionic->dev;
	u8 opcode, flags, nsge;
	u16 queue_index;
	unsigned int i;
	u64 addr;

	decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
			    &opcode, &flags, &nsge, &addr);

	/* use unmap_single only if this is not a TSO descriptor,
	 * or if it is the first descriptor of a TSO
	 */
	if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
	    flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
		dma_unmap_single(dev, (dma_addr_t)addr,
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);
	else
		dma_unmap_page(dev, (dma_addr_t)addr,
			       le16_to_cpu(desc->len), DMA_TO_DEVICE);

	for (i = 0; i < nsge; i++, elem++)
		dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
			       le16_to_cpu(elem->len), DMA_TO_DEVICE);

	if (cb_arg) {
		struct sk_buff *skb = cb_arg;
		u32 len = skb->len;

		queue_index = skb_get_queue_mapping(skb);
		if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
						      queue_index))) {
			netif_wake_subqueue(q->lif->netdev, queue_index);
			q->wake++;
		}
		dev_kfree_skb_any(skb);
		stats->clean++;
		netdev_tx_completed_queue(q_to_ndq(q), 1, len);
	}
}

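/* Process Tx completions: a single cq entry can complete several queue
 * entries, so clean from the queue tail up to and including the
 * descriptor index reported in the completion, then return the
 * interrupt credits.
 */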
void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_txq_comp *comp = cq->tail->cq_desc;
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	unsigned int work_done = 0;

	/* walk the completed cq entries */
	while (work_done < cq->num_descs &&
	       color_match(comp->color, cq->done_color)) {

		/* clean the related q entries, there could be
		 * several q entries completed for each cq completion
		 */
		do {
			desc_info = q->tail;
			q->tail = desc_info->next;
			ionic_tx_clean(q, desc_info, cq->tail,
				       desc_info->cb_arg);
			desc_info->cb = NULL;
			desc_info->cb_arg = NULL;
		} while (desc_info->index != le16_to_cpu(comp->comp_index));

		if (cq->tail->last)
			cq->done_color = !cq->done_color;

		cq->tail = cq->tail->next;
		comp = cq->tail->cq_desc;
		DEBUG_STATS_CQE_CNT(cq);

		work_done++;
	}

	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, 0);
}

void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	int done = 0;

	/* walk any Tx entries that have not yet been completed */
	while (q->head != q->tail) {
		desc_info = q->tail;
		q->tail = desc_info->next;
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
		done++;
	}
}

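/* For TSO the (inner-most) TCP checksum field is preseeded with the
 * pseudo-header checksum computed with a zero length, so the hardware
 * can later add in the length of each resulting segment; these helpers
 * handle the plain and the encapsulated (tunnel) cases.
 */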
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (done) {
		skb_tx_timestamp(skb);
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, false, ionic_tx_clean, NULL);
	}
}

static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
						struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
	struct ionic_txq_desc *desc = q->head->desc;

	*elem = sg_desc->elems;
	return desc;
}

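/* TSO transmit: the skb head and frags are chopped into descriptor
 * segments of at most mss payload bytes (the first segment also
 * carries the hdrlen header bytes), with extra pieces of a segment
 * landing in SG elements.  The SOT/EOT flags mark the first and last
 * descriptors of the TSO, and the skb is attached only to the last
 * descriptor so it is freed once the whole burst has completed.  On a
 * mapping failure the already-built descriptors are unwound.
 */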
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *abort = q->head;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_desc_info *rewind = abort;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int frag_left = 0;
	unsigned int offset = 0;
	unsigned int len_left;
	dma_addr_t desc_addr;
	unsigned int hdrlen;
	unsigned int nfrags;
	unsigned int seglen;
	u64 total_bytes = 0;
	u64 total_pkts = 0;
	unsigned int left;
	unsigned int len;
	unsigned int mss;
	skb_frag_t *frag;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	mss = skb_shinfo(skb)->gso_size;
	nfrags = skb_shinfo(skb)->nr_frags;
	len_left = skb->len - skb_headlen(skb);
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero.  HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	seglen = hdrlen + mss;
	left = skb_headlen(skb);

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

	/* Chop skb->data up into desc segments */

	while (left > 0) {
		len = min(seglen, left);
		frag_left = seglen - len;
		desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
		if (dma_mapping_error(dev, desc_addr))
			goto err_out_abort;
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (nfrags > 0 && frag_left > 0)
			continue;
		done = (nfrags == 0 && left == 0);
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss,
				  outer_csum,
				  vlan_tci, has_vlan,
				  start, done);
		total_pkts++;
		total_bytes += start ? len : len + hdrlen;
		desc = ionic_tx_tso_next(q, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop skb frags into desc segments */

	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		offset = 0;
		left = skb_frag_size(frag);
		len_left -= left;
		nfrags--;
		stats->frags++;

		while (left > 0) {
			if (frag_left > 0) {
				len = min(frag_left, left);
				frag_left -= len;
				elem->addr =
				    cpu_to_le64(ionic_tx_map_frag(q, frag,
								  offset, len));
				if (dma_mapping_error(dev, elem->addr))
					goto err_out_abort;
				elem->len = cpu_to_le16(len);
				elem++;
				desc_nsge++;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			} else {
				len = min(mss, left);
				frag_left = mss - len;
				desc_addr = ionic_tx_map_frag(q, frag,
							      offset, len);
				if (dma_mapping_error(dev, desc_addr))
					goto err_out_abort;
				desc_len = len;
				desc_nsge = 0;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			}
		}
	}

	stats->pkts += total_pkts;
	stats->bytes += total_bytes;
	stats->tso++;
	stats->tso_bytes += total_bytes;

	return 0;

err_out_abort:
	while (rewind->desc != q->head->desc) {
		ionic_tx_clean(q, rewind, NULL, NULL);
		rewind = rewind->next;
	}
	q->head = abort;

	return -ENOMEM;
}

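/* Build a CSUM_PARTIAL descriptor for the skb head: the checksum start
 * and offset are passed down so the device can insert the L4 checksum
 * (and the VLAN tag, when one is present).
 */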
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = q->head->desc;
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	}

	if (skb->csum_not_inet)
		stats->crc32_csum++;
	else
		stats->csum++;

	return 0;
}

static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = q->head->desc;
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	}

	stats->csum_none++;

	return 0;
}

static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
	unsigned int len_left = skb->len - skb_headlen(skb);
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	skb_frag_t *frag;
	u16 len;

	for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) {
		len = skb_frag_size(frag);
		elem->len = cpu_to_le16(len);
		dma_addr = ionic_tx_map_frag(q, frag, 0, len);
		if (dma_mapping_error(dev, dma_addr))
			return -ENOMEM;
		elem->addr = cpu_to_le64(dma_addr);
		len_left -= len;
		stats->frags++;
	}

	return 0;
}

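/* Non-TSO transmit: one descriptor covers the skb head (with or
 * without checksum offload) and each page frag goes into an SG
 * element; the skb itself is passed as the completion callback
 * argument so it is freed in ionic_tx_clean().
 */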
static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = ionic_tx_calc_csum(q, skb);
	else
		err = ionic_tx_calc_no_csum(q, skb);
	if (err)
		return err;

	/* add frags */
	err = ionic_tx_skb_frags(q, skb);
	if (err)
		return err;

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

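/* Estimate how many Tx descriptors the skb will consume.  A TSO skb
 * needs roughly one descriptor per MSS-sized segment; a non-TSO skb
 * needs a single descriptor as long as its frags fit in the SG list,
 * otherwise it is linearized first.  Returns a negative errno if
 * linearization fails.
 */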
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* If TSO, need roundup(skb->len/mss) descs */
	if (skb_is_gso(skb))
		return (skb->len / skb_shinfo(skb)->gso_size) + 1;

	/* If non-TSO, just need 1 desc and nr_frags sg elems */
	if (skb_shinfo(skb)->nr_frags <= sg_elems)
		return 1;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	/* Need 1 desc and zero sg elems */
	return 1;
}

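/* Stop the subqueue if there is not enough ring space for ndescs
 * descriptors.  Re-check after stopping since ionic_tx_clean() may
 * have freed descriptors in the meantime; the smp_rmb() orders the
 * re-read of the queue indices against the stop.
 */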
static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		q->stop++;
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

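/* ndo_start_xmit handler: drop if the LIF is down, pick the Tx queue
 * from the skb's queue mapping, make sure enough descriptors are
 * available (returning NETDEV_TX_BUSY if not), then hand the skb to
 * the TSO or the regular transmit path.
 */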
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(!lif_to_txqcq(lif, queue_index)))
		queue_index = 0;
	q = lif_to_txq(lif, queue_index);

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->stop++;
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}