// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"


static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	DEBUG_STATS_TXQ_POST(q, ring_dbell);

	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);

	DEBUG_STATS_RX_BUFF_CNT(q);
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

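/* Allocate a fresh page for an Rx buffer and map it for DMA from the
 * device; the mapping and page are recorded in buf_info until they are
 * freed or handed up the stack.
 */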
static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_rx_stats *stats;
	struct device *dev;
	struct page *page;

	dev = q->dev;
	stats = q_to_rx_stats(q);

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
				    netdev->name, q->name);
		return -EINVAL;
	}

	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
	if (unlikely(!page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return -ENOMEM;
	}

	buf_info->dma_addr = dma_map_page(dev, page, 0,
					  IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
		__free_pages(page, 0);
		net_err_ratelimited("%s: %s dma map failed\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return -EIO;
	}

	buf_info->page = page;
	buf_info->page_offset = 0;

	return 0;
}

static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct device *dev = q->dev;

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in free\n",
				    netdev->name, q->name);
		return;
	}

	if (!buf_info->page)
		return;

	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	__free_pages(buf_info->page, 0);
	buf_info->page = NULL;
}

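/* Decide whether the current Rx page can be reused for another buffer.
 * Pages from pfmemalloc reserves or remote NUMA nodes are not recycled;
 * otherwise the offset is advanced by the (aligned) used length and the
 * page refcount is bumped so both the stack and the ring hold a reference.
 */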
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
				 struct ionic_buf_info *buf_info, u32 used)
{
	u32 size;

	/* don't re-use pages allocated in low-mem condition */
	if (page_is_pfmemalloc(buf_info->page))
		return false;

	/* don't re-use buffers from non-local numa nodes */
	if (page_to_nid(buf_info->page) != numa_mem_id())
		return false;

	size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
	buf_info->page_offset += size;
	if (buf_info->page_offset >= IONIC_PAGE_SIZE)
		return false;

	get_page(buf_info->page);

	return true;
}

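/* Build an skb from the completed receive by attaching the posted pages
 * as frags; buffers that cannot be recycled are unmapped and dropped from
 * the ring so ionic_rx_fill() will replace them.
 */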
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	prefetchw(buf_info->page);

	skb = napi_get_frags(&q_to_qcq(q)->napi);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!buf_info->page)) {
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		len -= frag_len;

		dma_sync_single_for_cpu(dev,
					buf_info->dma_addr + buf_info->page_offset,
					frag_len, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				buf_info->page, buf_info->page_offset, frag_len,
				IONIC_PAGE_SIZE);

		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
			dma_unmap_page(dev, buf_info->dma_addr,
				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
			buf_info->page = NULL;
		}

		buf_info++;

		i--;
	} while (i > 0);

	return skb;
}

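/* For small packets (at or below rx_copybreak) copy the data into a
 * freshly allocated linear skb and leave the original page posted for
 * reuse, rather than handing a whole page up the stack for a tiny frame.
 */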
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	if (unlikely(!buf_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
	dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

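/* Per-completion Rx handler: count the packet, build the skb (copybreak
 * or frags), then apply RSS hash, checksum, VLAN and hardware timestamp
 * metadata before handing it to GRO.
 */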
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct ionic_rxq_comp *comp;
	struct sk_buff *skb;

	comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);

	stats = q_to_rx_stats(q);

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, comp);
	else
		skb = ionic_rx_frags(q, desc_info, comp);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
		stats->csum_complete++;
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
		__le64 *cq_desc_hwstamp;
		u64 hwstamp;

		cq_desc_hwstamp =
			cq_info->cq_desc +
			qcq->cq.desc_size -
			sizeof(struct ionic_rxq_comp) -
			IONIC_HWSTAMP_CQ_NEGOFFSET;

		hwstamp = le64_to_cpu(*cq_desc_hwstamp);

		if (hwstamp != IONIC_HWSTAMP_INVALID) {
			skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
			stats->hwstamp_valid++;
		} else {
			stats->hwstamp_invalid++;
		}
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

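/* Service one Rx completion if its color bit shows it is ready; returns
 * false when there is nothing (more) to do for this queue.
 */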
bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_comp *comp;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

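/* Refill the Rx ring: post one descriptor (plus SG elements as needed to
 * cover an MTU-sized frame) for each free slot, allocating pages only
 * where the previous buffer was consumed, then ring the doorbell once at
 * the end.
 */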
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_buf_info *buf_info;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int frag_len;
	unsigned int nfrags;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;

	for (i = ionic_q_space_avail(q); i; i--) {
		nfrags = 0;
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		buf_info = &desc_info->bufs[0];

		if (!buf_info->page) { /* alloc a new buffer? */
			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
				desc->addr = 0;
				desc->len = 0;
				return;
			}
		}

		/* fill main descriptor - buf[0] */
		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		desc->len = cpu_to_le16(frag_len);
		remain_len -= frag_len;
		buf_info++;
		nfrags++;

		/* fill sg descriptors - buf[1..n] */
		sg_desc = desc_info->sg_desc;
		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
			sg_elem = &sg_desc->elems[j];
			if (!buf_info->page) { /* alloc a new sg buffer? */
				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
					sg_elem->addr = 0;
					sg_elem->len = 0;
					return;
				}
			}

			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
			frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
			sg_elem->len = cpu_to_le16(frag_len);
			remain_len -= frag_len;
			buf_info++;
			nfrags++;
		}

		/* clear end sg element as a sentinel */
		if (j < q->max_sg_elems) {
			sg_elem = &sg_desc->elems[j];
			memset(sg_elem, 0, sizeof(*sg_elem));
		}

		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->nbufs = nfrags;

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);
}

void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	unsigned int i, j;

	for (i = 0; i < q->num_descs; i++) {
		desc_info = &q->info[i];
		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
			buf_info = &desc_info->bufs[j];
			if (buf_info->page)
				ionic_rx_page_free(q, buf_info);
		}

		desc_info->nbufs = 0;
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	q->head_idx = 0;
	q->tail_idx = 0;
}

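/* Feed the latest per-queue packet/byte counts into the dynamic interrupt
 * moderation (DIM) engine, if adaptive coalescing is enabled for this
 * interrupt.
 */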
static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;
	u64 pkts, bytes;

	if (!qcq->intr.dim_coal_hw)
		return;

	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	switch (napi_mode) {
	case IONIC_LIF_F_TX_DIM_INTR:
		pkts = lif->txqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes;
		break;
	case IONIC_LIF_F_RX_DIM_INTR:
		pkts = lif->rxqstats[qi].pkts;
		bytes = lif->rxqstats[qi].bytes;
		break;
	default:
		pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
		break;
	}

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  pkts, bytes, &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}

int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u16 rx_fill_threshold;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
				  cq->num_descs / IONIC_RX_FILL_DIV);
	if (work_done && ionic_q_space_avail(cq->bound_q) >= rx_fill_threshold)
		ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

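/* Combined NAPI handler used when a Tx queue and an Rx queue share an
 * interrupt: clean the paired Tx CQ with a fixed budget, then service Rx
 * against the NAPI budget, refill the ring, and rearm/credit the
 * interrupt as one unit.
 */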
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u16 rx_fill_threshold;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);

	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
				  rxcq->num_descs / IONIC_RX_FILL_DIV);
	if (rx_work_done && ionic_q_space_avail(rxcq->bound_q) >= rx_fill_threshold)
		ionic_rx_fill(rxcq->bound_q);

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(qcq, 0);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
	DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);

	return rx_work_done;
}

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

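/* DMA-map the skb head and all of its frags, recording each mapping in
 * desc_info->bufs[]; on a partial failure the mappings done so far are
 * unwound before returning an error.
 */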
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
			    struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;
	unsigned int nfrags;
	skb_frag_t *frag;
	int frag_idx;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr)) {
		stats->dma_map_err++;
		return -EIO;
	}
	buf_info->dma_addr = dma_addr;
	buf_info->len = skb_headlen(skb);
	buf_info++;

	frag = skb_shinfo(skb)->frags;
	nfrags = skb_shinfo(skb)->nr_frags;
	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
		if (dma_mapping_error(dev, dma_addr)) {
			stats->dma_map_err++;
			goto dma_fail;
		}
		buf_info->dma_addr = dma_addr;
		buf_info->len = skb_frag_size(frag);
		buf_info++;
	}

	desc_info->nbufs = 1 + nfrags;

	return 0;

dma_fail:
	/* unwind the frag mappings and the head mapping */
	while (frag_idx > 0) {
		frag_idx--;
		buf_info--;
		dma_unmap_page(dev, buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);
	}
	/* step back to bufs[0] so the head mapping is the one unmapped */
	buf_info--;
	dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE);
	return -EIO;
}

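/* Completion-side cleanup for one Tx descriptor: unmap its buffers,
 * report a Tx hardware timestamp if this is the timestamping queue,
 * wake the subqueue if it had been stopped, and free the skb.
 */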
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct sk_buff *skb = cb_arg;
	struct device *dev = q->dev;
	unsigned int i;
	u16 qi;

	if (desc_info->nbufs) {
		dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
				 buf_info->len, DMA_TO_DEVICE);
		buf_info++;
		for (i = 1; i < desc_info->nbufs; i++, buf_info++)
			dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
				       buf_info->len, DMA_TO_DEVICE);
	}

	if (!skb)
		return;

	qi = skb_get_queue_mapping(skb);

	if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
		if (cq_info) {
			struct skb_shared_hwtstamps hwts = {};
			__le64 *cq_desc_hwstamp;
			u64 hwstamp;

			cq_desc_hwstamp =
				cq_info->cq_desc +
				qcq->cq.desc_size -
				sizeof(struct ionic_txq_comp) -
				IONIC_HWSTAMP_CQ_NEGOFFSET;

			hwstamp = le64_to_cpu(*cq_desc_hwstamp);

			if (hwstamp != IONIC_HWSTAMP_INVALID) {
				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);

				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
				skb_tstamp_tx(skb, &hwts);

				stats->hwstamp_valid++;
			} else {
				stats->hwstamp_invalid++;
			}
		}

	} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
		netif_wake_subqueue(q->lif->netdev, qi);
		q->wake++;
	}

	desc_info->bytes = skb->len;
	stats->clean++;

	dev_consume_skb_any(skb);
}

bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_txq_comp *comp;
	int bytes = 0;
	int pkts = 0;
	u16 index;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);

	return true;
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	int bytes = 0;
	int pkts = 0;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
}

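/* Seed the inner TCP checksum field with a pseudo-header sum computed
 * over a zero length so the hardware can fill in per-segment checksums
 * during TSO of encapsulated traffic.
 */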
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (start) {
		skb_tx_timestamp(skb);
		if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
			netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, false, ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, done, NULL, NULL);
	}
}

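/* Segment an skb for hardware TSO: map the buffers once, then walk the
 * mapped fragments carving them into per-segment descriptors of at most
 * mss payload bytes (the first segment also carries hdrlen of headers).
 */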
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int chunk_len;
	unsigned int frag_rem;
	unsigned int tso_rem;
	unsigned int seg_rem;
	dma_addr_t desc_addr;
	dma_addr_t frag_addr;
	unsigned int hdrlen;
	unsigned int len;
	unsigned int mss;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	desc_info = &q->info[q->head_idx];
	buf_info = desc_info->bufs;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	len = skb->len;
	mss = skb_shinfo(skb)->gso_size;
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero.  HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_rem = len;
	seg_rem = min(tso_rem, hdrlen + mss);

	frag_addr = 0;
	frag_rem = 0;

	start = true;

	while (tso_rem > 0) {
		desc = NULL;
		elem = NULL;
		desc_addr = 0;
		desc_len = 0;
		desc_nsge = 0;
		/* use fragments until we have enough to post a single descriptor */
		while (seg_rem > 0) {
			/* if the fragment is exhausted then move to the next one */
			if (frag_rem == 0) {
				/* grab the next fragment */
				frag_addr = buf_info->dma_addr;
				frag_rem = buf_info->len;
				buf_info++;
			}
			chunk_len = min(frag_rem, seg_rem);
			if (!desc) {
				/* fill main descriptor */
				desc = desc_info->txq_desc;
				elem = desc_info->txq_sg_desc->elems;
				desc_addr = frag_addr;
				desc_len = chunk_len;
			} else {
				/* fill sg descriptor */
				elem->addr = cpu_to_le64(frag_addr);
				elem->len = cpu_to_le16(chunk_len);
				elem++;
				desc_nsge++;
			}
			frag_addr += chunk_len;
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}
		seg_rem = min(tso_rem, mss);
		done = (tso_rem == 0);
		/* post descriptor */
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
				  start, done);
		start = false;
		/* Buffer information is stored with the first tso descriptor */
		desc_info = &q->info[q->head_idx];
		desc_info->nbufs = 0;
	}

	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
	stats->bytes += len;
	stats->tso++;
	stats->tso_bytes += len;

	return 0;
}

static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
			      struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	if (skb_csum_is_sctp(skb))
		stats->crc32_csum++;
	else
		stats->csum++;

	return 0;
}

static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
				 struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = 0;
	desc->csum_offset = 0;

	stats->csum_none++;

	return 0;
}

static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
			      struct ionic_desc_info *desc_info)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
	struct ionic_buf_info *buf_info = &desc_info->bufs[1];
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
		elem->addr = cpu_to_le64(buf_info->dma_addr);
		elem->len = cpu_to_le16(buf_info->len);
	}

	stats->frags += skb_shinfo(skb)->nr_frags;

	return 0;
}

static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_desc_info *desc_info = &q->info[q->head_idx];
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = ionic_tx_calc_csum(q, skb, desc_info);
	else
		err = ionic_tx_calc_no_csum(q, skb, desc_info);
	if (err)
		return err;

	/* add frags */
	err = ionic_tx_skb_frags(q, skb, desc_info);
	if (err)
		return err;

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

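/* Estimate how many descriptors the skb will need (one per GSO segment
 * for TSO, otherwise one); if there are more frags than the SG list can
 * hold, linearize the skb first.
 */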
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int ndescs;
	int err;

	/* Each desc is mss long max, so a descriptor for each gso_seg */
	if (skb_is_gso(skb))
		ndescs = skb_shinfo(skb)->gso_segs;
	else
		ndescs = 1;

	/* If non-TSO, just need 1 desc and nr_frags sg elems */
	if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
		return ndescs;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	return ndescs;
}

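/* Stop the subqueue if there is not enough ring space for ndescs more
 * descriptors; re-check after stopping in case the queue was cleaned
 * concurrently, and wake it again if space opened up.
 */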
static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		q->stop++;
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q = &lif->hwstamp_txq->q;
	int err, ndescs;

	/* Does not stop/start txq, because we post to a separate tx queue
	 * for timestamping, and if a packet can't be posted immediately to
	 * the timestamping queue, it is dropped.
	 */

	ndescs = ionic_tx_descs_needed(q, skb);
	if (unlikely(ndescs < 0))
		goto err_out_drop;

	if (unlikely(!ionic_q_has_space(q, ndescs)))
		goto err_out_drop;

	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

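/* ndo_start_xmit entry point: pick the Tx queue (or divert PTP-stamped
 * frames to the dedicated timestamping queue), post the skb as TSO or a
 * regular descriptor, and stop the queue when it is nearly full.
 */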
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
			return ionic_start_hwstamp_xmit(skb, netdev);

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->stop++;
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}