xref: /openbmc/linux/drivers/net/ethernet/pensando/ionic/ionic_txrx.c (revision 27bbf45eae9ca98877a2d52a92a188147cd61b07)
1  // SPDX-License-Identifier: GPL-2.0
2  /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3  
4  #include <linux/ip.h>
5  #include <linux/ipv6.h>
6  #include <linux/if_vlan.h>
7  #include <net/ip6_checksum.h>
8  
9  #include "ionic.h"
10  #include "ionic_lif.h"
11  #include "ionic_txrx.h"
12  
13  static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
14  				  ionic_desc_cb cb_func, void *cb_arg)
15  {
16  	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
17  }
18  
19  static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
20  				  ionic_desc_cb cb_func, void *cb_arg)
21  {
22  	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
23  }
24  
25  bool ionic_txq_poke_doorbell(struct ionic_queue *q)
26  {
27  	unsigned long now, then, dif;
28  	struct netdev_queue *netdev_txq;
29  	struct net_device *netdev;
30  
31  	netdev = q->lif->netdev;
32  	netdev_txq = netdev_get_tx_queue(netdev, q->index);
33  
34  	HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());
35  
36  	if (q->tail_idx == q->head_idx) {
37  		HARD_TX_UNLOCK(netdev, netdev_txq);
38  		return false;
39  	}
40  
41  	now = READ_ONCE(jiffies);
42  	then = q->dbell_jiffies;
43  	dif = now - then;
44  
45  	if (dif > q->dbell_deadline) {
46  		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
47  				 q->dbval | q->head_idx);
48  
49  		q->dbell_jiffies = now;
50  	}
51  
52  	HARD_TX_UNLOCK(netdev, netdev_txq);
53  
54  	return true;
55  }
56  
57  bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
58  {
59  	unsigned long now, then, dif;
60  
61  	/* no lock, called from rx napi or txrx napi, nothing else can fill */
62  
63  	if (q->tail_idx == q->head_idx)
64  		return false;
65  
66  	now = READ_ONCE(jiffies);
67  	then = q->dbell_jiffies;
68  	dif = now - then;
69  
70  	if (dif > q->dbell_deadline) {
71  		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
72  				 q->dbval | q->head_idx);
73  
74  		q->dbell_jiffies = now;
75  
76  		dif = 2 * q->dbell_deadline;
77  		if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
78  			dif = IONIC_RX_MAX_DOORBELL_DEADLINE;
79  
80  		q->dbell_deadline = dif;
81  	}
82  
83  	return true;
84  }
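
/*
 * Doorbell "poke" helpers: if descriptors have been posted but the
 * doorbell has not been rung within dbell_deadline jiffies, ring it
 * again from napi.  The tx variant takes HARD_TX_LOCK because the
 * transmit path may be posting concurrently; the rx variant needs no
 * lock since only napi refills the rx ring.  On the rx side the
 * deadline also backs off: starting from IONIC_RX_MIN_DOORBELL_DEADLINE
 * it doubles on each expired poke (d, 2d, 4d, ...) until it is clamped
 * at IONIC_RX_MAX_DOORBELL_DEADLINE, and ionic_rx_fill() resets it to
 * the minimum once the ring is refilled.
 */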
85  
86  static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
87  {
88  	return netdev_get_tx_queue(q->lif->netdev, q->index);
89  }
90  
91  static int ionic_rx_page_alloc(struct ionic_queue *q,
92  			       struct ionic_buf_info *buf_info)
93  {
94  	struct net_device *netdev = q->lif->netdev;
95  	struct ionic_rx_stats *stats;
96  	struct device *dev;
97  	struct page *page;
98  
99  	dev = q->dev;
100  	stats = q_to_rx_stats(q);
101  
102  	if (unlikely(!buf_info)) {
103  		net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
104  				    netdev->name, q->name);
105  		return -EINVAL;
106  	}
107  
108  	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
109  	if (unlikely(!page)) {
110  		net_err_ratelimited("%s: %s page alloc failed\n",
111  				    netdev->name, q->name);
112  		stats->alloc_err++;
113  		return -ENOMEM;
114  	}
115  
116  	buf_info->dma_addr = dma_map_page(dev, page, 0,
117  					  IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
118  	if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
119  		__free_pages(page, 0);
120  		net_err_ratelimited("%s: %s dma map failed\n",
121  				    netdev->name, q->name);
122  		stats->dma_map_err++;
123  		return -EIO;
124  	}
125  
126  	buf_info->page = page;
127  	buf_info->page_offset = 0;
128  
129  	return 0;
130  }
131  
132  static void ionic_rx_page_free(struct ionic_queue *q,
133  			       struct ionic_buf_info *buf_info)
134  {
135  	struct net_device *netdev = q->lif->netdev;
136  	struct device *dev = q->dev;
137  
138  	if (unlikely(!buf_info)) {
139  		net_err_ratelimited("%s: %s invalid buf_info in free\n",
140  				    netdev->name, q->name);
141  		return;
142  	}
143  
144  	if (!buf_info->page)
145  		return;
146  
147  	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
148  	__free_pages(buf_info->page, 0);
149  	buf_info->page = NULL;
150  }
151  
152  static bool ionic_rx_buf_recycle(struct ionic_queue *q,
153  				 struct ionic_buf_info *buf_info, u32 used)
154  {
155  	u32 size;
156  
157  	/* don't re-use pages allocated in low-mem condition */
158  	if (page_is_pfmemalloc(buf_info->page))
159  		return false;
160  
161  	/* don't re-use buffers from non-local numa nodes */
162  	if (page_to_nid(buf_info->page) != numa_mem_id())
163  		return false;
164  
165  	size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
166  	buf_info->page_offset += size;
167  	if (buf_info->page_offset >= IONIC_PAGE_SIZE)
168  		return false;
169  
170  	get_page(buf_info->page);
171  
172  	return true;
173  }
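
/*
 * Buffer recycling: a received buffer is re-armed in place when its
 * page can safely be shared.  The consumed length is rounded up to
 * IONIC_PAGE_SPLIT_SZ and added to page_offset; if the resulting
 * offset is still inside the page, get_page() takes an extra reference
 * so the page can simultaneously sit in the skb frag just handed to
 * the stack and remain in the ring for the next fill.  Pages from
 * pfmemalloc reserves or a remote NUMA node are never recycled, and
 * neither is a page whose remaining space is exhausted; in those cases
 * the caller unmaps the page and clears buf_info->page so that
 * ionic_rx_fill() allocates a fresh one.
 */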
174  
175  static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
176  				      struct ionic_desc_info *desc_info,
177  				      struct ionic_rxq_comp *comp)
178  {
179  	struct net_device *netdev = q->lif->netdev;
180  	struct ionic_buf_info *buf_info;
181  	struct ionic_rx_stats *stats;
182  	struct device *dev = q->dev;
183  	struct sk_buff *skb;
184  	unsigned int i;
185  	u16 frag_len;
186  	u16 len;
187  
188  	stats = q_to_rx_stats(q);
189  
190  	buf_info = &desc_info->bufs[0];
191  	len = le16_to_cpu(comp->len);
192  
193  	prefetchw(buf_info->page);
194  
195  	skb = napi_get_frags(&q_to_qcq(q)->napi);
196  	if (unlikely(!skb)) {
197  		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
198  				     netdev->name, q->name);
199  		stats->alloc_err++;
200  		return NULL;
201  	}
202  
203  	i = comp->num_sg_elems + 1;
204  	do {
205  		if (unlikely(!buf_info->page)) {
206  			dev_kfree_skb(skb);
207  			return NULL;
208  		}
209  
210  		frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
211  						 IONIC_PAGE_SIZE - buf_info->page_offset));
212  		len -= frag_len;
213  
214  		dma_sync_single_for_cpu(dev,
215  					buf_info->dma_addr + buf_info->page_offset,
216  					frag_len, DMA_FROM_DEVICE);
217  
218  		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
219  				buf_info->page, buf_info->page_offset, frag_len,
220  				IONIC_PAGE_SIZE);
221  
222  		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
223  			dma_unmap_page(dev, buf_info->dma_addr,
224  				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
225  			buf_info->page = NULL;
226  		}
227  
228  		buf_info++;
229  
230  		i--;
231  	} while (i > 0);
232  
233  	return skb;
234  }
235  
236  static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
237  					  struct ionic_desc_info *desc_info,
238  					  struct ionic_rxq_comp *comp)
239  {
240  	struct net_device *netdev = q->lif->netdev;
241  	struct ionic_buf_info *buf_info;
242  	struct ionic_rx_stats *stats;
243  	struct device *dev = q->dev;
244  	struct sk_buff *skb;
245  	u16 len;
246  
247  	stats = q_to_rx_stats(q);
248  
249  	buf_info = &desc_info->bufs[0];
250  	len = le16_to_cpu(comp->len);
251  
252  	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
253  	if (unlikely(!skb)) {
254  		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
255  				     netdev->name, q->name);
256  		stats->alloc_err++;
257  		return NULL;
258  	}
259  
260  	if (unlikely(!buf_info->page)) {
261  		dev_kfree_skb(skb);
262  		return NULL;
263  	}
264  
265  	dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
266  				len, DMA_FROM_DEVICE);
267  	skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
268  	dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
269  				   len, DMA_FROM_DEVICE);
270  
271  	skb_put(skb, len);
272  	skb->protocol = eth_type_trans(skb, q->lif->netdev);
273  
274  	return skb;
275  }
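
/*
 * Two rx delivery paths: frames no longer than lif->rx_copybreak are
 * copied into a small linear skb (ionic_rx_copybreak) so the mapped
 * page is left untouched and can be posted again as-is, while larger
 * frames are attached as page fragments to a napi_get_frags() skb
 * (ionic_rx_frags).  ionic_rx_clean() picks the path from comp->len
 * and later completes it with napi_gro_receive() or napi_gro_frags()
 * respectively.  Note that the copybreak path syncs the buffer back to
 * the device after the memcpy since ownership stays with the hardware.
 */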
276  
277  static void ionic_rx_clean(struct ionic_queue *q,
278  			   struct ionic_desc_info *desc_info,
279  			   struct ionic_cq_info *cq_info,
280  			   void *cb_arg)
281  {
282  	struct net_device *netdev = q->lif->netdev;
283  	struct ionic_qcq *qcq = q_to_qcq(q);
284  	struct ionic_rx_stats *stats;
285  	struct ionic_rxq_comp *comp;
286  	struct sk_buff *skb;
287  
288  	comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);
289  
290  	stats = q_to_rx_stats(q);
291  
292  	if (comp->status) {
293  		stats->dropped++;
294  		return;
295  	}
296  
297  	stats->pkts++;
298  	stats->bytes += le16_to_cpu(comp->len);
299  
300  	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
301  		skb = ionic_rx_copybreak(q, desc_info, comp);
302  	else
303  		skb = ionic_rx_frags(q, desc_info, comp);
304  
305  	if (unlikely(!skb)) {
306  		stats->dropped++;
307  		return;
308  	}
309  
310  	skb_record_rx_queue(skb, q->index);
311  
312  	if (likely(netdev->features & NETIF_F_RXHASH)) {
313  		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
314  		case IONIC_PKT_TYPE_IPV4:
315  		case IONIC_PKT_TYPE_IPV6:
316  			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
317  				     PKT_HASH_TYPE_L3);
318  			break;
319  		case IONIC_PKT_TYPE_IPV4_TCP:
320  		case IONIC_PKT_TYPE_IPV6_TCP:
321  		case IONIC_PKT_TYPE_IPV4_UDP:
322  		case IONIC_PKT_TYPE_IPV6_UDP:
323  			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
324  				     PKT_HASH_TYPE_L4);
325  			break;
326  		}
327  	}
328  
329  	if (likely(netdev->features & NETIF_F_RXCSUM) &&
330  	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
331  		skb->ip_summed = CHECKSUM_COMPLETE;
332  		skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
333  		stats->csum_complete++;
334  	} else {
335  		stats->csum_none++;
336  	}
337  
338  	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
339  		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
340  		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
341  		stats->csum_error++;
342  
343  	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
344  	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
345  		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
346  				       le16_to_cpu(comp->vlan_tci));
347  		stats->vlan_stripped++;
348  	}
349  
350  	if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
351  		__le64 *cq_desc_hwstamp;
352  		u64 hwstamp;
353  
354  		cq_desc_hwstamp =
355  			cq_info->cq_desc +
356  			qcq->cq.desc_size -
357  			sizeof(struct ionic_rxq_comp) -
358  			IONIC_HWSTAMP_CQ_NEGOFFSET;
359  
360  		hwstamp = le64_to_cpu(*cq_desc_hwstamp);
361  
362  		if (hwstamp != IONIC_HWSTAMP_INVALID) {
363  			skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
364  			stats->hwstamp_valid++;
365  		} else {
366  			stats->hwstamp_invalid++;
367  		}
368  	}
369  
370  	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
371  		napi_gro_receive(&qcq->napi, skb);
372  	else
373  		napi_gro_frags(&qcq->napi);
374  }
375  
376  bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
377  {
378  	struct ionic_queue *q = cq->bound_q;
379  	struct ionic_desc_info *desc_info;
380  	struct ionic_rxq_comp *comp;
381  
382  	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
383  
384  	if (!color_match(comp->pkt_type_color, cq->done_color))
385  		return false;
386  
387  	/* check for empty queue */
388  	if (q->tail_idx == q->head_idx)
389  		return false;
390  
391  	if (q->tail_idx != le16_to_cpu(comp->comp_index))
392  		return false;
393  
394  	desc_info = &q->info[q->tail_idx];
395  	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
396  
397  	/* clean the related q entry, only one per cq completion */
398  	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
399  
400  	desc_info->cb = NULL;
401  	desc_info->cb_arg = NULL;
402  
403  	return true;
404  }
405  
406  static inline void ionic_write_cmb_desc(struct ionic_queue *q,
407  					void __iomem *cmb_desc,
408  					void *desc)
409  {
410  	if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS)
411  		memcpy_toio(cmb_desc, desc, q->desc_size);
412  }
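
/*
 * When the queue was created with IONIC_QCQ_F_CMB_RINGS its descriptor
 * ring lives in the device's controller memory buffer, mapped as
 * __iomem.  Descriptors are still built in the regular host-memory
 * shadow ring and then copied out with memcpy_toio(); for normal
 * host-memory rings this helper is a no-op.
 */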
413  
414  void ionic_rx_fill(struct ionic_queue *q)
415  {
416  	struct net_device *netdev = q->lif->netdev;
417  	struct ionic_desc_info *desc_info;
418  	struct ionic_rxq_sg_desc *sg_desc;
419  	struct ionic_rxq_sg_elem *sg_elem;
420  	struct ionic_buf_info *buf_info;
421  	unsigned int fill_threshold;
422  	struct ionic_rxq_desc *desc;
423  	unsigned int remain_len;
424  	unsigned int frag_len;
425  	unsigned int nfrags;
426  	unsigned int n_fill;
427  	unsigned int i, j;
428  	unsigned int len;
429  
430  	n_fill = ionic_q_space_avail(q);
431  
432  	fill_threshold = min_t(unsigned int, IONIC_RX_FILL_THRESHOLD,
433  			       q->num_descs / IONIC_RX_FILL_DIV);
434  	if (n_fill < fill_threshold)
435  		return;
436  
437  	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
438  
439  	for (i = n_fill; i; i--) {
440  		nfrags = 0;
441  		remain_len = len;
442  		desc_info = &q->info[q->head_idx];
443  		desc = desc_info->desc;
444  		buf_info = &desc_info->bufs[0];
445  
446  		if (!buf_info->page) { /* alloc a new buffer? */
447  			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
448  				desc->addr = 0;
449  				desc->len = 0;
450  				return;
451  			}
452  		}
453  
454  		/* fill main descriptor - buf[0] */
455  		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
456  		frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
457  						 IONIC_PAGE_SIZE - buf_info->page_offset));
458  		desc->len = cpu_to_le16(frag_len);
459  		remain_len -= frag_len;
460  		buf_info++;
461  		nfrags++;
462  
463  		/* fill sg descriptors - buf[1..n] */
464  		sg_desc = desc_info->sg_desc;
465  		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
466  			sg_elem = &sg_desc->elems[j];
467  			if (!buf_info->page) { /* alloc a new sg buffer? */
468  				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
469  					sg_elem->addr = 0;
470  					sg_elem->len = 0;
471  					return;
472  				}
473  			}
474  
475  			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
476  			frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN,
477  								IONIC_PAGE_SIZE -
478  								buf_info->page_offset));
479  			sg_elem->len = cpu_to_le16(frag_len);
480  			remain_len -= frag_len;
481  			buf_info++;
482  			nfrags++;
483  		}
484  
485  		/* clear end sg element as a sentinel */
486  		if (j < q->max_sg_elems) {
487  			sg_elem = &sg_desc->elems[j];
488  			memset(sg_elem, 0, sizeof(*sg_elem));
489  		}
490  
491  		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
492  					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
493  		desc_info->nbufs = nfrags;
494  
495  		ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
496  
497  		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
498  	}
499  
500  	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
501  			 q->dbval | q->head_idx);
502  
503  	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
504  	q->dbell_jiffies = jiffies;
505  
506  	mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
507  		  jiffies + IONIC_NAPI_DEADLINE);
508  }
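
/*
 * Refill layout: each rx descriptor covers up to
 * min(IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - page_offset) bytes in its
 * main buffer, and the remainder of an MTU-sized frame
 * (netdev->mtu + ETH_HLEN + VLAN_HLEN) spills into up to
 * q->max_sg_elems scatter-gather elements; a zeroed element marks the
 * end of a short SG list.  Refilling is skipped entirely until at
 * least fill_threshold descriptors are free, the doorbell is rung once
 * per fill pass, and the rx doorbell deadline is reset to its minimum
 * with the napi deadline timer re-armed.
 */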
509  
510  void ionic_rx_empty(struct ionic_queue *q)
511  {
512  	struct ionic_desc_info *desc_info;
513  	struct ionic_buf_info *buf_info;
514  	unsigned int i, j;
515  
516  	for (i = 0; i < q->num_descs; i++) {
517  		desc_info = &q->info[i];
518  		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
519  			buf_info = &desc_info->bufs[j];
520  			if (buf_info->page)
521  				ionic_rx_page_free(q, buf_info);
522  		}
523  
524  		desc_info->nbufs = 0;
525  		desc_info->cb = NULL;
526  		desc_info->cb_arg = NULL;
527  	}
528  
529  	q->head_idx = 0;
530  	q->tail_idx = 0;
531  }
532  
533  static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
534  {
535  	struct dim_sample dim_sample;
536  	struct ionic_lif *lif;
537  	unsigned int qi;
538  	u64 pkts, bytes;
539  
540  	if (!qcq->intr.dim_coal_hw)
541  		return;
542  
543  	lif = qcq->q.lif;
544  	qi = qcq->cq.bound_q->index;
545  
546  	switch (napi_mode) {
547  	case IONIC_LIF_F_TX_DIM_INTR:
548  		pkts = lif->txqstats[qi].pkts;
549  		bytes = lif->txqstats[qi].bytes;
550  		break;
551  	case IONIC_LIF_F_RX_DIM_INTR:
552  		pkts = lif->rxqstats[qi].pkts;
553  		bytes = lif->rxqstats[qi].bytes;
554  		break;
555  	default:
556  		pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
557  		bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
558  		break;
559  	}
560  
561  	dim_update_sample(qcq->cq.bound_intr->rearm_count,
562  			  pkts, bytes, &dim_sample);
563  
564  	net_dim(&qcq->dim, dim_sample);
565  }
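
/*
 * Dynamic interrupt moderation: when adaptive coalescing is enabled
 * (dim_coal_hw is set), each napi pass feeds the queue's cumulative
 * packet and byte counters into net_dim() as a dim_sample keyed on the
 * interrupt's rearm_count.  net_dim() compares successive samples and,
 * when it decides the coalescing profile should change, schedules the
 * driver's dim work to program the new value (handled outside this
 * file).
 */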
566  
567  int ionic_tx_napi(struct napi_struct *napi, int budget)
568  {
569  	struct ionic_qcq *qcq = napi_to_qcq(napi);
570  	struct ionic_cq *cq = napi_to_cq(napi);
571  	struct ionic_dev *idev;
572  	struct ionic_lif *lif;
573  	u32 work_done = 0;
574  	u32 flags = 0;
575  
576  	lif = cq->bound_q->lif;
577  	idev = &lif->ionic->idev;
578  
579  	work_done = ionic_cq_service(cq, budget,
580  				     ionic_tx_service, NULL, NULL);
581  
582  	if (work_done < budget && napi_complete_done(napi, work_done)) {
583  		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
584  		flags |= IONIC_INTR_CRED_UNMASK;
585  		cq->bound_intr->rearm_count++;
586  	}
587  
588  	if (work_done || flags) {
589  		flags |= IONIC_INTR_CRED_RESET_COALESCE;
590  		ionic_intr_credits(idev->intr_ctrl,
591  				   cq->bound_intr->index,
592  				   work_done, flags);
593  	}
594  
595  	if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
596  		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
597  
598  	return work_done;
599  }
600  
601  int ionic_rx_napi(struct napi_struct *napi, int budget)
602  {
603  	struct ionic_qcq *qcq = napi_to_qcq(napi);
604  	struct ionic_cq *cq = napi_to_cq(napi);
605  	struct ionic_dev *idev;
606  	struct ionic_lif *lif;
607  	u32 work_done = 0;
608  	u32 flags = 0;
609  
610  	lif = cq->bound_q->lif;
611  	idev = &lif->ionic->idev;
612  
613  	work_done = ionic_cq_service(cq, budget,
614  				     ionic_rx_service, NULL, NULL);
615  
616  	ionic_rx_fill(cq->bound_q);
617  
618  	if (work_done < budget && napi_complete_done(napi, work_done)) {
619  		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
620  		flags |= IONIC_INTR_CRED_UNMASK;
621  		cq->bound_intr->rearm_count++;
622  	}
623  
624  	if (work_done || flags) {
625  		flags |= IONIC_INTR_CRED_RESET_COALESCE;
626  		ionic_intr_credits(idev->intr_ctrl,
627  				   cq->bound_intr->index,
628  				   work_done, flags);
629  	}
630  
631  	if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
632  		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
633  
634  	return work_done;
635  }
636  
637  int ionic_txrx_napi(struct napi_struct *napi, int budget)
638  {
639  	struct ionic_qcq *rxqcq = napi_to_qcq(napi);
640  	struct ionic_cq *rxcq = napi_to_cq(napi);
641  	unsigned int qi = rxcq->bound_q->index;
642  	struct ionic_qcq *txqcq;
643  	struct ionic_dev *idev;
644  	struct ionic_lif *lif;
645  	struct ionic_cq *txcq;
646  	bool resched = false;
647  	u32 rx_work_done = 0;
648  	u32 tx_work_done = 0;
649  	u32 flags = 0;
650  
651  	lif = rxcq->bound_q->lif;
652  	idev = &lif->ionic->idev;
653  	txqcq = lif->txqcqs[qi];
654  	txcq = &lif->txqcqs[qi]->cq;
655  
656  	tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
657  					ionic_tx_service, NULL, NULL);
658  
659  	rx_work_done = ionic_cq_service(rxcq, budget,
660  					ionic_rx_service, NULL, NULL);
661  
662  	ionic_rx_fill(rxcq->bound_q);
663  
664  	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
665  		ionic_dim_update(rxqcq, 0);
666  		flags |= IONIC_INTR_CRED_UNMASK;
667  		rxcq->bound_intr->rearm_count++;
668  	}
669  
670  	if (rx_work_done || flags) {
671  		flags |= IONIC_INTR_CRED_RESET_COALESCE;
672  		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
673  				   tx_work_done + rx_work_done, flags);
674  	}
675  
676  	if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
677  		resched = true;
678  	if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
679  		resched = true;
680  	if (resched)
681  		mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
682  
683  	return rx_work_done;
684  }
685  
686  static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
687  				      void *data, size_t len)
688  {
689  	struct ionic_tx_stats *stats = q_to_tx_stats(q);
690  	struct device *dev = q->dev;
691  	dma_addr_t dma_addr;
692  
693  	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
694  	if (dma_mapping_error(dev, dma_addr)) {
695  		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
696  				     q->lif->netdev->name, q->name);
697  		stats->dma_map_err++;
698  		return 0;
699  	}
700  	return dma_addr;
701  }
702  
703  static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
704  				    const skb_frag_t *frag,
705  				    size_t offset, size_t len)
706  {
707  	struct ionic_tx_stats *stats = q_to_tx_stats(q);
708  	struct device *dev = q->dev;
709  	dma_addr_t dma_addr;
710  
711  	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
712  	if (dma_mapping_error(dev, dma_addr)) {
713  		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
714  				     q->lif->netdev->name, q->name);
715  		stats->dma_map_err++;
716  	}
717  	return dma_addr;
718  }
719  
720  static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
721  			    struct ionic_desc_info *desc_info)
722  {
723  	struct ionic_buf_info *buf_info = desc_info->bufs;
724  	struct ionic_tx_stats *stats = q_to_tx_stats(q);
725  	struct device *dev = q->dev;
726  	dma_addr_t dma_addr;
727  	unsigned int nfrags;
728  	skb_frag_t *frag;
729  	int frag_idx;
730  
731  	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
732  	if (dma_mapping_error(dev, dma_addr)) {
733  		stats->dma_map_err++;
734  		return -EIO;
735  	}
736  	buf_info->dma_addr = dma_addr;
737  	buf_info->len = skb_headlen(skb);
738  	buf_info++;
739  
740  	frag = skb_shinfo(skb)->frags;
741  	nfrags = skb_shinfo(skb)->nr_frags;
742  	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
743  		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
744  		if (dma_mapping_error(dev, dma_addr)) {
745  			stats->dma_map_err++;
746  			goto dma_fail;
747  		}
748  		buf_info->dma_addr = dma_addr;
749  		buf_info->len = skb_frag_size(frag);
750  		buf_info++;
751  	}
752  
753  	desc_info->nbufs = 1 + nfrags;
754  
755  	return 0;
756  
757  dma_fail:
758  	/* unwind the frag mappings and the head mapping */
759  	while (frag_idx > 0) {
760  		frag_idx--;
761  		buf_info--;
762  		dma_unmap_page(dev, buf_info->dma_addr,
763  			       buf_info->len, DMA_TO_DEVICE);
764  	}
765  	dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE);
766  	return -EIO;
767  }
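
/*
 * Mapping layout produced above: bufs[0] holds the DMA address and
 * length of the linear skb head (dma_map_single), bufs[1..nr_frags]
 * hold one entry per page fragment (skb_frag_dma_map), and
 * desc_info->nbufs records 1 + nr_frags.  If any fragment fails to
 * map, the already-mapped fragments are unwound with dma_unmap_page()
 * and then the head is released with dma_unmap_single() before
 * returning -EIO, so the caller never sees a partially mapped skb.
 */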
768  
769  static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
770  				     struct ionic_desc_info *desc_info)
771  {
772  	struct ionic_buf_info *buf_info = desc_info->bufs;
773  	struct device *dev = q->dev;
774  	unsigned int i;
775  
776  	if (!desc_info->nbufs)
777  		return;
778  
779  	dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
780  			 buf_info->len, DMA_TO_DEVICE);
781  	buf_info++;
782  	for (i = 1; i < desc_info->nbufs; i++, buf_info++)
783  		dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
784  			       buf_info->len, DMA_TO_DEVICE);
785  
786  	desc_info->nbufs = 0;
787  }
788  
789  static void ionic_tx_clean(struct ionic_queue *q,
790  			   struct ionic_desc_info *desc_info,
791  			   struct ionic_cq_info *cq_info,
792  			   void *cb_arg)
793  {
794  	struct ionic_tx_stats *stats = q_to_tx_stats(q);
795  	struct ionic_qcq *qcq = q_to_qcq(q);
796  	struct sk_buff *skb = cb_arg;
797  	u16 qi;
798  
799  	ionic_tx_desc_unmap_bufs(q, desc_info);
800  
801  	if (!skb)
802  		return;
803  
804  	qi = skb_get_queue_mapping(skb);
805  
806  	if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
807  		if (cq_info) {
808  			struct skb_shared_hwtstamps hwts = {};
809  			__le64 *cq_desc_hwstamp;
810  			u64 hwstamp;
811  
812  			cq_desc_hwstamp =
813  				cq_info->cq_desc +
814  				qcq->cq.desc_size -
815  				sizeof(struct ionic_txq_comp) -
816  				IONIC_HWSTAMP_CQ_NEGOFFSET;
817  
818  			hwstamp = le64_to_cpu(*cq_desc_hwstamp);
819  
820  			if (hwstamp != IONIC_HWSTAMP_INVALID) {
821  				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
822  
823  				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
824  				skb_tstamp_tx(skb, &hwts);
825  
826  				stats->hwstamp_valid++;
827  			} else {
828  				stats->hwstamp_invalid++;
829  			}
830  		}
831  
832  	} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
833  		netif_wake_subqueue(q->lif->netdev, qi);
834  	}
835  
836  	desc_info->bytes = skb->len;
837  	stats->clean++;
838  
839  	dev_consume_skb_any(skb);
840  }
841  
842  bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
843  {
844  	struct ionic_queue *q = cq->bound_q;
845  	struct ionic_desc_info *desc_info;
846  	struct ionic_txq_comp *comp;
847  	int bytes = 0;
848  	int pkts = 0;
849  	u16 index;
850  
851  	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
852  
853  	if (!color_match(comp->color, cq->done_color))
854  		return false;
855  
856  	/* clean the related q entries, there could be
857  	 * several q entries completed for each cq completion
858  	 */
859  	do {
860  		desc_info = &q->info[q->tail_idx];
861  		desc_info->bytes = 0;
862  		index = q->tail_idx;
863  		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
864  		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
865  		if (desc_info->cb_arg) {
866  			pkts++;
867  			bytes += desc_info->bytes;
868  		}
869  		desc_info->cb = NULL;
870  		desc_info->cb_arg = NULL;
871  	} while (index != le16_to_cpu(comp->comp_index));
872  
873  	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
874  		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
875  
876  	return true;
877  }
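
/*
 * Tx completions are batched: a single completion entry can retire
 * every descriptor from the current tail up to and including
 * comp->comp_index, so the loop above advances tail_idx and cleans
 * each entry until it reaches that index.  The packet and byte totals
 * are reported through netdev_tx_completed_queue() to pair with the
 * netdev_tx_sent_queue() calls made at transmit time (BQL); both sides
 * skip this accounting for the hardware-timestamp queue.
 */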
878  
879  void ionic_tx_flush(struct ionic_cq *cq)
880  {
881  	struct ionic_dev *idev = &cq->lif->ionic->idev;
882  	u32 work_done;
883  
884  	work_done = ionic_cq_service(cq, cq->num_descs,
885  				     ionic_tx_service, NULL, NULL);
886  	if (work_done)
887  		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
888  				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
889  }
890  
891  void ionic_tx_empty(struct ionic_queue *q)
892  {
893  	struct ionic_desc_info *desc_info;
894  	int bytes = 0;
895  	int pkts = 0;
896  
897  	/* walk the not-yet-completed tx entries, if any */
898  	while (q->head_idx != q->tail_idx) {
899  		desc_info = &q->info[q->tail_idx];
900  		desc_info->bytes = 0;
901  		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
902  		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
903  		if (desc_info->cb_arg) {
904  			pkts++;
905  			bytes += desc_info->bytes;
906  		}
907  		desc_info->cb = NULL;
908  		desc_info->cb_arg = NULL;
909  	}
910  
911  	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
912  		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
913  }
914  
915  static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
916  {
917  	int err;
918  
919  	err = skb_cow_head(skb, 0);
920  	if (err)
921  		return err;
922  
923  	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
924  		inner_ip_hdr(skb)->check = 0;
925  		inner_tcp_hdr(skb)->check =
926  			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
927  					   inner_ip_hdr(skb)->daddr,
928  					   0, IPPROTO_TCP, 0);
929  	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
930  		inner_tcp_hdr(skb)->check =
931  			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
932  					 &inner_ipv6_hdr(skb)->daddr,
933  					 0, IPPROTO_TCP, 0);
934  	}
935  
936  	return 0;
937  }
938  
939  static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
940  {
941  	int err;
942  
943  	err = skb_cow_head(skb, 0);
944  	if (err)
945  		return err;
946  
947  	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
948  		ip_hdr(skb)->check = 0;
949  		tcp_hdr(skb)->check =
950  			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
951  					   ip_hdr(skb)->daddr,
952  					   0, IPPROTO_TCP, 0);
953  	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
954  		tcp_v6_gso_csum_prep(skb);
955  	}
956  
957  	return 0;
958  }
959  
960  static void ionic_tx_tso_post(struct ionic_queue *q,
961  			      struct ionic_desc_info *desc_info,
962  			      struct sk_buff *skb,
963  			      dma_addr_t addr, u8 nsge, u16 len,
964  			      unsigned int hdrlen, unsigned int mss,
965  			      bool outer_csum,
966  			      u16 vlan_tci, bool has_vlan,
967  			      bool start, bool done)
968  {
969  	struct ionic_txq_desc *desc = desc_info->desc;
970  	u8 flags = 0;
971  	u64 cmd;
972  
973  	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
974  	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
975  	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
976  	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;
977  
978  	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
979  	desc->cmd = cpu_to_le64(cmd);
980  	desc->len = cpu_to_le16(len);
981  	desc->vlan_tci = cpu_to_le16(vlan_tci);
982  	desc->hdr_len = cpu_to_le16(hdrlen);
983  	desc->mss = cpu_to_le16(mss);
984  
985  	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
986  
987  	if (start) {
988  		skb_tx_timestamp(skb);
989  		if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
990  			netdev_tx_sent_queue(q_to_ndq(q), skb->len);
991  		ionic_txq_post(q, false, ionic_tx_clean, skb);
992  	} else {
993  		ionic_txq_post(q, done, NULL, NULL);
994  	}
995  }
996  
997  static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
998  {
999  	struct ionic_tx_stats *stats = q_to_tx_stats(q);
1000  	struct ionic_desc_info *desc_info;
1001  	struct ionic_buf_info *buf_info;
1002  	struct ionic_txq_sg_elem *elem;
1003  	struct ionic_txq_desc *desc;
1004  	unsigned int chunk_len;
1005  	unsigned int frag_rem;
1006  	unsigned int tso_rem;
1007  	unsigned int seg_rem;
1008  	dma_addr_t desc_addr;
1009  	dma_addr_t frag_addr;
1010  	unsigned int hdrlen;
1011  	unsigned int len;
1012  	unsigned int mss;
1013  	bool start, done;
1014  	bool outer_csum;
1015  	bool has_vlan;
1016  	u16 desc_len;
1017  	u8 desc_nsge;
1018  	u16 vlan_tci;
1019  	bool encap;
1020  	int err;
1021  
1022  	desc_info = &q->info[q->head_idx];
1023  	buf_info = desc_info->bufs;
1024  
1025  	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
1026  		return -EIO;
1027  
1028  	len = skb->len;
1029  	mss = skb_shinfo(skb)->gso_size;
1030  	outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1031  						   SKB_GSO_GRE_CSUM |
1032  						   SKB_GSO_IPXIP4 |
1033  						   SKB_GSO_IPXIP6 |
1034  						   SKB_GSO_UDP_TUNNEL |
1035  						   SKB_GSO_UDP_TUNNEL_CSUM));
1036  	has_vlan = !!skb_vlan_tag_present(skb);
1037  	vlan_tci = skb_vlan_tag_get(skb);
1038  	encap = skb->encapsulation;
1039  
1040  	/* Preload inner-most TCP csum field with IP pseudo hdr
1041  	 * calculated with IP length set to zero.  HW will later
1042  	 * add in length to each TCP segment resulting from the TSO.
1043  	 */
1044  
1045  	if (encap)
1046  		err = ionic_tx_tcp_inner_pseudo_csum(skb);
1047  	else
1048  		err = ionic_tx_tcp_pseudo_csum(skb);
1049  	if (err) {
1050  		/* clean up mapping from ionic_tx_map_skb */
1051  		ionic_tx_desc_unmap_bufs(q, desc_info);
1052  		return err;
1053  	}
1054  
1055  	if (encap)
1056  		hdrlen = skb_inner_tcp_all_headers(skb);
1057  	else
1058  		hdrlen = skb_tcp_all_headers(skb);
1059  
1060  	tso_rem = len;
1061  	seg_rem = min(tso_rem, hdrlen + mss);
1062  
1063  	frag_addr = 0;
1064  	frag_rem = 0;
1065  
1066  	start = true;
1067  
1068  	while (tso_rem > 0) {
1069  		desc = NULL;
1070  		elem = NULL;
1071  		desc_addr = 0;
1072  		desc_len = 0;
1073  		desc_nsge = 0;
1074  		/* use fragments until we have enough to post a single descriptor */
1075  		while (seg_rem > 0) {
1076  			/* if the fragment is exhausted then move to the next one */
1077  			if (frag_rem == 0) {
1078  				/* grab the next fragment */
1079  				frag_addr = buf_info->dma_addr;
1080  				frag_rem = buf_info->len;
1081  				buf_info++;
1082  			}
1083  			chunk_len = min(frag_rem, seg_rem);
1084  			if (!desc) {
1085  				/* fill main descriptor */
1086  				desc = desc_info->txq_desc;
1087  				elem = desc_info->txq_sg_desc->elems;
1088  				desc_addr = frag_addr;
1089  				desc_len = chunk_len;
1090  			} else {
1091  				/* fill sg descriptor */
1092  				elem->addr = cpu_to_le64(frag_addr);
1093  				elem->len = cpu_to_le16(chunk_len);
1094  				elem++;
1095  				desc_nsge++;
1096  			}
1097  			frag_addr += chunk_len;
1098  			frag_rem -= chunk_len;
1099  			tso_rem -= chunk_len;
1100  			seg_rem -= chunk_len;
1101  		}
1102  		seg_rem = min(tso_rem, mss);
1103  		done = (tso_rem == 0);
1104  		/* post descriptor */
1105  		ionic_tx_tso_post(q, desc_info, skb,
1106  				  desc_addr, desc_nsge, desc_len,
1107  				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
1108  				  start, done);
1109  		start = false;
1110  		/* Buffer information is stored with the first tso descriptor */
1111  		desc_info = &q->info[q->head_idx];
1112  		desc_info->nbufs = 0;
1113  	}
1114  
1115  	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
1116  	stats->bytes += len;
1117  	stats->tso++;
1118  	stats->tso_bytes = len;
1119  
1120  	return 0;
1121  }
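
/*
 * TSO carving: the skb is DMA-mapped once, then cut into one posted
 * descriptor per wire segment.  The first segment spans the headers
 * plus one MSS of payload, later segments span one MSS each, and
 * within a segment the first chunk of mapped data fills the main
 * descriptor while any further chunks (fragment crossings) land in SG
 * elements.  For example, with hdrlen = 54 and mss = 1448 a 2950-byte
 * skb is posted as two segments of 1502 and 1448 bytes, and
 * stats->pkts is bumped by DIV_ROUND_UP(2950 - 54, 1448) = 2.  The
 * SOT/EOT flags on the first and last descriptors bracket the burst
 * for the hardware.
 */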
1122  
1123  static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
1124  			       struct ionic_desc_info *desc_info)
1125  {
1126  	struct ionic_txq_desc *desc = desc_info->txq_desc;
1127  	struct ionic_buf_info *buf_info = desc_info->bufs;
1128  	struct ionic_tx_stats *stats = q_to_tx_stats(q);
1129  	bool has_vlan;
1130  	u8 flags = 0;
1131  	bool encap;
1132  	u64 cmd;
1133  
1134  	has_vlan = !!skb_vlan_tag_present(skb);
1135  	encap = skb->encapsulation;
1136  
1137  	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
1138  	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
1139  
1140  	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
1141  				  flags, skb_shinfo(skb)->nr_frags,
1142  				  buf_info->dma_addr);
1143  	desc->cmd = cpu_to_le64(cmd);
1144  	desc->len = cpu_to_le16(buf_info->len);
1145  	if (has_vlan) {
1146  		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
1147  		stats->vlan_inserted++;
1148  	} else {
1149  		desc->vlan_tci = 0;
1150  	}
1151  	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
1152  	desc->csum_offset = cpu_to_le16(skb->csum_offset);
1153  
1154  	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
1155  
1156  	if (skb_csum_is_sctp(skb))
1157  		stats->crc32_csum++;
1158  	else
1159  		stats->csum++;
1160  }
1161  
1162  static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
1163  				  struct ionic_desc_info *desc_info)
1164  {
1165  	struct ionic_txq_desc *desc = desc_info->txq_desc;
1166  	struct ionic_buf_info *buf_info = desc_info->bufs;
1167  	struct ionic_tx_stats *stats = q_to_tx_stats(q);
1168  	bool has_vlan;
1169  	u8 flags = 0;
1170  	bool encap;
1171  	u64 cmd;
1172  
1173  	has_vlan = !!skb_vlan_tag_present(skb);
1174  	encap = skb->encapsulation;
1175  
1176  	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
1177  	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
1178  
1179  	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
1180  				  flags, skb_shinfo(skb)->nr_frags,
1181  				  buf_info->dma_addr);
1182  	desc->cmd = cpu_to_le64(cmd);
1183  	desc->len = cpu_to_le16(buf_info->len);
1184  	if (has_vlan) {
1185  		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
1186  		stats->vlan_inserted++;
1187  	} else {
1188  		desc->vlan_tci = 0;
1189  	}
1190  	desc->csum_start = 0;
1191  	desc->csum_offset = 0;
1192  
1193  	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
1194  
1195  	stats->csum_none++;
1196  }
1197  
1198  static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
1199  			       struct ionic_desc_info *desc_info)
1200  {
1201  	struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
1202  	struct ionic_buf_info *buf_info = &desc_info->bufs[1];
1203  	struct ionic_txq_sg_elem *elem = sg_desc->elems;
1204  	struct ionic_tx_stats *stats = q_to_tx_stats(q);
1205  	unsigned int i;
1206  
1207  	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
1208  		elem->addr = cpu_to_le64(buf_info->dma_addr);
1209  		elem->len = cpu_to_le16(buf_info->len);
1210  	}
1211  
1212  	stats->frags += skb_shinfo(skb)->nr_frags;
1213  }
1214  
1215  static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
1216  {
1217  	struct ionic_desc_info *desc_info = &q->info[q->head_idx];
1218  	struct ionic_tx_stats *stats = q_to_tx_stats(q);
1219  
1220  	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
1221  		return -EIO;
1222  
1223  	/* set up the initial descriptor */
1224  	if (skb->ip_summed == CHECKSUM_PARTIAL)
1225  		ionic_tx_calc_csum(q, skb, desc_info);
1226  	else
1227  		ionic_tx_calc_no_csum(q, skb, desc_info);
1228  
1229  	/* add frags */
1230  	ionic_tx_skb_frags(q, skb, desc_info);
1231  
1232  	skb_tx_timestamp(skb);
1233  	stats->pkts++;
1234  	stats->bytes += skb->len;
1235  
1236  	if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
1237  		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
1238  	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
1239  
1240  	return 0;
1241  }
1242  
1243  static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
1244  {
1245  	struct ionic_tx_stats *stats = q_to_tx_stats(q);
1246  	int ndescs;
1247  	int err;
1248  
1249  	/* Each desc is mss long max, so a descriptor for each gso_seg */
1250  	if (skb_is_gso(skb))
1251  		ndescs = skb_shinfo(skb)->gso_segs;
1252  	else
1253  		ndescs = 1;
1254  
1255  	/* If non-TSO, just need 1 desc and nr_frags sg elems */
1256  	if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
1257  		return ndescs;
1258  
1259  	/* Too many frags, so linearize */
1260  	err = skb_linearize(skb);
1261  	if (err)
1262  		return err;
1263  
1264  	stats->linearize++;
1265  
1266  	return ndescs;
1267  }
1268  
1269  static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
1270  {
1271  	int stopped = 0;
1272  
1273  	if (unlikely(!ionic_q_has_space(q, ndescs))) {
1274  		netif_stop_subqueue(q->lif->netdev, q->index);
1275  		stopped = 1;
1276  
1277  		/* Might race with ionic_tx_clean, check again */
1278  		smp_rmb();
1279  		if (ionic_q_has_space(q, ndescs)) {
1280  			netif_wake_subqueue(q->lif->netdev, q->index);
1281  			stopped = 0;
1282  		}
1283  	}
1284  
1285  	return stopped;
1286  }
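
/*
 * Stop/wake handling: if there is not room for ndescs more descriptors
 * the subqueue is stopped, then space is re-checked after an smp_rmb()
 * in case ionic_tx_clean() freed entries in the meantime and missed
 * the stop; if room appeared the queue is woken again and 0 is
 * returned.  ionic_start_xmit() uses this both to return
 * NETDEV_TX_BUSY before posting and, with a small reserve of 4
 * descriptors, to stop the queue early after a successful transmit.
 */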
1287  
1288  static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
1289  					    struct net_device *netdev)
1290  {
1291  	struct ionic_lif *lif = netdev_priv(netdev);
1292  	struct ionic_queue *q = &lif->hwstamp_txq->q;
1293  	int err, ndescs;
1294  
1295  	/* Does not stop/start txq, because we post to a separate tx queue
1296  	 * for timestamping, and if a packet can't be posted immediately to
1297  	 * the timestamping queue, it is dropped.
1298  	 */
1299  
1300  	ndescs = ionic_tx_descs_needed(q, skb);
1301  	if (unlikely(ndescs < 0))
1302  		goto err_out_drop;
1303  
1304  	if (unlikely(!ionic_q_has_space(q, ndescs)))
1305  		goto err_out_drop;
1306  
1307  	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
1308  	if (skb_is_gso(skb))
1309  		err = ionic_tx_tso(q, skb);
1310  	else
1311  		err = ionic_tx(q, skb);
1312  
1313  	if (err)
1314  		goto err_out_drop;
1315  
1316  	return NETDEV_TX_OK;
1317  
1318  err_out_drop:
1319  	q->drop++;
1320  	dev_kfree_skb(skb);
1321  	return NETDEV_TX_OK;
1322  }
1323  
1324  netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1325  {
1326  	u16 queue_index = skb_get_queue_mapping(skb);
1327  	struct ionic_lif *lif = netdev_priv(netdev);
1328  	struct ionic_queue *q;
1329  	int ndescs;
1330  	int err;
1331  
1332  	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
1333  		dev_kfree_skb(skb);
1334  		return NETDEV_TX_OK;
1335  	}
1336  
1337  	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
1338  		if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
1339  			return ionic_start_hwstamp_xmit(skb, netdev);
1340  
1341  	if (unlikely(queue_index >= lif->nxqs))
1342  		queue_index = 0;
1343  	q = &lif->txqcqs[queue_index]->q;
1344  
1345  	ndescs = ionic_tx_descs_needed(q, skb);
1346  	if (ndescs < 0)
1347  		goto err_out_drop;
1348  
1349  	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
1350  		return NETDEV_TX_BUSY;
1351  
1352  	if (skb_is_gso(skb))
1353  		err = ionic_tx_tso(q, skb);
1354  	else
1355  		err = ionic_tx(q, skb);
1356  
1357  	if (err)
1358  		goto err_out_drop;
1359  
1360  	/* Stop the queue if there aren't descriptors for the next packet.
1361  	 * Since our SG lists per descriptor take care of most of the possible
1362  	 * fragmentation, we don't need to have many descriptors available.
1363  	 */
1364  	ionic_maybe_stop_tx(q, 4);
1365  
1366  	return NETDEV_TX_OK;
1367  
1368  err_out_drop:
1369  	q->drop++;
1370  	dev_kfree_skb(skb);
1371  	return NETDEV_TX_OK;
1372  }
1373