1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* The driver transmit and receive code */
5 
6 #include <linux/prefetch.h>
7 #include <linux/mm.h>
8 #include <linux/bpf_trace.h>
9 #include <net/dsfield.h>
10 #include <net/xdp.h>
11 #include "ice_txrx_lib.h"
12 #include "ice_lib.h"
13 #include "ice.h"
14 #include "ice_trace.h"
15 #include "ice_dcb_lib.h"
16 #include "ice_xsk.h"
17 #include "ice_eswitch.h"
18 
19 #define ICE_RX_HDR_SIZE		256
20 
21 #define FDIR_DESC_RXDID 0x40
22 #define ICE_FDIR_CLEAN_DELAY 10
23 
24 /**
25  * ice_prgm_fdir_fltr - Program a Flow Director filter
26  * @vsi: VSI to send dummy packet
27  * @fdir_desc: flow director descriptor
28  * @raw_packet: allocated buffer for flow director
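 *
 * Return: 0 on success, or a negative error code on failure.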
29  */
30 int
31 ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
32 		   u8 *raw_packet)
33 {
34 	struct ice_tx_buf *tx_buf, *first;
35 	struct ice_fltr_desc *f_desc;
36 	struct ice_tx_desc *tx_desc;
37 	struct ice_tx_ring *tx_ring;
38 	struct device *dev;
39 	dma_addr_t dma;
40 	u32 td_cmd;
41 	u16 i;
42 
43 	/* VSI and Tx ring */
44 	if (!vsi)
45 		return -ENOENT;
46 	tx_ring = vsi->tx_rings[0];
47 	if (!tx_ring || !tx_ring->desc)
48 		return -ENOENT;
49 	dev = tx_ring->dev;
50 
51 	/* we are using two descriptors to add/del a filter and we can wait */
52 	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
53 		if (!i)
54 			return -EAGAIN;
55 		msleep_interruptible(1);
56 	}
57 
58 	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
59 			     DMA_TO_DEVICE);
60 
61 	if (dma_mapping_error(dev, dma))
62 		return -EINVAL;
63 
64 	/* grab the next descriptor */
65 	i = tx_ring->next_to_use;
66 	first = &tx_ring->tx_buf[i];
67 	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
68 	memcpy(f_desc, fdir_desc, sizeof(*f_desc));
69 
70 	i++;
71 	i = (i < tx_ring->count) ? i : 0;
72 	tx_desc = ICE_TX_DESC(tx_ring, i);
73 	tx_buf = &tx_ring->tx_buf[i];
74 
75 	i++;
76 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
77 
78 	memset(tx_buf, 0, sizeof(*tx_buf));
79 	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
80 	dma_unmap_addr_set(tx_buf, dma, dma);
81 
82 	tx_desc->buf_addr = cpu_to_le64(dma);
83 	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
84 		 ICE_TX_DESC_CMD_RE;
85 
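	/* flag the buffer as a dummy packet so the Tx cleanup path frees
	 * raw_buf with devm_kfree() instead of treating it as an skb
	 */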
86 	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
87 	tx_buf->raw_buf = raw_packet;
88 
89 	tx_desc->cmd_type_offset_bsz =
90 		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
91 
92 	/* Force memory write to complete before letting h/w know
93 	 * there are new descriptors to fetch.
94 	 */
95 	wmb();
96 
97 	/* mark the data descriptor to be watched */
98 	first->next_to_watch = tx_desc;
99 
100 	writel(tx_ring->next_to_use, tx_ring->tail);
101 
102 	return 0;
103 }
104 
105 /**
106  * ice_unmap_and_free_tx_buf - Release a Tx buffer
107  * @ring: the ring that owns the buffer
108  * @tx_buf: the buffer to free
109  */
110 static void
111 ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
112 {
113 	if (tx_buf->skb) {
114 		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
115 			devm_kfree(ring->dev, tx_buf->raw_buf);
116 		else if (ice_ring_is_xdp(ring))
117 			page_frag_free(tx_buf->raw_buf);
118 		else
119 			dev_kfree_skb_any(tx_buf->skb);
120 		if (dma_unmap_len(tx_buf, len))
121 			dma_unmap_single(ring->dev,
122 					 dma_unmap_addr(tx_buf, dma),
123 					 dma_unmap_len(tx_buf, len),
124 					 DMA_TO_DEVICE);
125 	} else if (dma_unmap_len(tx_buf, len)) {
126 		dma_unmap_page(ring->dev,
127 			       dma_unmap_addr(tx_buf, dma),
128 			       dma_unmap_len(tx_buf, len),
129 			       DMA_TO_DEVICE);
130 	}
131 
132 	tx_buf->next_to_watch = NULL;
133 	tx_buf->skb = NULL;
134 	dma_unmap_len_set(tx_buf, len, 0);
135 	/* tx_buf must be completely set up in the transmit path */
136 }
137 
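/**
 * txring_txq - Look up the netdev Tx queue that backs a Tx ring
 * @ring: Tx ring to find the netdev queue for
 */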
138 static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
139 {
140 	return netdev_get_tx_queue(ring->netdev, ring->q_index);
141 }
142 
143 /**
144  * ice_clean_tx_ring - Free all Tx buffers on a ring
145  * @tx_ring: ring to be cleaned
146  */
147 void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
148 {
149 	u32 size;
150 	u16 i;
151 
152 	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
153 		ice_xsk_clean_xdp_ring(tx_ring);
154 		goto tx_skip_free;
155 	}
156 
157 	/* ring already cleared, nothing to do */
158 	if (!tx_ring->tx_buf)
159 		return;
160 
161 	/* Free all the Tx ring sk_buffs */
162 	for (i = 0; i < tx_ring->count; i++)
163 		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
164 
165 tx_skip_free:
166 	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
167 
168 	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
169 		     PAGE_SIZE);
170 	/* Zero out the descriptor ring */
171 	memset(tx_ring->desc, 0, size);
172 
173 	tx_ring->next_to_use = 0;
174 	tx_ring->next_to_clean = 0;
175 
176 	if (!tx_ring->netdev)
177 		return;
178 
179 	/* cleanup Tx queue statistics */
180 	netdev_tx_reset_queue(txring_txq(tx_ring));
181 }
182 
183 /**
184  * ice_free_tx_ring - Free Tx resources per queue
185  * @tx_ring: Tx descriptor ring for a specific queue
186  *
187  * Free all transmit software resources
188  */
189 void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
190 {
191 	u32 size;
192 
193 	ice_clean_tx_ring(tx_ring);
194 	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
195 	tx_ring->tx_buf = NULL;
196 
197 	if (tx_ring->desc) {
198 		size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
199 			     PAGE_SIZE);
200 		dmam_free_coherent(tx_ring->dev, size,
201 				   tx_ring->desc, tx_ring->dma);
202 		tx_ring->desc = NULL;
203 	}
204 }
205 
206 /**
207  * ice_clean_tx_irq - Reclaim resources after transmit completes
208  * @tx_ring: Tx ring to clean
209  * @napi_budget: Used to determine if we are in netpoll
210  *
211  * Returns true if there's any budget left (i.e. the clean is finished)
212  */
213 static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
214 {
215 	unsigned int total_bytes = 0, total_pkts = 0;
216 	unsigned int budget = ICE_DFLT_IRQ_WORK;
217 	struct ice_vsi *vsi = tx_ring->vsi;
218 	s16 i = tx_ring->next_to_clean;
219 	struct ice_tx_desc *tx_desc;
220 	struct ice_tx_buf *tx_buf;
221 
222 	tx_buf = &tx_ring->tx_buf[i];
223 	tx_desc = ICE_TX_DESC(tx_ring, i);
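	/* track i as a negative offset from the ring size so the wrap checks
	 * below can simply test for !i instead of comparing against
	 * tx_ring->count
	 */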
224 	i -= tx_ring->count;
225 
226 	prefetch(&vsi->state);
227 
228 	do {
229 		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
230 
231 		/* if next_to_watch is not set then there is no work pending */
232 		if (!eop_desc)
233 			break;
234 
235 		smp_rmb();	/* prevent any other reads prior to eop_desc */
236 
237 		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
238 		/* if the descriptor isn't done, no work yet to do */
239 		if (!(eop_desc->cmd_type_offset_bsz &
240 		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
241 			break;
242 
243 		/* clear next_to_watch to prevent false hangs */
244 		tx_buf->next_to_watch = NULL;
245 
246 		/* update the statistics for this packet */
247 		total_bytes += tx_buf->bytecount;
248 		total_pkts += tx_buf->gso_segs;
249 
250 		/* free the skb */
251 		napi_consume_skb(tx_buf->skb, napi_budget);
252 
253 		/* unmap skb header data */
254 		dma_unmap_single(tx_ring->dev,
255 				 dma_unmap_addr(tx_buf, dma),
256 				 dma_unmap_len(tx_buf, len),
257 				 DMA_TO_DEVICE);
258 
259 		/* clear tx_buf data */
260 		tx_buf->skb = NULL;
261 		dma_unmap_len_set(tx_buf, len, 0);
262 
263 		/* unmap remaining buffers */
264 		while (tx_desc != eop_desc) {
265 			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
266 			tx_buf++;
267 			tx_desc++;
268 			i++;
269 			if (unlikely(!i)) {
270 				i -= tx_ring->count;
271 				tx_buf = tx_ring->tx_buf;
272 				tx_desc = ICE_TX_DESC(tx_ring, 0);
273 			}
274 
275 			/* unmap any remaining paged data */
276 			if (dma_unmap_len(tx_buf, len)) {
277 				dma_unmap_page(tx_ring->dev,
278 					       dma_unmap_addr(tx_buf, dma),
279 					       dma_unmap_len(tx_buf, len),
280 					       DMA_TO_DEVICE);
281 				dma_unmap_len_set(tx_buf, len, 0);
282 			}
283 		}
284 		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
285 
286 		/* move us one more past the eop_desc for start of next pkt */
287 		tx_buf++;
288 		tx_desc++;
289 		i++;
290 		if (unlikely(!i)) {
291 			i -= tx_ring->count;
292 			tx_buf = tx_ring->tx_buf;
293 			tx_desc = ICE_TX_DESC(tx_ring, 0);
294 		}
295 
296 		prefetch(tx_desc);
297 
298 		/* update budget accounting */
299 		budget--;
300 	} while (likely(budget));
301 
302 	i += tx_ring->count;
303 	tx_ring->next_to_clean = i;
304 
305 	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
306 
307 	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
308 				  total_bytes);
309 
310 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
311 	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
312 		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
313 		/* Make sure that anybody stopping the queue after this
314 		 * sees the new next_to_clean.
315 		 */
316 		smp_mb();
317 		if (__netif_subqueue_stopped(tx_ring->netdev,
318 					     tx_ring->q_index) &&
319 		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
320 			netif_wake_subqueue(tx_ring->netdev,
321 					    tx_ring->q_index);
322 			++tx_ring->tx_stats.restart_q;
323 		}
324 	}
325 
326 	return !!budget;
327 }
328 
329 /**
330  * ice_setup_tx_ring - Allocate the Tx descriptors
331  * @tx_ring: the Tx ring to set up
332  *
333  * Return 0 on success, negative on error
334  */
335 int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
336 {
337 	struct device *dev = tx_ring->dev;
338 	u32 size;
339 
340 	if (!dev)
341 		return -ENOMEM;
342 
343 	/* warn if we are about to overwrite the pointer */
344 	WARN_ON(tx_ring->tx_buf);
345 	tx_ring->tx_buf =
346 		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
347 			     GFP_KERNEL);
348 	if (!tx_ring->tx_buf)
349 		return -ENOMEM;
350 
351 	/* round up to nearest page */
352 	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
353 		     PAGE_SIZE);
354 	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
355 					    GFP_KERNEL);
356 	if (!tx_ring->desc) {
357 		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
358 			size);
359 		goto err;
360 	}
361 
362 	tx_ring->next_to_use = 0;
363 	tx_ring->next_to_clean = 0;
364 	tx_ring->tx_stats.prev_pkt = -1;
365 	return 0;
366 
367 err:
368 	devm_kfree(dev, tx_ring->tx_buf);
369 	tx_ring->tx_buf = NULL;
370 	return -ENOMEM;
371 }
372 
373 /**
374  * ice_clean_rx_ring - Free Rx buffers
375  * @rx_ring: ring to be cleaned
376  */
377 void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
378 {
379 	struct device *dev = rx_ring->dev;
380 	u32 size;
381 	u16 i;
382 
383 	/* ring already cleared, nothing to do */
384 	if (!rx_ring->rx_buf)
385 		return;
386 
387 	if (rx_ring->skb) {
388 		dev_kfree_skb(rx_ring->skb);
389 		rx_ring->skb = NULL;
390 	}
391 
392 	if (rx_ring->xsk_pool) {
393 		ice_xsk_clean_rx_ring(rx_ring);
394 		goto rx_skip_free;
395 	}
396 
397 	/* Free all the Rx ring sk_buffs */
398 	for (i = 0; i < rx_ring->count; i++) {
399 		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
400 
401 		if (!rx_buf->page)
402 			continue;
403 
404 		/* Invalidate cache lines that may have been written to by
405 		 * device so that we avoid corrupting memory.
406 		 */
407 		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
408 					      rx_buf->page_offset,
409 					      rx_ring->rx_buf_len,
410 					      DMA_FROM_DEVICE);
411 
412 		/* free resources associated with mapping */
413 		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
414 				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
415 		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
416 
417 		rx_buf->page = NULL;
418 		rx_buf->page_offset = 0;
419 	}
420 
421 rx_skip_free:
422 	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
423 
424 	/* Zero out the descriptor ring */
425 	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
426 		     PAGE_SIZE);
427 	memset(rx_ring->desc, 0, size);
428 
429 	rx_ring->next_to_alloc = 0;
430 	rx_ring->next_to_clean = 0;
431 	rx_ring->next_to_use = 0;
432 }
433 
434 /**
435  * ice_free_rx_ring - Free Rx resources
436  * @rx_ring: ring to clean the resources from
437  *
438  * Free all receive software resources
439  */
440 void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
441 {
442 	u32 size;
443 
444 	ice_clean_rx_ring(rx_ring);
445 	if (rx_ring->vsi->type == ICE_VSI_PF)
446 		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
447 			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
448 	rx_ring->xdp_prog = NULL;
449 	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
450 	rx_ring->rx_buf = NULL;
451 
452 	if (rx_ring->desc) {
453 		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
454 			     PAGE_SIZE);
455 		dmam_free_coherent(rx_ring->dev, size,
456 				   rx_ring->desc, rx_ring->dma);
457 		rx_ring->desc = NULL;
458 	}
459 }
460 
461 /**
462  * ice_setup_rx_ring - Allocate the Rx descriptors
463  * @rx_ring: the Rx ring to set up
464  *
465  * Return 0 on success, negative on error
466  */
467 int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
468 {
469 	struct device *dev = rx_ring->dev;
470 	u32 size;
471 
472 	if (!dev)
473 		return -ENOMEM;
474 
475 	/* warn if we are about to overwrite the pointer */
476 	WARN_ON(rx_ring->rx_buf);
477 	rx_ring->rx_buf =
478 		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
479 			     GFP_KERNEL);
480 	if (!rx_ring->rx_buf)
481 		return -ENOMEM;
482 
483 	/* round up to nearest page */
484 	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
485 		     PAGE_SIZE);
486 	rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
487 					    GFP_KERNEL);
488 	if (!rx_ring->desc) {
489 		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
490 			size);
491 		goto err;
492 	}
493 
494 	rx_ring->next_to_use = 0;
495 	rx_ring->next_to_clean = 0;
496 
497 	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
498 		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
499 
500 	if (rx_ring->vsi->type == ICE_VSI_PF &&
501 	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
502 		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
503 				     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
504 			goto err;
505 	return 0;
506 
507 err:
508 	devm_kfree(dev, rx_ring->rx_buf);
509 	rx_ring->rx_buf = NULL;
510 	return -ENOMEM;
511 }
512 
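/**
 * ice_rx_frame_truesize - Estimate the truesize occupied by an Rx frame
 * @rx_ring: Rx ring the frame is received on
 * @size: frame length, only used when PAGE_SIZE is 8192 or larger
 */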
513 static unsigned int
514 ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
515 {
516 	unsigned int truesize;
517 
518 #if (PAGE_SIZE < 8192)
519 	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
520 #else
521 	truesize = rx_ring->rx_offset ?
522 		SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
523 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
524 		SKB_DATA_ALIGN(size);
525 #endif
526 	return truesize;
527 }
528 
529 /**
530  * ice_run_xdp - Executes an XDP program on initialized xdp_buff
531  * @rx_ring: Rx ring
532  * @xdp: xdp_buff used as input to the XDP program
533  * @xdp_prog: XDP program to run
534  * @xdp_ring: ring to be used for XDP_TX action
535  *
536  * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
537  */
538 static int
539 ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
540 	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
541 {
542 	int err;
543 	u32 act;
544 
545 	act = bpf_prog_run_xdp(xdp_prog, xdp);
546 	switch (act) {
547 	case XDP_PASS:
548 		return ICE_XDP_PASS;
549 	case XDP_TX:
550 		if (static_branch_unlikely(&ice_xdp_locking_key))
551 			spin_lock(&xdp_ring->tx_lock);
552 		err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
553 		if (static_branch_unlikely(&ice_xdp_locking_key))
554 			spin_unlock(&xdp_ring->tx_lock);
555 		if (err == ICE_XDP_CONSUMED)
556 			goto out_failure;
557 		return err;
558 	case XDP_REDIRECT:
559 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
560 		if (err)
561 			goto out_failure;
562 		return ICE_XDP_REDIR;
563 	default:
564 		bpf_warn_invalid_xdp_action(act);
565 		fallthrough;
566 	case XDP_ABORTED:
567 out_failure:
568 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
569 		fallthrough;
570 	case XDP_DROP:
571 		return ICE_XDP_CONSUMED;
572 	}
573 }
574 
575 /**
576  * ice_xdp_xmit - submit packets to XDP ring for transmission
577  * @dev: netdev
578  * @n: number of XDP frames to be transmitted
579  * @frames: XDP frames to be transmitted
580  * @flags: transmit flags
581  *
582  * Returns the number of frames successfully sent. Failed frames
583  * will be freed by the XDP core.
584  * For error cases, a negative errno code is returned and no frames
585  * are transmitted (the caller must handle freeing frames).
586  */
587 int
588 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
589 	     u32 flags)
590 {
591 	struct ice_netdev_priv *np = netdev_priv(dev);
592 	unsigned int queue_index = smp_processor_id();
593 	struct ice_vsi *vsi = np->vsi;
594 	struct ice_tx_ring *xdp_ring;
595 	int nxmit = 0, i;
596 
597 	if (test_bit(ICE_VSI_DOWN, vsi->state))
598 		return -ENETDOWN;
599 
600 	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
601 		return -ENXIO;
602 
603 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
604 		return -EINVAL;
605 
606 	if (static_branch_unlikely(&ice_xdp_locking_key)) {
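		/* fewer XDP Tx rings than CPUs means rings are shared between
		 * CPUs, so fold the CPU id onto the available rings and
		 * serialize access with the per-ring tx_lock
		 */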
607 		queue_index %= vsi->num_xdp_txq;
608 		xdp_ring = vsi->xdp_rings[queue_index];
609 		spin_lock(&xdp_ring->tx_lock);
610 	} else {
611 		xdp_ring = vsi->xdp_rings[queue_index];
612 	}
613 
614 	for (i = 0; i < n; i++) {
615 		struct xdp_frame *xdpf = frames[i];
616 		int err;
617 
618 		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
619 		if (err != ICE_XDP_TX)
620 			break;
621 		nxmit++;
622 	}
623 
624 	if (unlikely(flags & XDP_XMIT_FLUSH))
625 		ice_xdp_ring_update_tail(xdp_ring);
626 
627 	if (static_branch_unlikely(&ice_xdp_locking_key))
628 		spin_unlock(&xdp_ring->tx_lock);
629 
630 	return nxmit;
631 }
632 
633 /**
634  * ice_alloc_mapped_page - recycle or make a new page
635  * @rx_ring: ring to use
636  * @bi: rx_buf struct to modify
637  *
638  * Returns true if the page was successfully allocated or
639  * reused.
640  */
641 static bool
642 ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
643 {
644 	struct page *page = bi->page;
645 	dma_addr_t dma;
646 
647 	/* since we are recycling buffers we should seldom need to alloc */
648 	if (likely(page))
649 		return true;
650 
651 	/* alloc new page for storage */
652 	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
653 	if (unlikely(!page)) {
654 		rx_ring->rx_stats.alloc_page_failed++;
655 		return false;
656 	}
657 
658 	/* map page for use */
659 	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
660 				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
661 
662 	/* if mapping failed, free the memory back to the system since
663 	 * there isn't much point in holding memory we can't use
664 	 */
665 	if (dma_mapping_error(rx_ring->dev, dma)) {
666 		__free_pages(page, ice_rx_pg_order(rx_ring));
667 		rx_ring->rx_stats.alloc_page_failed++;
668 		return false;
669 	}
670 
671 	bi->dma = dma;
672 	bi->page = page;
673 	bi->page_offset = rx_ring->rx_offset;
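	/* take a large number of page references up front and track usage
	 * with pagecnt_bias; this avoids atomic refcount updates on every
	 * buffer reuse and only resyncs with page_count() when the bias is
	 * nearly exhausted
	 */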
674 	page_ref_add(page, USHRT_MAX - 1);
675 	bi->pagecnt_bias = USHRT_MAX;
676 
677 	return true;
678 }
679 
680 /**
681  * ice_alloc_rx_bufs - Replace used receive buffers
682  * @rx_ring: ring to place buffers on
683  * @cleaned_count: number of buffers to replace
684  *
685  * Returns false if all allocations were successful, true if any fail. Returning
686  * true signals to the caller that we didn't replace cleaned_count buffers and
687  * there is more work to do.
688  *
689  * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
690  * buffers. Then bump tail at most one time. Grouping like this lets us avoid
691  * multiple tail writes per call.
692  */
693 bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
694 {
695 	union ice_32b_rx_flex_desc *rx_desc;
696 	u16 ntu = rx_ring->next_to_use;
697 	struct ice_rx_buf *bi;
698 
699 	/* do nothing if no valid netdev defined */
700 	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
701 	    !cleaned_count)
702 		return false;
703 
704 	/* get the Rx descriptor and buffer based on next_to_use */
705 	rx_desc = ICE_RX_DESC(rx_ring, ntu);
706 	bi = &rx_ring->rx_buf[ntu];
707 
708 	do {
709 		/* if we fail here, we have work remaining */
710 		if (!ice_alloc_mapped_page(rx_ring, bi))
711 			break;
712 
713 		/* sync the buffer for use by the device */
714 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
715 						 bi->page_offset,
716 						 rx_ring->rx_buf_len,
717 						 DMA_FROM_DEVICE);
718 
719 		/* Refresh the desc even if buffer_addrs didn't change
720 		 * because each write-back erases this info.
721 		 */
722 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
723 
724 		rx_desc++;
725 		bi++;
726 		ntu++;
727 		if (unlikely(ntu == rx_ring->count)) {
728 			rx_desc = ICE_RX_DESC(rx_ring, 0);
729 			bi = rx_ring->rx_buf;
730 			ntu = 0;
731 		}
732 
733 		/* clear the status bits for the next_to_use descriptor */
734 		rx_desc->wb.status_error0 = 0;
735 
736 		cleaned_count--;
737 	} while (cleaned_count);
738 
739 	if (rx_ring->next_to_use != ntu)
740 		ice_release_rx_desc(rx_ring, ntu);
741 
742 	return !!cleaned_count;
743 }
744 
745 /**
746  * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
747  * @rx_buf: Rx buffer to adjust
748  * @size: Size of adjustment
749  *
750  * Update the offset within page so that Rx buf will be ready to be reused.
751  * For systems with PAGE_SIZE < 8192 this function will flip the page offset
752  * so the second half of the page assigned to the Rx buffer will be used;
753  * otherwise the offset is moved by "size" bytes.
754  */
755 static void
756 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
757 {
758 #if (PAGE_SIZE < 8192)
759 	/* flip page offset to other buffer */
760 	rx_buf->page_offset ^= size;
761 #else
762 	/* move offset up to the next cache line */
763 	rx_buf->page_offset += size;
764 #endif
765 }
766 
767 /**
768  * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
769  * @rx_buf: buffer containing the page
770  * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
771  *
772  * If page is reusable, we have a green light for calling ice_reuse_rx_page,
773  * which will assign the current buffer to the buffer that next_to_alloc is
774  * pointing to; otherwise, the DMA mapping needs to be destroyed and the
775  * page freed.
776  */
777 static bool
778 ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
779 {
780 	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
781 	struct page *page = rx_buf->page;
782 
783 	/* avoid re-using remote and pfmemalloc pages */
784 	if (!dev_page_is_reusable(page))
785 		return false;
786 
787 #if (PAGE_SIZE < 8192)
788 	/* if we are the only owner of the page we can reuse it */
789 	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
790 		return false;
791 #else
792 #define ICE_LAST_OFFSET \
793 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
794 	if (rx_buf->page_offset > ICE_LAST_OFFSET)
795 		return false;
796 #endif /* PAGE_SIZE < 8192) */
797 
798 	/* If we have drained the page fragment pool we need to update
799 	 * the pagecnt_bias and page count so that we fully restock the
800 	 * number of references the driver holds.
801 	 */
802 	if (unlikely(pagecnt_bias == 1)) {
803 		page_ref_add(page, USHRT_MAX - 1);
804 		rx_buf->pagecnt_bias = USHRT_MAX;
805 	}
806 
807 	return true;
808 }
809 
810 /**
811  * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
812  * @rx_ring: Rx descriptor ring to transact packets on
813  * @rx_buf: buffer containing page to add
814  * @skb: sk_buff to place the data into
815  * @size: packet length from rx_desc
816  *
817  * This function will add the data contained in rx_buf->page to the skb.
818  * It will just attach the page as a frag to the skb.
819  * The function will then update the page offset.
820  */
821 static void
822 ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
823 		struct sk_buff *skb, unsigned int size)
824 {
825 #if (PAGE_SIZE >= 8192)
826 	unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
827 #else
828 	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
829 #endif
830 
831 	if (!size)
832 		return;
833 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
834 			rx_buf->page_offset, size, truesize);
835 
836 	/* page is being used so we must update the page offset */
837 	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
838 }
839 
840 /**
841  * ice_reuse_rx_page - page flip buffer and store it back on the ring
842  * @rx_ring: Rx descriptor ring to store buffers on
843  * @old_buf: donor buffer to have page reused
844  *
845  * Synchronizes page for reuse by the adapter
846  */
847 static void
848 ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
849 {
850 	u16 nta = rx_ring->next_to_alloc;
851 	struct ice_rx_buf *new_buf;
852 
853 	new_buf = &rx_ring->rx_buf[nta];
854 
855 	/* update, and store next to alloc */
856 	nta++;
857 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
858 
859 	/* Transfer page from old buffer to new buffer.
860 	 * Move each member individually to avoid possible store
861 	 * forwarding stalls and unnecessary copy of skb.
862 	 */
863 	new_buf->dma = old_buf->dma;
864 	new_buf->page = old_buf->page;
865 	new_buf->page_offset = old_buf->page_offset;
866 	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
867 }
868 
869 /**
870  * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
871  * @rx_ring: Rx descriptor ring to transact packets on
872  * @size: size of buffer to add to skb
873  * @rx_buf_pgcnt: rx_buf page refcount
874  *
875  * This function will pull an Rx buffer from the ring and synchronize it
876  * for use by the CPU.
877  */
878 static struct ice_rx_buf *
879 ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
880 	       int *rx_buf_pgcnt)
881 {
882 	struct ice_rx_buf *rx_buf;
883 
884 	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
885 	*rx_buf_pgcnt =
886 #if (PAGE_SIZE < 8192)
887 		page_count(rx_buf->page);
888 #else
889 		0;
890 #endif
891 	prefetchw(rx_buf->page);
892 
893 	if (!size)
894 		return rx_buf;
895 	/* we are reusing so sync this buffer for CPU use */
896 	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
897 				      rx_buf->page_offset, size,
898 				      DMA_FROM_DEVICE);
899 
900 	/* We have pulled a buffer for use, so decrement pagecnt_bias */
901 	rx_buf->pagecnt_bias--;
902 
903 	return rx_buf;
904 }
905 
906 /**
907  * ice_build_skb - Build skb around an existing buffer
908  * @rx_ring: Rx descriptor ring to transact packets on
909  * @rx_buf: Rx buffer to pull data from
910  * @xdp: xdp_buff pointing to the data
911  *
912  * This function builds an skb around an existing Rx buffer, taking care
913  * to set up the skb correctly and avoid any memcpy overhead.
914  */
915 static struct sk_buff *
916 ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
917 	      struct xdp_buff *xdp)
918 {
919 	u8 metasize = xdp->data - xdp->data_meta;
920 #if (PAGE_SIZE < 8192)
921 	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
922 #else
923 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
924 				SKB_DATA_ALIGN(xdp->data_end -
925 					       xdp->data_hard_start);
926 #endif
927 	struct sk_buff *skb;
928 
929 	/* Prefetch first cache line of first page. If xdp->data_meta
930 	 * is unused, this is the same as xdp->data, otherwise we
931 	 * likely have a consumer accessing first few bytes of meta
932 	 * data, and then actual data.
933 	 */
934 	net_prefetch(xdp->data_meta);
935 	/* build an skb around the page buffer */
936 	skb = build_skb(xdp->data_hard_start, truesize);
937 	if (unlikely(!skb))
938 		return NULL;
939 
940 	/* we must record the Rx queue, otherwise OS features such as
941 	 * symmetric queues won't work
942 	 */
943 	skb_record_rx_queue(skb, rx_ring->q_index);
944 
945 	/* update pointers within the skb to store the data */
946 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
947 	__skb_put(skb, xdp->data_end - xdp->data);
948 	if (metasize)
949 		skb_metadata_set(skb, metasize);
950 
951 	/* buffer is used by skb, update page_offset */
952 	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
953 
954 	return skb;
955 }
956 
957 /**
958  * ice_construct_skb - Allocate skb and populate it
959  * @rx_ring: Rx descriptor ring to transact packets on
960  * @rx_buf: Rx buffer to pull data from
961  * @xdp: xdp_buff pointing to the data
962  *
963  * This function allocates an skb. It then populates it with the page
964  * data from the current receive descriptor, taking care to set up the
965  * skb correctly.
966  */
967 static struct sk_buff *
968 ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
969 		  struct xdp_buff *xdp)
970 {
971 	unsigned int size = xdp->data_end - xdp->data;
972 	unsigned int headlen;
973 	struct sk_buff *skb;
974 
975 	/* prefetch first cache line of first page */
976 	net_prefetch(xdp->data);
977 
978 	/* allocate a skb to store the frags */
979 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
980 			       GFP_ATOMIC | __GFP_NOWARN);
981 	if (unlikely(!skb))
982 		return NULL;
983 
984 	skb_record_rx_queue(skb, rx_ring->q_index);
985 	/* Determine available headroom for copy */
986 	headlen = size;
987 	if (headlen > ICE_RX_HDR_SIZE)
988 		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
989 
990 	/* align pull length to size of long to optimize memcpy performance */
991 	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
992 							 sizeof(long)));
993 
994 	/* if we exhaust the linear part then add what is left as a frag */
995 	size -= headlen;
996 	if (size) {
997 #if (PAGE_SIZE >= 8192)
998 		unsigned int truesize = SKB_DATA_ALIGN(size);
999 #else
1000 		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
1001 #endif
1002 		skb_add_rx_frag(skb, 0, rx_buf->page,
1003 				rx_buf->page_offset + headlen, size, truesize);
1004 		/* buffer is used by skb, update page_offset */
1005 		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
1006 	} else {
1007 		/* buffer is unused, reset bias back to rx_buf; data was copied
1008 		 * onto skb's linear part so there's no need for adjusting
1009 		 * page offset and we can reuse this buffer as-is
1010 		 */
1011 		rx_buf->pagecnt_bias++;
1012 	}
1013 
1014 	return skb;
1015 }
1016 
1017 /**
1018  * ice_put_rx_buf - Clean up used buffer and either recycle or free
1019  * @rx_ring: Rx descriptor ring to transact packets on
1020  * @rx_buf: Rx buffer to pull data from
1021  * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
1022  *
1023  * This function will update next_to_clean and then clean up the contents
1024  * of the rx_buf. It will either recycle the buffer or unmap it and free
1025  * the associated resources.
1026  */
1027 static void
1028 ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
1029 	       int rx_buf_pgcnt)
1030 {
1031 	u16 ntc = rx_ring->next_to_clean + 1;
1032 
1033 	/* fetch, update, and store next to clean */
1034 	ntc = (ntc < rx_ring->count) ? ntc : 0;
1035 	rx_ring->next_to_clean = ntc;
1036 
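	/* rx_buf is NULL when the descriptor carried no packet data, e.g. a
	 * Flow Director programming status descriptor
	 */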
1037 	if (!rx_buf)
1038 		return;
1039 
1040 	if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
1041 		/* hand second half of page back to the ring */
1042 		ice_reuse_rx_page(rx_ring, rx_buf);
1043 	} else {
1044 		/* we are not reusing the buffer so unmap it */
1045 		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1046 				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1047 				     ICE_RX_DMA_ATTR);
1048 		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1049 	}
1050 
1051 	/* clear contents of buffer_info */
1052 	rx_buf->page = NULL;
1053 }
1054 
1055 /**
1056  * ice_is_non_eop - process handling of non-EOP buffers
1057  * @rx_ring: Rx ring being processed
1058  * @rx_desc: Rx descriptor for current buffer
1059  *
1060  * If the buffer is an EOP buffer, this function exits returning false,
1061  * otherwise it returns true, indicating that this is in fact a non-EOP buffer.
1062  */
1063 static bool
1064 ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
1065 {
1066 	/* if we are the last buffer then there is nothing else to do */
1067 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
1068 	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
1069 		return false;
1070 
1071 	rx_ring->rx_stats.non_eop_descs++;
1072 
1073 	return true;
1074 }
1075 
1076 /**
1077  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1078  * @rx_ring: Rx descriptor ring to transact packets on
1079  * @budget: Total limit on number of packets to process
1080  *
1081  * This function provides a "bounce buffer" approach to Rx interrupt
1082  * processing. The advantage to this is that on systems that have
1083  * expensive overhead for IOMMU access this provides a means of avoiding
1084  * it by maintaining the mapping of the page to the system.
1085  *
1086  * Returns amount of work completed
1087  */
1088 int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
1089 {
1090 	unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
1091 	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1092 	unsigned int offset = rx_ring->rx_offset;
1093 	struct ice_tx_ring *xdp_ring = NULL;
1094 	unsigned int xdp_res, xdp_xmit = 0;
1095 	struct sk_buff *skb = rx_ring->skb;
1096 	struct bpf_prog *xdp_prog = NULL;
1097 	struct xdp_buff xdp;
1098 	bool failure;
1099 
1100 	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
1101 #if (PAGE_SIZE < 8192)
1102 	frame_sz = ice_rx_frame_truesize(rx_ring, 0);
1103 #endif
1104 	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
1105 
1106 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1107 	if (xdp_prog)
1108 		xdp_ring = rx_ring->xdp_ring;
1109 
1110 	/* start the loop to process Rx packets bounded by 'budget' */
1111 	while (likely(total_rx_pkts < (unsigned int)budget)) {
1112 		union ice_32b_rx_flex_desc *rx_desc;
1113 		struct ice_rx_buf *rx_buf;
1114 		unsigned char *hard_start;
1115 		unsigned int size;
1116 		u16 stat_err_bits;
1117 		int rx_buf_pgcnt;
1118 		u16 vlan_tag = 0;
1119 		u16 rx_ptype;
1120 
1121 		/* get the Rx desc from Rx ring based on 'next_to_clean' */
1122 		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1123 
1124 		/* status_error_len will always be zero for unused descriptors
1125 		 * because it's cleared in cleanup, and overlaps with hdr_addr
1126 		 * which is always zero because packet split isn't used. If the
1127 		 * hardware wrote the DD bit then it will be non-zero.
1128 		 */
1129 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1130 		if (!ice_test_staterr(rx_desc, stat_err_bits))
1131 			break;
1132 
1133 		/* This memory barrier is needed to keep us from reading
1134 		 * any other fields out of the rx_desc until we know the
1135 		 * DD bit is set.
1136 		 */
1137 		dma_rmb();
1138 
1139 		ice_trace(clean_rx_irq, rx_ring, rx_desc);
1140 		if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1141 			struct ice_vsi *ctrl_vsi = rx_ring->vsi;
1142 
1143 			if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
1144 			    ctrl_vsi->vf_id != ICE_INVAL_VFID)
1145 				ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
1146 			ice_put_rx_buf(rx_ring, NULL, 0);
1147 			cleaned_count++;
1148 			continue;
1149 		}
1150 
1151 		size = le16_to_cpu(rx_desc->wb.pkt_len) &
1152 			ICE_RX_FLX_DESC_PKT_LEN_M;
1153 
1154 		/* retrieve a buffer from the ring */
1155 		rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
1156 
1157 		if (!size) {
1158 			xdp.data = NULL;
1159 			xdp.data_end = NULL;
1160 			xdp.data_hard_start = NULL;
1161 			xdp.data_meta = NULL;
1162 			goto construct_skb;
1163 		}
1164 
1165 		hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1166 			     offset;
1167 		xdp_prepare_buff(&xdp, hard_start, offset, size, true);
1168 #if (PAGE_SIZE > 4096)
1169 		/* At larger PAGE_SIZE, frame_sz depends on the frame length */
1170 		xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1171 #endif
1172 
1173 		if (!xdp_prog)
1174 			goto construct_skb;
1175 
1176 		xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
1177 		if (!xdp_res)
1178 			goto construct_skb;
1179 		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1180 			xdp_xmit |= xdp_res;
1181 			ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
1182 		} else {
1183 			rx_buf->pagecnt_bias++;
1184 		}
1185 		total_rx_bytes += size;
1186 		total_rx_pkts++;
1187 
1188 		cleaned_count++;
1189 		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1190 		continue;
1191 construct_skb:
1192 		if (skb) {
1193 			ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1194 		} else if (likely(xdp.data)) {
1195 			if (ice_ring_uses_build_skb(rx_ring))
1196 				skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1197 			else
1198 				skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1199 		}
1200 		/* exit if we failed to retrieve a buffer */
1201 		if (!skb) {
1202 			rx_ring->rx_stats.alloc_buf_failed++;
1203 			if (rx_buf)
1204 				rx_buf->pagecnt_bias++;
1205 			break;
1206 		}
1207 
1208 		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1209 		cleaned_count++;
1210 
1211 		/* skip if it is NOP desc */
1212 		if (ice_is_non_eop(rx_ring, rx_desc))
1213 			continue;
1214 
1215 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1216 		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1217 			dev_kfree_skb_any(skb);
1218 			continue;
1219 		}
1220 
1221 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1222 		if (ice_test_staterr(rx_desc, stat_err_bits))
1223 			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1224 
1225 		/* pad the skb if needed, to make a valid ethernet frame */
1226 		if (eth_skb_pad(skb)) {
1227 			skb = NULL;
1228 			continue;
1229 		}
1230 
1231 		/* probably a little skewed due to removing CRC */
1232 		total_rx_bytes += skb->len;
1233 
1234 		/* populate checksum, VLAN, and protocol */
1235 		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1236 			ICE_RX_FLEX_DESC_PTYPE_M;
1237 
1238 		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1239 
1240 		ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
1241 		/* send completed skb up the stack */
1242 		ice_receive_skb(rx_ring, skb, vlan_tag);
1243 		skb = NULL;
1244 
1245 		/* update budget accounting */
1246 		total_rx_pkts++;
1247 	}
1248 
1249 	/* return up to cleaned_count buffers to hardware */
1250 	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1251 
1252 	if (xdp_prog)
1253 		ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
1254 	rx_ring->skb = skb;
1255 
1256 	ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
1257 
1258 	/* guarantee a trip back through this routine if there was a failure */
1259 	return failure ? budget : (int)total_rx_pkts;
1260 }
1261 
1262 /**
1263  * ice_net_dim - Update net DIM algorithm
1264  * @q_vector: the vector associated with the interrupt
1265  *
1266  * Create a DIM sample and notify net_dim() so that it can possibly decide
1267  * a new ITR value based on incoming packets, bytes, and interrupts.
1268  *
1269  * This function is a no-op if the ring is not configured to dynamic ITR.
1270  */
1271 static void ice_net_dim(struct ice_q_vector *q_vector)
1272 {
1273 	struct ice_ring_container *tx = &q_vector->tx;
1274 	struct ice_ring_container *rx = &q_vector->rx;
1275 
1276 	if (ITR_IS_DYNAMIC(tx)) {
1277 		struct dim_sample dim_sample = {};
1278 		u64 packets = 0, bytes = 0;
1279 		struct ice_tx_ring *ring;
1280 
1281 		ice_for_each_tx_ring(ring, q_vector->tx) {
1282 			packets += ring->stats.pkts;
1283 			bytes += ring->stats.bytes;
1284 		}
1285 
1286 		dim_update_sample(q_vector->total_events, packets, bytes,
1287 				  &dim_sample);
1288 
1289 		net_dim(&tx->dim, dim_sample);
1290 	}
1291 
1292 	if (ITR_IS_DYNAMIC(rx)) {
1293 		struct dim_sample dim_sample = {};
1294 		u64 packets = 0, bytes = 0;
1295 		struct ice_rx_ring *ring;
1296 
1297 		ice_for_each_rx_ring(ring, q_vector->rx) {
1298 			packets += ring->stats.pkts;
1299 			bytes += ring->stats.bytes;
1300 		}
1301 
1302 		dim_update_sample(q_vector->total_events, packets, bytes,
1303 				  &dim_sample);
1304 
1305 		net_dim(&rx->dim, dim_sample);
1306 	}
1307 }
1308 
1309 /**
1310  * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1311  * @itr_idx: interrupt throttling index
1312  * @itr: interrupt throttling value in usecs
1313  */
1314 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1315 {
1316 	/* The ITR value is reported in microseconds, and the register value is
1317 	 * recorded in 2 microsecond units. For this reason we only need to
1318 	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1319 	 * granularity as a shift instead of division. The mask makes sure the
1320 	 * ITR value is never odd so we don't accidentally write into the field
1321 	 * prior to the ITR field.
1322 	 */
1323 	itr &= ICE_ITR_MASK;
1324 
1325 	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1326 		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1327 		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1328 }
1329 
1330 /**
1331  * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt
1332  * @q_vector: the vector associated with the interrupt to enable
1333  *
1334  * Update the net_dim() algorithm and re-enable the interrupt associated with
1335  * this vector.
1336  *
1337  * If the VSI is down, the interrupt will not be re-enabled.
1338  */
1339 static void ice_update_ena_itr(struct ice_q_vector *q_vector)
1340 {
1341 	struct ice_vsi *vsi = q_vector->vsi;
1342 	bool wb_en = q_vector->wb_on_itr;
1343 	u32 itr_val;
1344 
1345 	if (test_bit(ICE_DOWN, vsi->state))
1346 		return;
1347 
1348 	/* When exiting WB_ON_ITR, let ITR resume its normal
1349 	 * interrupts-enabled path.
1350 	 */
1351 	if (wb_en)
1352 		q_vector->wb_on_itr = false;
1353 
1354 	/* This will do nothing if dynamic updates are not enabled. */
1355 	ice_net_dim(q_vector);
1356 
1357 	/* net_dim() updates ITR out-of-band using a work item */
1358 	itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1359 	/* trigger an immediate software interrupt when exiting
1360 	 * busy poll, to make sure to catch any pending cleanups
1361 	 * that might have been missed due to interrupt state
1362 	 * transition.
1363 	 */
1364 	if (wb_en) {
1365 		itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
1366 			   GLINT_DYN_CTL_SW_ITR_INDX_M |
1367 			   GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
1368 	}
1369 	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1370 }
1371 
1372 /**
1373  * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1374  * @q_vector: q_vector to set WB_ON_ITR on
1375  *
1376  * We need to tell hardware to write-back completed descriptors even when
1377  * interrupts are disabled. Descriptors will be written back on cache line
1378  * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1379  * descriptors may not be written back if they don't fill a cache line until
1380  * the next interrupt.
1381  *
1382  * This sets the write-back frequency to whatever was set previously for the
1383  * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
1384  * aren't meddling with the INTENA_M bit.
1385  */
1386 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1387 {
1388 	struct ice_vsi *vsi = q_vector->vsi;
1389 
1390 	/* already in wb_on_itr mode no need to change it */
1391 	if (q_vector->wb_on_itr)
1392 		return;
1393 
1394 	/* use previously set ITR values for all of the ITR indices by
1395 	 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
1396 	 * be static in non-adaptive mode (user configured)
1397 	 */
1398 	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1399 	     ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
1400 	      GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
1401 	     GLINT_DYN_CTL_WB_ON_ITR_M);
1402 
1403 	q_vector->wb_on_itr = true;
1404 }
1405 
1406 /**
1407  * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1408  * @napi: napi struct with our devices info in it
1409  * @budget: amount of work driver is allowed to do this pass, in packets
1410  *
1411  * This function will clean all queues associated with a q_vector.
1412  *
1413  * Returns the amount of work done
1414  */
1415 int ice_napi_poll(struct napi_struct *napi, int budget)
1416 {
1417 	struct ice_q_vector *q_vector =
1418 				container_of(napi, struct ice_q_vector, napi);
1419 	struct ice_tx_ring *tx_ring;
1420 	struct ice_rx_ring *rx_ring;
1421 	bool clean_complete = true;
1422 	int budget_per_ring;
1423 	int work_done = 0;
1424 
1425 	/* Since the actual Tx work is minimal, we can give the Tx a larger
1426 	 * budget and be more aggressive about cleaning up the Tx descriptors.
1427 	 */
1428 	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
1429 		bool wd;
1430 
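		/* XDP Tx rings (without an AF_XDP pool) are cleaned from the
		 * transmit path, so there is no completion work for them here
		 */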
1431 		if (tx_ring->xsk_pool)
1432 			wd = ice_clean_tx_irq_zc(tx_ring, budget);
1433 		else if (ice_ring_is_xdp(tx_ring))
1434 			wd = true;
1435 		else
1436 			wd = ice_clean_tx_irq(tx_ring, budget);
1437 
1438 		if (!wd)
1439 			clean_complete = false;
1440 	}
1441 
1442 	/* Handle case where we are called by netpoll with a budget of 0 */
1443 	if (unlikely(budget <= 0))
1444 		return budget;
1445 
1446 	/* normally we have 1 Rx ring per q_vector */
1447 	if (unlikely(q_vector->num_ring_rx > 1))
1448 		/* We attempt to distribute budget to each Rx queue fairly, but
1449 		 * don't allow the budget to go below 1 because that would exit
1450 		 * polling early.
1451 		 */
1452 		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1453 	else
1454 		/* Max of 1 Rx ring in this q_vector so give it the budget */
1455 		budget_per_ring = budget;
1456 
1457 	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
1458 		int cleaned;
1459 
1460 		/* A dedicated path for zero-copy allows making a single
1461 		 * comparison in the irq context instead of many inside the
1462 		 * ice_clean_rx_irq function and makes the codebase cleaner.
1463 		 */
1464 		cleaned = rx_ring->xsk_pool ?
1465 			  ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
1466 			  ice_clean_rx_irq(rx_ring, budget_per_ring);
1467 		work_done += cleaned;
1468 		/* if we clean as many as budgeted, we must not be done */
1469 		if (cleaned >= budget_per_ring)
1470 			clean_complete = false;
1471 	}
1472 
1473 	/* If work not completed, return budget and polling will return */
1474 	if (!clean_complete) {
1475 		/* Set the writeback on ITR so partial completions of
1476 		 * cache-lines will still continue even if we're polling.
1477 		 */
1478 		ice_set_wb_on_itr(q_vector);
1479 		return budget;
1480 	}
1481 
1482 	/* Exit the polling mode, but don't re-enable interrupts if stack might
1483 	 * poll us due to busy-polling
1484 	 */
1485 	if (likely(napi_complete_done(napi, work_done)))
1486 		ice_update_ena_itr(q_vector);
1487 	else
1488 		ice_set_wb_on_itr(q_vector);
1489 
1490 	return min_t(int, work_done, budget - 1);
1491 }
1492 
1493 /**
1494  * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1495  * @tx_ring: the ring to be checked
1496  * @size: the number of descriptors we want to ensure are available
1497  *
1498  * Returns -EBUSY if a stop is needed, else 0
1499  */
1500 static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1501 {
1502 	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1503 	/* Memory barrier before checking head and tail */
1504 	smp_mb();
1505 
1506 	/* Check again in case another CPU has just made room available. */
1507 	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1508 		return -EBUSY;
1509 
1510 	/* A reprieve! - use start_subqueue because it doesn't call schedule */
1511 	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1512 	++tx_ring->tx_stats.restart_q;
1513 	return 0;
1514 }
1515 
1516 /**
1517  * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1518  * @tx_ring: the ring to be checked
1519  * @size:    the number of descriptors we want to ensure are available
1520  *
1521  * Returns 0 if stop is not needed
1522  */
1523 static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1524 {
1525 	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1526 		return 0;
1527 
1528 	return __ice_maybe_stop_tx(tx_ring, size);
1529 }
1530 
1531 /**
1532  * ice_tx_map - Build the Tx descriptor
1533  * @tx_ring: ring to send buffer on
1534  * @first: first buffer info buffer to use
1535  * @off: pointer to struct that holds offload parameters
1536  *
1537  * This function loops over the skb data pointed to by *first
1538  * and gets a physical address for each memory location and programs
1539  * it and the length into the transmit descriptor.
1540  */
1541 static void
1542 ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
1543 	   struct ice_tx_offload_params *off)
1544 {
1545 	u64 td_offset, td_tag, td_cmd;
1546 	u16 i = tx_ring->next_to_use;
1547 	unsigned int data_len, size;
1548 	struct ice_tx_desc *tx_desc;
1549 	struct ice_tx_buf *tx_buf;
1550 	struct sk_buff *skb;
1551 	skb_frag_t *frag;
1552 	dma_addr_t dma;
1553 
1554 	td_tag = off->td_l2tag1;
1555 	td_cmd = off->td_cmd;
1556 	td_offset = off->td_offset;
1557 	skb = first->skb;
1558 
1559 	data_len = skb->data_len;
1560 	size = skb_headlen(skb);
1561 
1562 	tx_desc = ICE_TX_DESC(tx_ring, i);
1563 
1564 	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1565 		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1566 		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1567 			  ICE_TX_FLAGS_VLAN_S;
1568 	}
1569 
1570 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1571 
1572 	tx_buf = first;
1573 
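	/* the first pass through the loop below programs the already mapped
	 * skb head; each subsequent pass maps and programs one frag
	 */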
1574 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1575 		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1576 
1577 		if (dma_mapping_error(tx_ring->dev, dma))
1578 			goto dma_error;
1579 
1580 		/* record length, and DMA address */
1581 		dma_unmap_len_set(tx_buf, len, size);
1582 		dma_unmap_addr_set(tx_buf, dma, dma);
1583 
1584 		/* align size to end of page */
1585 		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1586 		tx_desc->buf_addr = cpu_to_le64(dma);
1587 
1588 		/* account for data chunks larger than the hardware
1589 		 * can handle
1590 		 */
1591 		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1592 			tx_desc->cmd_type_offset_bsz =
1593 				ice_build_ctob(td_cmd, td_offset, max_data,
1594 					       td_tag);
1595 
1596 			tx_desc++;
1597 			i++;
1598 
1599 			if (i == tx_ring->count) {
1600 				tx_desc = ICE_TX_DESC(tx_ring, 0);
1601 				i = 0;
1602 			}
1603 
1604 			dma += max_data;
1605 			size -= max_data;
1606 
1607 			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1608 			tx_desc->buf_addr = cpu_to_le64(dma);
1609 		}
1610 
1611 		if (likely(!data_len))
1612 			break;
1613 
1614 		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1615 							      size, td_tag);
1616 
1617 		tx_desc++;
1618 		i++;
1619 
1620 		if (i == tx_ring->count) {
1621 			tx_desc = ICE_TX_DESC(tx_ring, 0);
1622 			i = 0;
1623 		}
1624 
1625 		size = skb_frag_size(frag);
1626 		data_len -= size;
1627 
1628 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1629 				       DMA_TO_DEVICE);
1630 
1631 		tx_buf = &tx_ring->tx_buf[i];
1632 	}
1633 
1634 	/* record bytecount for BQL */
1635 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1636 
1637 	/* record SW timestamp if HW timestamp is not available */
1638 	skb_tx_timestamp(first->skb);
1639 
1640 	i++;
1641 	if (i == tx_ring->count)
1642 		i = 0;
1643 
1644 	/* write last descriptor with RS and EOP bits */
1645 	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1646 	tx_desc->cmd_type_offset_bsz =
1647 			ice_build_ctob(td_cmd, td_offset, size, td_tag);
1648 
1649 	/* Force memory writes to complete before letting h/w know there
1650 	 * are new descriptors to fetch.
1651 	 *
1652 	 * We also use this memory barrier to make certain all of the
1653 	 * status bits have been updated before next_to_watch is written.
1654 	 */
1655 	wmb();
1656 
1657 	/* set next_to_watch value indicating a packet is present */
1658 	first->next_to_watch = tx_desc;
1659 
1660 	tx_ring->next_to_use = i;
1661 
1662 	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1663 
1664 	/* notify HW of packet */
1665 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
1666 		writel(i, tx_ring->tail);
1667 
1668 	return;
1669 
1670 dma_error:
1671 	/* clear DMA mappings for failed tx_buf map */
1672 	for (;;) {
1673 		tx_buf = &tx_ring->tx_buf[i];
1674 		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1675 		if (tx_buf == first)
1676 			break;
1677 		if (i == 0)
1678 			i = tx_ring->count;
1679 		i--;
1680 	}
1681 
1682 	tx_ring->next_to_use = i;
1683 }
1684 
1685 /**
1686  * ice_tx_csum - Enable Tx checksum offloads
1687  * @first: pointer to the first descriptor
1688  * @off: pointer to struct that holds offload parameters
1689  *
1690  * Returns 1 if checksum offload was set up, 0 if no offload is needed, or a negative error if it can't happen.
1691  */
1692 static
1693 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1694 {
1695 	u32 l4_len = 0, l3_len = 0, l2_len = 0;
1696 	struct sk_buff *skb = first->skb;
1697 	union {
1698 		struct iphdr *v4;
1699 		struct ipv6hdr *v6;
1700 		unsigned char *hdr;
1701 	} ip;
1702 	union {
1703 		struct tcphdr *tcp;
1704 		unsigned char *hdr;
1705 	} l4;
1706 	__be16 frag_off, protocol;
1707 	unsigned char *exthdr;
1708 	u32 offset, cmd = 0;
1709 	u8 l4_proto = 0;
1710 
1711 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1712 		return 0;
1713 
1714 	ip.hdr = skb_network_header(skb);
1715 	l4.hdr = skb_transport_header(skb);
1716 
1717 	/* compute outer L2 header size */
1718 	l2_len = ip.hdr - skb->data;
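	/* MACLEN in the Tx descriptor is expressed in 2-byte words, hence the
	 * division by 2
	 */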
1719 	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1720 
1721 	protocol = vlan_get_protocol(skb);
1722 
1723 	if (protocol == htons(ETH_P_IP))
1724 		first->tx_flags |= ICE_TX_FLAGS_IPV4;
1725 	else if (protocol == htons(ETH_P_IPV6))
1726 		first->tx_flags |= ICE_TX_FLAGS_IPV6;
1727 
1728 	if (skb->encapsulation) {
1729 		bool gso_ena = false;
1730 		u32 tunnel = 0;
1731 
1732 		/* define outer network header type */
1733 		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1734 			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1735 				  ICE_TX_CTX_EIPT_IPV4 :
1736 				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1737 			l4_proto = ip.v4->protocol;
1738 		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1739 			int ret;
1740 
1741 			tunnel |= ICE_TX_CTX_EIPT_IPV6;
1742 			exthdr = ip.hdr + sizeof(*ip.v6);
1743 			l4_proto = ip.v6->nexthdr;
1744 			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
1745 					       &l4_proto, &frag_off);
1746 			if (ret < 0)
1747 				return -1;
1748 		}
1749 
1750 		/* define outer transport */
1751 		switch (l4_proto) {
1752 		case IPPROTO_UDP:
1753 			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1754 			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1755 			break;
1756 		case IPPROTO_GRE:
1757 			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1758 			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1759 			break;
1760 		case IPPROTO_IPIP:
1761 		case IPPROTO_IPV6:
1762 			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1763 			l4.hdr = skb_inner_network_header(skb);
1764 			break;
1765 		default:
1766 			if (first->tx_flags & ICE_TX_FLAGS_TSO)
1767 				return -1;
1768 
1769 			skb_checksum_help(skb);
1770 			return 0;
1771 		}
1772 
1773 		/* compute outer L3 header size */
1774 		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1775 			  ICE_TXD_CTX_QW0_EIPLEN_S;
1776 
1777 		/* switch IP header pointer from outer to inner header */
1778 		ip.hdr = skb_inner_network_header(skb);
1779 
1780 		/* compute tunnel header size */
1781 		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1782 			   ICE_TXD_CTX_QW0_NATLEN_S;
1783 
1784 		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1785 		/* indicate if we need to offload outer UDP header */
1786 		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1787 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1788 			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1789 
1790 		/* record tunnel offload values */
1791 		off->cd_tunnel_params |= tunnel;
1792 
1793 		/* set DTYP=1 to indicate that it's a Tx context descriptor
1794 		 * in IPsec tunnel mode with Tx offloads in Quad word 1
1795 		 */
1796 		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1797 
1798 		/* switch L4 header pointer from outer to inner */
1799 		l4.hdr = skb_inner_transport_header(skb);
1800 		l4_proto = 0;
1801 
1802 		/* reset type as we transition from outer to inner headers */
1803 		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1804 		if (ip.v4->version == 4)
1805 			first->tx_flags |= ICE_TX_FLAGS_IPV4;
1806 		if (ip.v6->version == 6)
1807 			first->tx_flags |= ICE_TX_FLAGS_IPV6;
1808 	}
1809 
1810 	/* Enable IP checksum offloads */
1811 	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1812 		l4_proto = ip.v4->protocol;
1813 		/* the stack computes the IP header already, the only time we
1814 		 * need the hardware to recompute it is in the case of TSO.
1815 		 */
1816 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1817 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1818 		else
1819 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1820 
1821 	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1822 		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1823 		exthdr = ip.hdr + sizeof(*ip.v6);
1824 		l4_proto = ip.v6->nexthdr;
1825 		if (l4.hdr != exthdr)
1826 			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1827 					 &frag_off);
1828 	} else {
1829 		return -1;
1830 	}
1831 
1832 	/* compute inner L3 header size */
1833 	l3_len = l4.hdr - ip.hdr;
1834 	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
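	/* Illustrative example: a 20-byte IPv4 header with no options gives
	 * l3_len = 20 and an IPLEN of 5, since IPLEN counts the L3 header
	 * in 4-byte dwords.
	 */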
1835 
1836 	/* Enable L4 checksum offloads */
1837 	switch (l4_proto) {
1838 	case IPPROTO_TCP:
1839 		/* enable checksum offloads */
1840 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1841 		l4_len = l4.tcp->doff;
1842 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1843 		break;
1844 	case IPPROTO_UDP:
1845 		/* enable UDP checksum offload */
1846 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1847 		l4_len = (sizeof(struct udphdr) >> 2);
1848 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1849 		break;
1850 	case IPPROTO_SCTP:
1851 		/* enable SCTP checksum offload */
1852 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1853 		l4_len = sizeof(struct sctphdr) >> 2;
1854 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1855 		break;
1856 
1857 	default:
1858 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1859 			return -1;
1860 		skb_checksum_help(skb);
1861 		return 0;
1862 	}
1863 
1864 	off->td_cmd |= cmd;
1865 	off->td_offset |= offset;
1866 	return 1;
1867 }
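
/* Illustrative example (not part of the driver, assuming a non-tunneled
 * TCP/IPv4 packet with no IP or TCP options): ice_tx_csum() ends up with
 * MACLEN = 7 (14-byte MAC header in 2-byte words), IPLEN = 5 (20-byte IP
 * header in dwords) and L4LEN = 5 (20-byte TCP header, doff = 5) in
 * td_offset, and the IIPT_IPV4 plus L4T_EOFT_TCP commands in td_cmd.
 */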
1868 
1869 /**
1870  * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1871  * @tx_ring: ring to send buffer on
1872  * @first: pointer to struct ice_tx_buf
1873  *
 * Checks the skb and sets up the corresponding generic transmit flags
 * related to VLAN tagging for the HW (VLAN, DCB, etc.).
1876  */
1877 static void
1878 ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
1879 {
1880 	struct sk_buff *skb = first->skb;
1881 
1882 	/* nothing left to do, software offloaded VLAN */
1883 	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
1884 		return;
1885 
1886 	/* currently, we always assume 802.1Q for VLAN insertion as VLAN
1887 	 * insertion for 802.1AD is not supported
1888 	 */
1889 	if (skb_vlan_tag_present(skb)) {
1890 		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1891 		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1892 	}
1893 
1894 	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1895 }
1896 
1897 /**
1898  * ice_tso - computes mss and TSO length to prepare for TSO
1899  * @first: pointer to struct ice_tx_buf
1900  * @off: pointer to struct that holds offload parameters
1901  *
 * Returns 1 if TSO was set up, 0 if TSO is not needed, or a negative error
 * on failure.
1903  */
1904 static
1905 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1906 {
1907 	struct sk_buff *skb = first->skb;
1908 	union {
1909 		struct iphdr *v4;
1910 		struct ipv6hdr *v6;
1911 		unsigned char *hdr;
1912 	} ip;
1913 	union {
1914 		struct tcphdr *tcp;
1915 		struct udphdr *udp;
1916 		unsigned char *hdr;
1917 	} l4;
1918 	u64 cd_mss, cd_tso_len;
1919 	u32 paylen;
1920 	u8 l4_start;
1921 	int err;
1922 
1923 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1924 		return 0;
1925 
1926 	if (!skb_is_gso(skb))
1927 		return 0;
1928 
1929 	err = skb_cow_head(skb, 0);
1930 	if (err < 0)
1931 		return err;
1932 
1933 	/* cppcheck-suppress unreadVariable */
1934 	ip.hdr = skb_network_header(skb);
1935 	l4.hdr = skb_transport_header(skb);
1936 
1937 	/* initialize outer IP header fields */
1938 	if (ip.v4->version == 4) {
1939 		ip.v4->tot_len = 0;
1940 		ip.v4->check = 0;
1941 	} else {
1942 		ip.v6->payload_len = 0;
1943 	}
1944 
1945 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1946 					 SKB_GSO_GRE_CSUM |
1947 					 SKB_GSO_IPXIP4 |
1948 					 SKB_GSO_IPXIP6 |
1949 					 SKB_GSO_UDP_TUNNEL |
1950 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
1951 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1952 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1953 			l4.udp->len = 0;
1954 
1955 			/* determine offset of outer transport header */
1956 			l4_start = (u8)(l4.hdr - skb->data);
1957 
1958 			/* remove payload length from outer checksum */
1959 			paylen = skb->len - l4_start;
1960 			csum_replace_by_diff(&l4.udp->check,
1961 					     (__force __wsum)htonl(paylen));
1962 		}
1963 
1964 		/* reset pointers to inner headers */
1965 
1966 		/* cppcheck-suppress unreadVariable */
1967 		ip.hdr = skb_inner_network_header(skb);
1968 		l4.hdr = skb_inner_transport_header(skb);
1969 
1970 		/* initialize inner IP header fields */
1971 		if (ip.v4->version == 4) {
1972 			ip.v4->tot_len = 0;
1973 			ip.v4->check = 0;
1974 		} else {
1975 			ip.v6->payload_len = 0;
1976 		}
1977 	}
1978 
1979 	/* determine offset of transport header */
1980 	l4_start = (u8)(l4.hdr - skb->data);
1981 
1982 	/* remove payload length from checksum */
1983 	paylen = skb->len - l4_start;
1984 
1985 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1986 		csum_replace_by_diff(&l4.udp->check,
1987 				     (__force __wsum)htonl(paylen));
1988 		/* compute length of UDP segmentation header */
		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
1990 	} else {
1991 		csum_replace_by_diff(&l4.tcp->check,
1992 				     (__force __wsum)htonl(paylen));
1993 		/* compute length of TCP segmentation header */
1994 		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
1995 	}
1996 
1997 	/* update gso_segs and bytecount */
1998 	first->gso_segs = skb_shinfo(skb)->gso_segs;
1999 	first->bytecount += (first->gso_segs - 1) * off->header_len;
2000 
2001 	cd_tso_len = skb->len - off->header_len;
2002 	cd_mss = skb_shinfo(skb)->gso_size;
2003 
2004 	/* record cdesc_qw1 with TSO parameters */
2005 	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2006 			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2007 			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2008 			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2009 	first->tx_flags |= ICE_TX_FLAGS_TSO;
2010 	return 1;
2011 }
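
/* Illustrative example (hypothetical values): a TSO skb with an MSS of
 * 1448 and 14-byte MAC, 20-byte IPv4 and 20-byte TCP headers gives
 * header_len = 54, cd_tso_len = skb->len - 54 and cd_mss = 1448, all of
 * which ice_tso() packs into cd_qw1 along with the CTX dtype and the TSO
 * command.
 */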
2012 
2013 /**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
2015  * @size: transmit request size in bytes
2016  *
2017  * Due to hardware alignment restrictions (4K alignment), we need to
2018  * assume that we can have no more than 12K of data per descriptor, even
2019  * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2020  * Thus, we need to divide by 12K. But division is slow! Instead,
2021  * we decompose the operation into shifts and one relatively cheap
2022  * multiply operation.
2023  *
2024  * To divide by 12K, we first divide by 4K, then divide by 3:
2025  *     To divide by 4K, shift right by 12 bits
2026  *     To divide by 3, multiply by 85, then divide by 256
2027  *     (Divide by 256 is done by shifting right by 8 bits)
2028  * Finally, we add one to round up. Because 256 isn't an exact multiple of
2029  * 3, we'll underestimate near each multiple of 12K. This is actually more
2030  * accurate as we have 4K - 1 of wiggle room that we can fit into the last
2031  * segment. For our purposes this is accurate out to 1M which is orders of
2032  * magnitude greater than our largest possible GSO size.
2033  *
2034  * This would then be implemented as:
2035  *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
2036  *
2037  * Since multiplication and division are commutative, we can reorder
2038  * operations into:
2039  *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2040  */
2041 static unsigned int ice_txd_use_count(unsigned int size)
2042 {
2043 	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2044 }
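
/* Illustrative example: ice_txd_use_count(32768) computes
 * (32768 * 85) >> 20 = 2 and adds ICE_DESCS_FOR_SKB_DATA_PTR (one, for
 * the skb head pointer), i.e. 3 descriptors, matching ceil(32K / 12K).
 * Just above a 12K multiple the estimate dips low (e.g. 12289 bytes
 * yields 1 rather than 2), which the comment above explains is safe
 * because of the 4K - 1 bytes of slack in the last segment.
 */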
2045 
2046 /**
2047  * ice_xmit_desc_count - calculate number of Tx descriptors needed
2048  * @skb: send buffer
2049  *
2050  * Returns number of data descriptors needed for this skb.
2051  */
2052 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2053 {
2054 	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2055 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2056 	unsigned int count = 0, size = skb_headlen(skb);
2057 
2058 	for (;;) {
2059 		count += ice_txd_use_count(size);
2060 
2061 		if (!nr_frags--)
2062 			break;
2063 
2064 		size = skb_frag_size(frag++);
2065 	}
2066 
2067 	return count;
2068 }
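
/* Illustrative example (hypothetical skb layout): a 256-byte linear head
 * plus three 4K page fragments is counted as 1 + 1 + 1 + 1 = 4 data
 * descriptors, since each piece is well under the 12K-per-descriptor
 * bound assumed by ice_txd_use_count().
 */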
2069 
2070 /**
2071  * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2072  * @skb: send buffer
2073  *
2074  * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2075  * and so we need to figure out the cases where we need to linearize the skb.
2076  *
2077  * For TSO we need to count the TSO header and segment payload separately.
2078  * As such we need to check cases where we have 7 fragments or more as we
2079  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2080  * the segment payload in the first descriptor, and another 7 for the
2081  * fragments.
2082  */
2083 static bool __ice_chk_linearize(struct sk_buff *skb)
2084 {
2085 	const skb_frag_t *frag, *stale;
2086 	int nr_frags, sum;
2087 
2088 	/* no need to check if number of frags is less than 7 */
2089 	nr_frags = skb_shinfo(skb)->nr_frags;
2090 	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2091 		return false;
2092 
2093 	/* We need to walk through the list and validate that each group
2094 	 * of 6 fragments totals at least gso_size.
2095 	 */
2096 	nr_frags -= ICE_MAX_BUF_TXD - 2;
2097 	frag = &skb_shinfo(skb)->frags[0];
2098 
2099 	/* Initialize size to the negative value of gso_size minus 1. We
2100 	 * use this as the worst case scenario in which the frag ahead
2101 	 * of us only provides one byte which is why we are limited to 6
2102 	 * descriptors for a single transmit as the header and previous
2103 	 * fragment are already consuming 2 descriptors.
2104 	 */
2105 	sum = 1 - skb_shinfo(skb)->gso_size;
2106 
2107 	/* Add size of frags 0 through 4 to create our initial sum */
2108 	sum += skb_frag_size(frag++);
2109 	sum += skb_frag_size(frag++);
2110 	sum += skb_frag_size(frag++);
2111 	sum += skb_frag_size(frag++);
2112 	sum += skb_frag_size(frag++);
2113 
2114 	/* Walk through fragments adding latest fragment, testing it, and
2115 	 * then removing stale fragments from the sum.
2116 	 */
2117 	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2118 		int stale_size = skb_frag_size(stale);
2119 
2120 		sum += skb_frag_size(frag++);
2121 
2122 		/* The stale fragment may present us with a smaller
2123 		 * descriptor than the actual fragment size. To account
2124 		 * for that we need to remove all the data on the front and
2125 		 * figure out what the remainder would be in the last
2126 		 * descriptor associated with the fragment.
2127 		 */
2128 		if (stale_size > ICE_MAX_DATA_PER_TXD) {
2129 			int align_pad = -(skb_frag_off(stale)) &
2130 					(ICE_MAX_READ_REQ_SIZE - 1);
2131 
2132 			sum -= align_pad;
2133 			stale_size -= align_pad;
2134 
2135 			do {
2136 				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2137 				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2138 			} while (stale_size > ICE_MAX_DATA_PER_TXD);
2139 		}
2140 
2141 		/* if sum is negative we failed to make sufficient progress */
2142 		if (sum < 0)
2143 			return true;
2144 
2145 		if (!nr_frags--)
2146 			break;
2147 
2148 		sum -= stale_size;
2149 	}
2150 
2151 	return false;
2152 }
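
/* Illustrative example (hypothetical skb): with gso_size = 4096 and ten
 * 512-byte fragments, any six consecutive fragments carry only 3072
 * bytes, so the running sum above goes negative and the skb gets
 * linearized before transmit.
 */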
2153 
2154 /**
2155  * ice_chk_linearize - Check if there are more than 8 fragments per packet
2156  * @skb:      send buffer
2157  * @count:    number of buffers used
2158  *
2159  * Note: Our HW can't scatter-gather more than 8 fragments to build
2160  * a packet on the wire and so we need to figure out the cases where we
2161  * need to linearize the skb.
2162  */
2163 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2164 {
2165 	/* Both TSO and single send will work if count is less than 8 */
2166 	if (likely(count < ICE_MAX_BUF_TXD))
2167 		return false;
2168 
2169 	if (skb_is_gso(skb))
2170 		return __ice_chk_linearize(skb);
2171 
2172 	/* we can support up to 8 data buffers for a single send */
2173 	return count != ICE_MAX_BUF_TXD;
2174 }
2175 
2176 /**
2177  * ice_tstamp - set up context descriptor for hardware timestamp
2178  * @tx_ring: pointer to the Tx ring to send buffer on
2179  * @skb: pointer to the SKB we're sending
2180  * @first: Tx buffer
2181  * @off: Tx offload parameters
2182  */
2183 static void
2184 ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
2185 	   struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2186 {
2187 	s8 idx;
2188 
2189 	/* only timestamp the outbound packet if the user has requested it */
2190 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2191 		return;
2192 
2193 	if (!tx_ring->ptp_tx)
2194 		return;
2195 
2196 	/* Tx timestamps cannot be sampled when doing TSO */
2197 	if (first->tx_flags & ICE_TX_FLAGS_TSO)
2198 		return;
2199 
2200 	/* Grab an open timestamp slot */
2201 	idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
2202 	if (idx < 0)
2203 		return;
2204 
2205 	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2206 			     (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
2207 			     ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
2208 	first->tx_flags |= ICE_TX_FLAGS_TSYN;
2209 }
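
/* Note: the timestamp slot index from ice_ptp_request_ts() is carried in
 * the same QW1 field that the TSO path uses for the TSO length (hence the
 * ICE_TXD_CTX_QW1_TSO_LEN_S shift above), which is consistent with the
 * earlier check that skips timestamping for TSO packets.
 */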
2210 
2211 /**
2212  * ice_xmit_frame_ring - Sends buffer on Tx ring
2213  * @skb: send buffer
2214  * @tx_ring: ring to send buffer on
2215  *
2216  * Returns NETDEV_TX_OK if sent, else an error code
2217  */
2218 static netdev_tx_t
2219 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
2220 {
2221 	struct ice_tx_offload_params offload = { 0 };
2222 	struct ice_vsi *vsi = tx_ring->vsi;
2223 	struct ice_tx_buf *first;
2224 	struct ethhdr *eth;
2225 	unsigned int count;
2226 	int tso, csum;
2227 
2228 	ice_trace(xmit_frame_ring, tx_ring, skb);
2229 
2230 	count = ice_xmit_desc_count(skb);
2231 	if (ice_chk_linearize(skb, count)) {
2232 		if (__skb_linearize(skb))
2233 			goto out_drop;
2234 		count = ice_txd_use_count(skb->len);
2235 		tx_ring->tx_stats.tx_linearize++;
2236 	}
2237 
2238 	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2239 	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2240 	 *       + 4 desc gap to avoid the cache line where head is,
2241 	 *       + 1 desc for context descriptor,
2242 	 * otherwise try next time
2243 	 */
2244 	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2245 			      ICE_DESCS_FOR_CTX_DESC)) {
2246 		tx_ring->tx_stats.tx_busy++;
2247 		return NETDEV_TX_BUSY;
2248 	}
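
	/* Illustrative example: a linear 1500-byte frame needs one data
	 * descriptor, so per the accounting in the comment above the ring
	 * must have 1 + 4 + 1 = 6 free descriptors before the frame is
	 * queued.
	 */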
2249 
2250 	offload.tx_ring = tx_ring;
2251 
2252 	/* record the location of the first descriptor for this packet */
2253 	first = &tx_ring->tx_buf[tx_ring->next_to_use];
2254 	first->skb = skb;
2255 	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2256 	first->gso_segs = 1;
2257 	first->tx_flags = 0;
2258 
2259 	/* prepare the VLAN tagging flags for Tx */
2260 	ice_tx_prepare_vlan_flags(tx_ring, first);
2261 
2262 	/* set up TSO offload */
2263 	tso = ice_tso(first, &offload);
2264 	if (tso < 0)
2265 		goto out_drop;
2266 
2267 	/* always set up Tx checksum offload */
2268 	csum = ice_tx_csum(first, &offload);
2269 	if (csum < 0)
2270 		goto out_drop;
2271 
2272 	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2273 	eth = (struct ethhdr *)skb_mac_header(skb);
2274 	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
2275 		      eth->h_proto == htons(ETH_P_LLDP)) &&
2276 		     vsi->type == ICE_VSI_PF &&
2277 		     vsi->port_info->qos_cfg.is_sw_lldp))
2278 		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2279 					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2280 					ICE_TXD_CTX_QW1_CMD_S);
2281 
2282 	ice_tstamp(tx_ring, skb, first, &offload);
2283 	if (ice_is_switchdev_running(vsi->back))
2284 		ice_eswitch_set_target_vsi(skb, &offload);
2285 
2286 	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2287 		struct ice_tx_ctx_desc *cdesc;
2288 		u16 i = tx_ring->next_to_use;
2289 
2290 		/* grab the next descriptor */
2291 		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2292 		i++;
2293 		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2294 
2295 		/* setup context descriptor */
2296 		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2297 		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2298 		cdesc->rsvd = cpu_to_le16(0);
2299 		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2300 	}
2301 
2302 	ice_tx_map(tx_ring, first, &offload);
2303 	return NETDEV_TX_OK;
2304 
2305 out_drop:
2306 	ice_trace(xmit_frame_ring_drop, tx_ring, skb);
2307 	dev_kfree_skb_any(skb);
2308 	return NETDEV_TX_OK;
2309 }
2310 
2311 /**
2312  * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2313  * @skb: send buffer
2314  * @netdev: network interface device structure
2315  *
2316  * Returns NETDEV_TX_OK if sent, else an error code
2317  */
2318 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2319 {
2320 	struct ice_netdev_priv *np = netdev_priv(netdev);
2321 	struct ice_vsi *vsi = np->vsi;
2322 	struct ice_tx_ring *tx_ring;
2323 
2324 	tx_ring = vsi->tx_rings[skb->queue_mapping];
2325 
2326 	/* hardware can't handle really short frames, hardware padding works
2327 	 * beyond this point
2328 	 */
2329 	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2330 		return NETDEV_TX_OK;
2331 
2332 	return ice_xmit_frame_ring(skb, tx_ring);
2333 }
2334 
2335 /**
2336  * ice_get_dscp_up - return the UP/TC value for a SKB
2337  * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
2338  * @skb: SKB to query for info to determine UP/TC
2339  *
 * This function should only be called when the PF is in L3 DSCP PFC mode
2341  */
2342 static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
2343 {
2344 	u8 dscp = 0;
2345 
2346 	if (skb->protocol == htons(ETH_P_IP))
2347 		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
2348 	else if (skb->protocol == htons(ETH_P_IPV6))
2349 		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
2350 
2351 	return dcbcfg->dscp_map[dscp];
2352 }
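
/* Illustrative example: an IPv4 packet marked Expedited Forwarding has a
 * DS field of 0xb8, so ipv4_get_dsfield() >> 2 yields DSCP 46 and the
 * UP/TC comes from dcbcfg->dscp_map[46].
 */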
2353 
/**
 * ice_select_queue - select Tx queue, remapping skb priority in DSCP mode
 * @netdev: network interface device structure
 * @skb: send buffer
 * @sb_dev: subordinate device, if any
 *
 * When the PF runs in L3 DSCP PFC mode, derive skb->priority from the
 * packet's DSCP value before letting the stack pick the Tx queue.
 */
u16
ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
		 struct net_device *sb_dev)
2357 {
2358 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2359 	struct ice_dcbx_cfg *dcbcfg;
2360 
2361 	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
2362 	if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
2363 		skb->priority = ice_get_dscp_up(dcbcfg, skb);
2364 
2365 	return netdev_pick_tx(netdev, skb, sb_dev);
2366 }
2367 
2368 /**
2369  * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2370  * @tx_ring: tx_ring to clean
2371  */
2372 void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
2373 {
2374 	struct ice_vsi *vsi = tx_ring->vsi;
2375 	s16 i = tx_ring->next_to_clean;
2376 	int budget = ICE_DFLT_IRQ_WORK;
2377 	struct ice_tx_desc *tx_desc;
2378 	struct ice_tx_buf *tx_buf;
2379 
2380 	tx_buf = &tx_ring->tx_buf[i];
2381 	tx_desc = ICE_TX_DESC(tx_ring, i);
2382 	i -= tx_ring->count;
2383 
2384 	do {
2385 		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2386 
2387 		/* if next_to_watch is not set then there is no pending work */
2388 		if (!eop_desc)
2389 			break;
2390 
2391 		/* prevent any other reads prior to eop_desc */
2392 		smp_rmb();
2393 
2394 		/* if the descriptor isn't done, no work to do */
2395 		if (!(eop_desc->cmd_type_offset_bsz &
2396 		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2397 			break;
2398 
2399 		/* clear next_to_watch to prevent false hangs */
2400 		tx_buf->next_to_watch = NULL;
2401 		tx_desc->buf_addr = 0;
2402 		tx_desc->cmd_type_offset_bsz = 0;
2403 
2404 		/* move past filter desc */
2405 		tx_buf++;
2406 		tx_desc++;
2407 		i++;
2408 		if (unlikely(!i)) {
2409 			i -= tx_ring->count;
2410 			tx_buf = tx_ring->tx_buf;
2411 			tx_desc = ICE_TX_DESC(tx_ring, 0);
2412 		}
2413 
2414 		/* unmap the data header */
2415 		if (dma_unmap_len(tx_buf, len))
2416 			dma_unmap_single(tx_ring->dev,
2417 					 dma_unmap_addr(tx_buf, dma),
2418 					 dma_unmap_len(tx_buf, len),
2419 					 DMA_TO_DEVICE);
2420 		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
2421 			devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2422 
		/* reset the Tx buffer and clear next_to_watch to prevent false hangs */
2424 		tx_buf->raw_buf = NULL;
2425 		tx_buf->tx_flags = 0;
2426 		tx_buf->next_to_watch = NULL;
2427 		dma_unmap_len_set(tx_buf, len, 0);
2428 		tx_desc->buf_addr = 0;
2429 		tx_desc->cmd_type_offset_bsz = 0;
2430 
2431 		/* move past eop_desc for start of next FD desc */
2432 		tx_buf++;
2433 		tx_desc++;
2434 		i++;
2435 		if (unlikely(!i)) {
2436 			i -= tx_ring->count;
2437 			tx_buf = tx_ring->tx_buf;
2438 			tx_desc = ICE_TX_DESC(tx_ring, 0);
2439 		}
2440 
2441 		budget--;
2442 	} while (likely(budget));
2443 
2444 	i += tx_ring->count;
2445 	tx_ring->next_to_clean = i;
2446 
2447 	/* re-enable interrupt if needed */
2448 	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
2449 }
2450