1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* The driver transmit and receive code */
5 
6 #include <linux/prefetch.h>
7 #include <linux/mm.h>
8 #include "ice.h"
9 
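/* ICE_RX_HDR_SIZE bounds how many header bytes ice_construct_skb() copies
 * into the skb linear area; the rest of the buffer is attached as a page
 * fragment
 */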
10 #define ICE_RX_HDR_SIZE		256
11 
12 /**
13  * ice_unmap_and_free_tx_buf - Release a Tx buffer
14  * @ring: the ring that owns the buffer
15  * @tx_buf: the buffer to free
16  */
17 static void
18 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
19 {
20 	if (tx_buf->skb) {
21 		dev_kfree_skb_any(tx_buf->skb);
22 		if (dma_unmap_len(tx_buf, len))
23 			dma_unmap_single(ring->dev,
24 					 dma_unmap_addr(tx_buf, dma),
25 					 dma_unmap_len(tx_buf, len),
26 					 DMA_TO_DEVICE);
27 	} else if (dma_unmap_len(tx_buf, len)) {
28 		dma_unmap_page(ring->dev,
29 			       dma_unmap_addr(tx_buf, dma),
30 			       dma_unmap_len(tx_buf, len),
31 			       DMA_TO_DEVICE);
32 	}
33 
34 	tx_buf->next_to_watch = NULL;
35 	tx_buf->skb = NULL;
36 	dma_unmap_len_set(tx_buf, len, 0);
37 	/* tx_buf must be completely set up in the transmit path */
38 }
39 
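/**
 * txring_txq - helper to convert from a ring to a netdev_queue
 * @ring: Tx ring to find the netdev equivalent of
 */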
40 static struct netdev_queue *txring_txq(const struct ice_ring *ring)
41 {
42 	return netdev_get_tx_queue(ring->netdev, ring->q_index);
43 }
44 
45 /**
46  * ice_clean_tx_ring - Free any empty Tx buffers
47  * @tx_ring: ring to be cleaned
48  */
49 void ice_clean_tx_ring(struct ice_ring *tx_ring)
50 {
51 	u16 i;
52 
53 	/* ring already cleared, nothing to do */
54 	if (!tx_ring->tx_buf)
55 		return;
56 
57 	/* Free all the Tx ring sk_buffs */
58 	for (i = 0; i < tx_ring->count; i++)
59 		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
60 
61 	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
62 
63 	/* Zero out the descriptor ring */
64 	memset(tx_ring->desc, 0, tx_ring->size);
65 
66 	tx_ring->next_to_use = 0;
67 	tx_ring->next_to_clean = 0;
68 
69 	if (!tx_ring->netdev)
70 		return;
71 
72 	/* cleanup Tx queue statistics */
73 	netdev_tx_reset_queue(txring_txq(tx_ring));
74 }
75 
76 /**
77  * ice_free_tx_ring - Free Tx resources per queue
78  * @tx_ring: Tx descriptor ring for a specific queue
79  *
80  * Free all transmit software resources
81  */
82 void ice_free_tx_ring(struct ice_ring *tx_ring)
83 {
84 	ice_clean_tx_ring(tx_ring);
85 	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
86 	tx_ring->tx_buf = NULL;
87 
88 	if (tx_ring->desc) {
89 		dmam_free_coherent(tx_ring->dev, tx_ring->size,
90 				   tx_ring->desc, tx_ring->dma);
91 		tx_ring->desc = NULL;
92 	}
93 }
94 
95 /**
96  * ice_clean_tx_irq - Reclaim resources after transmit completes
97  * @vsi: the VSI we care about
98  * @tx_ring: Tx ring to clean
99  * @napi_budget: Used to determine if we are in netpoll
100  *
101  * Returns true if there's any budget left (i.e. the clean is finished)
102  */
103 static bool
104 ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget)
105 {
106 	unsigned int total_bytes = 0, total_pkts = 0;
107 	unsigned int budget = vsi->work_lmt;
108 	s16 i = tx_ring->next_to_clean;
109 	struct ice_tx_desc *tx_desc;
110 	struct ice_tx_buf *tx_buf;
111 
112 	tx_buf = &tx_ring->tx_buf[i];
113 	tx_desc = ICE_TX_DESC(tx_ring, i);
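	/* track i as a negative offset from the end of the ring so that the
	 * wrap checks below reduce to a simple test against zero
	 */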
114 	i -= tx_ring->count;
115 
116 	do {
117 		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
118 
119 		/* if next_to_watch is not set then there is no work pending */
120 		if (!eop_desc)
121 			break;
122 
123 		smp_rmb();	/* prevent any other reads prior to eop_desc */
124 
125 		/* if the descriptor isn't done, no work yet to do */
126 		if (!(eop_desc->cmd_type_offset_bsz &
127 		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
128 			break;
129 
130 		/* clear next_to_watch to prevent false hangs */
131 		tx_buf->next_to_watch = NULL;
132 
133 		/* update the statistics for this packet */
134 		total_bytes += tx_buf->bytecount;
135 		total_pkts += tx_buf->gso_segs;
136 
137 		/* free the skb */
138 		napi_consume_skb(tx_buf->skb, napi_budget);
139 
140 		/* unmap skb header data */
141 		dma_unmap_single(tx_ring->dev,
142 				 dma_unmap_addr(tx_buf, dma),
143 				 dma_unmap_len(tx_buf, len),
144 				 DMA_TO_DEVICE);
145 
146 		/* clear tx_buf data */
147 		tx_buf->skb = NULL;
148 		dma_unmap_len_set(tx_buf, len, 0);
149 
150 		/* unmap remaining buffers */
151 		while (tx_desc != eop_desc) {
152 			tx_buf++;
153 			tx_desc++;
154 			i++;
155 			if (unlikely(!i)) {
156 				i -= tx_ring->count;
157 				tx_buf = tx_ring->tx_buf;
158 				tx_desc = ICE_TX_DESC(tx_ring, 0);
159 			}
160 
161 			/* unmap any remaining paged data */
162 			if (dma_unmap_len(tx_buf, len)) {
163 				dma_unmap_page(tx_ring->dev,
164 					       dma_unmap_addr(tx_buf, dma),
165 					       dma_unmap_len(tx_buf, len),
166 					       DMA_TO_DEVICE);
167 				dma_unmap_len_set(tx_buf, len, 0);
168 			}
169 		}
170 
171 		/* move us one more past the eop_desc for start of next pkt */
172 		tx_buf++;
173 		tx_desc++;
174 		i++;
175 		if (unlikely(!i)) {
176 			i -= tx_ring->count;
177 			tx_buf = tx_ring->tx_buf;
178 			tx_desc = ICE_TX_DESC(tx_ring, 0);
179 		}
180 
181 		prefetch(tx_desc);
182 
183 		/* update budget accounting */
184 		budget--;
185 	} while (likely(budget));
186 
187 	i += tx_ring->count;
188 	tx_ring->next_to_clean = i;
189 	u64_stats_update_begin(&tx_ring->syncp);
190 	tx_ring->stats.bytes += total_bytes;
191 	tx_ring->stats.pkts += total_pkts;
192 	u64_stats_update_end(&tx_ring->syncp);
193 	tx_ring->q_vector->tx.total_bytes += total_bytes;
194 	tx_ring->q_vector->tx.total_pkts += total_pkts;
195 
196 	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
197 				  total_bytes);
198 
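/* wake the queue only once enough descriptors are free for at least two more
 * worst-case packets (DESC_NEEDED descriptors each)
 */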
199 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
200 	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
201 		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
202 		/* Make sure that anybody stopping the queue after this
203 		 * sees the new next_to_clean.
204 		 */
205 		smp_mb();
206 		if (__netif_subqueue_stopped(tx_ring->netdev,
207 					     tx_ring->q_index) &&
208 		   !test_bit(__ICE_DOWN, vsi->state)) {
209 			netif_wake_subqueue(tx_ring->netdev,
210 					    tx_ring->q_index);
211 			++tx_ring->tx_stats.restart_q;
212 		}
213 	}
214 
215 	return !!budget;
216 }
217 
218 /**
219  * ice_setup_tx_ring - Allocate the Tx descriptors
220  * @tx_ring: the Tx ring to set up
221  *
222  * Return 0 on success, negative on error
223  */
224 int ice_setup_tx_ring(struct ice_ring *tx_ring)
225 {
226 	struct device *dev = tx_ring->dev;
227 
228 	if (!dev)
229 		return -ENOMEM;
230 
231 	/* warn if we are about to overwrite the pointer */
232 	WARN_ON(tx_ring->tx_buf);
233 	tx_ring->tx_buf =
234 		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
235 			     GFP_KERNEL);
236 	if (!tx_ring->tx_buf)
237 		return -ENOMEM;
238 
239 	/* round up to nearest page */
240 	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
241 			      PAGE_SIZE);
242 	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
243 					    GFP_KERNEL);
244 	if (!tx_ring->desc) {
245 		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
246 			tx_ring->size);
247 		goto err;
248 	}
249 
250 	tx_ring->next_to_use = 0;
251 	tx_ring->next_to_clean = 0;
252 	tx_ring->tx_stats.prev_pkt = -1;
253 	return 0;
254 
255 err:
256 	devm_kfree(dev, tx_ring->tx_buf);
257 	tx_ring->tx_buf = NULL;
258 	return -ENOMEM;
259 }
260 
261 /**
262  * ice_clean_rx_ring - Free Rx buffers
263  * @rx_ring: ring to be cleaned
264  */
265 void ice_clean_rx_ring(struct ice_ring *rx_ring)
266 {
267 	struct device *dev = rx_ring->dev;
268 	u16 i;
269 
270 	/* ring already cleared, nothing to do */
271 	if (!rx_ring->rx_buf)
272 		return;
273 
274 	/* Free all the Rx ring sk_buffs */
275 	for (i = 0; i < rx_ring->count; i++) {
276 		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
277 
278 		if (rx_buf->skb) {
279 			dev_kfree_skb(rx_buf->skb);
280 			rx_buf->skb = NULL;
281 		}
282 		if (!rx_buf->page)
283 			continue;
284 
285 		/* Invalidate cache lines that may have been written to by
286 		 * device so that we avoid corrupting memory.
287 		 */
288 		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
289 					      rx_buf->page_offset,
290 					      ICE_RXBUF_2048, DMA_FROM_DEVICE);
291 
292 		/* free resources associated with mapping */
293 		dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
294 				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
295 		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
296 
297 		rx_buf->page = NULL;
298 		rx_buf->page_offset = 0;
299 	}
300 
301 	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
302 
303 	/* Zero out the descriptor ring */
304 	memset(rx_ring->desc, 0, rx_ring->size);
305 
306 	rx_ring->next_to_alloc = 0;
307 	rx_ring->next_to_clean = 0;
308 	rx_ring->next_to_use = 0;
309 }
310 
311 /**
312  * ice_free_rx_ring - Free Rx resources
313  * @rx_ring: ring to clean the resources from
314  *
315  * Free all receive software resources
316  */
317 void ice_free_rx_ring(struct ice_ring *rx_ring)
318 {
319 	ice_clean_rx_ring(rx_ring);
320 	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
321 	rx_ring->rx_buf = NULL;
322 
323 	if (rx_ring->desc) {
324 		dmam_free_coherent(rx_ring->dev, rx_ring->size,
325 				   rx_ring->desc, rx_ring->dma);
326 		rx_ring->desc = NULL;
327 	}
328 }
329 
330 /**
331  * ice_setup_rx_ring - Allocate the Rx descriptors
332  * @rx_ring: the Rx ring to set up
333  *
334  * Return 0 on success, negative on error
335  */
336 int ice_setup_rx_ring(struct ice_ring *rx_ring)
337 {
338 	struct device *dev = rx_ring->dev;
339 
340 	if (!dev)
341 		return -ENOMEM;
342 
343 	/* warn if we are about to overwrite the pointer */
344 	WARN_ON(rx_ring->rx_buf);
345 	rx_ring->rx_buf =
346 		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
347 			     GFP_KERNEL);
348 	if (!rx_ring->rx_buf)
349 		return -ENOMEM;
350 
351 	/* round up to nearest page */
352 	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
353 			      PAGE_SIZE);
354 	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
355 					    GFP_KERNEL);
356 	if (!rx_ring->desc) {
357 		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
358 			rx_ring->size);
359 		goto err;
360 	}
361 
362 	rx_ring->next_to_use = 0;
363 	rx_ring->next_to_clean = 0;
364 	return 0;
365 
366 err:
367 	devm_kfree(dev, rx_ring->rx_buf);
368 	rx_ring->rx_buf = NULL;
369 	return -ENOMEM;
370 }
371 
372 /**
373  * ice_release_rx_desc - Bump the tail to release descriptors to the hardware
374  * @rx_ring: ring to bump
375  * @val: new next_to_use value to record and write to the ring's tail register
376  */
377 static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
378 {
379 	rx_ring->next_to_use = val;
380 
381 	/* update next to alloc since we have filled the ring */
382 	rx_ring->next_to_alloc = val;
383 
384 	/* Force memory writes to complete before letting h/w
385 	 * know there are new descriptors to fetch. (Only
386 	 * applicable for weak-ordered memory model archs,
387 	 * such as IA-64).
388 	 */
389 	wmb();
390 	writel(val, rx_ring->tail);
391 }
392 
393 /**
394  * ice_alloc_mapped_page - recycle or make a new page
395  * @rx_ring: ring to use
396  * @bi: rx_buf struct to modify
397  *
398  * Returns true if the page was successfully allocated or
399  * reused.
400  */
401 static bool
402 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
403 {
404 	struct page *page = bi->page;
405 	dma_addr_t dma;
406 
407 	/* since we are recycling buffers we should seldom need to alloc */
408 	if (likely(page)) {
409 		rx_ring->rx_stats.page_reuse_count++;
410 		return true;
411 	}
412 
413 	/* alloc new page for storage */
414 	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
415 	if (unlikely(!page)) {
416 		rx_ring->rx_stats.alloc_page_failed++;
417 		return false;
418 	}
419 
420 	/* map page for use */
421 	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
422 				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
423 
424 	/* if mapping failed free memory back to system since
425 	 * there isn't much point in holding memory we can't use
426 	 */
427 	if (dma_mapping_error(rx_ring->dev, dma)) {
428 		__free_pages(page, 0);
429 		rx_ring->rx_stats.alloc_page_failed++;
430 		return false;
431 	}
432 
433 	bi->dma = dma;
434 	bi->page = page;
435 	bi->page_offset = 0;
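	/* take a large up-front page reference and track usage locally via
	 * pagecnt_bias so the hot path avoids atomic refcount updates
	 */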
436 	page_ref_add(page, USHRT_MAX - 1);
437 	bi->pagecnt_bias = USHRT_MAX;
438 
439 	return true;
440 }
441 
442 /**
443  * ice_alloc_rx_bufs - Replace used receive buffers
444  * @rx_ring: ring to place buffers on
445  * @cleaned_count: number of buffers to replace
446  *
447  * Returns false if all allocations were successful, true if any fail
448  */
449 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
450 {
451 	union ice_32b_rx_flex_desc *rx_desc;
452 	u16 ntu = rx_ring->next_to_use;
453 	struct ice_rx_buf *bi;
454 
455 	/* do nothing if no valid netdev defined */
456 	if (!rx_ring->netdev || !cleaned_count)
457 		return false;
458 
459 	/* get the RX descriptor and buffer based on next_to_use */
460 	rx_desc = ICE_RX_DESC(rx_ring, ntu);
461 	bi = &rx_ring->rx_buf[ntu];
462 
463 	do {
464 		if (!ice_alloc_mapped_page(rx_ring, bi))
465 			goto no_bufs;
466 
467 		/* sync the buffer for use by the device */
468 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
469 						 bi->page_offset,
470 						 ICE_RXBUF_2048,
471 						 DMA_FROM_DEVICE);
472 
473 		/* Refresh the desc even if buffer_addrs didn't change
474 		 * because each write-back erases this info.
475 		 */
476 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
477 
478 		rx_desc++;
479 		bi++;
480 		ntu++;
481 		if (unlikely(ntu == rx_ring->count)) {
482 			rx_desc = ICE_RX_DESC(rx_ring, 0);
483 			bi = rx_ring->rx_buf;
484 			ntu = 0;
485 		}
486 
487 		/* clear the status bits for the next_to_use descriptor */
488 		rx_desc->wb.status_error0 = 0;
489 
490 		cleaned_count--;
491 	} while (cleaned_count);
492 
493 	if (rx_ring->next_to_use != ntu)
494 		ice_release_rx_desc(rx_ring, ntu);
495 
496 	return false;
497 
498 no_bufs:
499 	if (rx_ring->next_to_use != ntu)
500 		ice_release_rx_desc(rx_ring, ntu);
501 
502 	/* make sure to come back via polling to try again after
503 	 * allocation failure
504 	 */
505 	return true;
506 }
507 
508 /**
509  * ice_page_is_reserved - check if a page cannot be reused
510  * @page: page struct to check
511  */
512 static bool ice_page_is_reserved(struct page *page)
513 {
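	/* pages from a remote NUMA node or the pfmemalloc emergency reserve
	 * are not worth recycling
	 */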
514 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
515 }
516 
517 /**
518  * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
519  * @rx_buf: Rx buffer to adjust
520  * @size: Size of adjustment
521  *
522  * Update the offset within page so that Rx buf will be ready to be reused.
523  * For systems with PAGE_SIZE < 8192 this function will flip the page offset
524  * so the second half of page assigned to Rx buffer will be used, otherwise
525  * the offset is moved by the @size bytes
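 * (e.g. with 4 KB pages and 2 KB buffers the offset simply toggles between
 * 0 and 2048)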
526  */
527 static void
528 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
529 {
530 #if (PAGE_SIZE < 8192)
531 	/* flip page offset to other buffer */
532 	rx_buf->page_offset ^= size;
533 #else
534 	/* move offset up to the next cache line */
535 	rx_buf->page_offset += size;
536 #endif
537 }
538 
539 /**
540  * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
541  * @rx_buf: buffer containing the page
542  *
543  * If page is reusable, we have a green light for calling ice_reuse_rx_page,
544  * which will assign the current buffer to the buffer that next_to_alloc is
545  * pointing to; otherwise, the DMA mapping needs to be destroyed and
546  * page freed
547  */
548 static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
549 {
550 #if (PAGE_SIZE >= 8192)
551 	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
552 #endif
553 	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
554 	struct page *page = rx_buf->page;
555 
556 	/* avoid re-using remote pages */
557 	if (unlikely(ice_page_is_reserved(page)))
558 		return false;
559 
560 #if (PAGE_SIZE < 8192)
561 	/* if we are only owner of page we can reuse it */
562 	if (unlikely((page_count(page) - pagecnt_bias) > 1))
563 		return false;
564 #else
565 	if (rx_buf->page_offset > last_offset)
566 		return false;
567 #endif /* PAGE_SIZE < 8192) */
568 
569 	/* If we have drained the page fragment pool we need to update
570 	 * the pagecnt_bias and page count so that we fully restock the
571 	 * number of references the driver holds.
572 	 */
573 	if (unlikely(pagecnt_bias == 1)) {
574 		page_ref_add(page, USHRT_MAX - 1);
575 		rx_buf->pagecnt_bias = USHRT_MAX;
576 	}
577 
578 	return true;
579 }
580 
581 /**
582  * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
583  * @rx_buf: buffer containing page to add
584  * @skb: sk_buff to place the data into
585  * @size: packet length from rx_desc
586  *
587  * This function will add the data contained in rx_buf->page to the skb.
588  * It will just attach the page as a frag to the skb.
589  * The function will then update the page offset.
590  */
591 static void
592 ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
593 		unsigned int size)
594 {
595 #if (PAGE_SIZE >= 8192)
596 	unsigned int truesize = SKB_DATA_ALIGN(size);
597 #else
598 	unsigned int truesize = ICE_RXBUF_2048;
599 #endif
600 
601 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
602 			rx_buf->page_offset, size, truesize);
603 
604 	/* page is being used so we must update the page offset */
605 	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
606 }
607 
608 /**
609  * ice_reuse_rx_page - page flip buffer and store it back on the ring
610  * @rx_ring: Rx descriptor ring to store buffers on
611  * @old_buf: donor buffer to have page reused
612  *
613  * Synchronizes page for reuse by the adapter
614  */
615 static void
616 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
617 {
618 	u16 nta = rx_ring->next_to_alloc;
619 	struct ice_rx_buf *new_buf;
620 
621 	new_buf = &rx_ring->rx_buf[nta];
622 
623 	/* update, and store next to alloc */
624 	nta++;
625 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
626 
627 	/* Transfer page from old buffer to new buffer.
628 	 * Move each member individually to avoid possible store
629 	 * forwarding stalls and unnecessary copy of skb.
630 	 */
631 	new_buf->dma = old_buf->dma;
632 	new_buf->page = old_buf->page;
633 	new_buf->page_offset = old_buf->page_offset;
634 	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
635 }
636 
637 /**
638  * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
639  * @rx_ring: Rx descriptor ring to transact packets on
640  * @skb: skb to be used
641  * @size: size of buffer to add to skb
642  *
643  * This function will pull an Rx buffer from the ring and synchronize it
644  * for use by the CPU.
645  */
646 static struct ice_rx_buf *
647 ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
648 	       const unsigned int size)
649 {
650 	struct ice_rx_buf *rx_buf;
651 
652 	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
653 	prefetchw(rx_buf->page);
654 	*skb = rx_buf->skb;
655 
656 	/* we are reusing so sync this buffer for CPU use */
657 	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
658 				      rx_buf->page_offset, size,
659 				      DMA_FROM_DEVICE);
660 
661 	/* We have pulled a buffer for use, so decrement pagecnt_bias */
662 	rx_buf->pagecnt_bias--;
663 
664 	return rx_buf;
665 }
666 
667 /**
668  * ice_construct_skb - Allocate skb and populate it
669  * @rx_ring: Rx descriptor ring to transact packets on
670  * @rx_buf: Rx buffer to pull data from
671  * @size: the length of the packet
672  *
673  * This function allocates an skb. It then populates it with the page
674  * data from the current receive descriptor, taking care to set up the
675  * skb correctly.
676  */
677 static struct sk_buff *
678 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
679 		  unsigned int size)
680 {
681 	void *va = page_address(rx_buf->page) + rx_buf->page_offset;
682 	unsigned int headlen;
683 	struct sk_buff *skb;
684 
685 	/* prefetch first cache line of first page */
686 	prefetch(va);
687 #if L1_CACHE_BYTES < 128
688 	prefetch((u8 *)va + L1_CACHE_BYTES);
689 #endif /* L1_CACHE_BYTES */
690 
691 	/* allocate a skb to store the frags */
692 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
693 			       GFP_ATOMIC | __GFP_NOWARN);
694 	if (unlikely(!skb))
695 		return NULL;
696 
697 	skb_record_rx_queue(skb, rx_ring->q_index);
698 	/* Determine available headroom for copy */
699 	headlen = size;
700 	if (headlen > ICE_RX_HDR_SIZE)
701 		headlen = eth_get_headlen(va, ICE_RX_HDR_SIZE);
702 
703 	/* align pull length to size of long to optimize memcpy performance */
704 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
705 
706 	/* if we exhaust the linear part then add what is left as a frag */
707 	size -= headlen;
708 	if (size) {
709 #if (PAGE_SIZE >= 8192)
710 		unsigned int truesize = SKB_DATA_ALIGN(size);
711 #else
712 		unsigned int truesize = ICE_RXBUF_2048;
713 #endif
714 		skb_add_rx_frag(skb, 0, rx_buf->page,
715 				rx_buf->page_offset + headlen, size, truesize);
716 		/* buffer is used by skb, update page_offset */
717 		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
718 	} else {
719 		/* buffer is unused, reset bias back to rx_buf; data was copied
720 		 * onto skb's linear part so there's no need for adjusting
721 		 * page offset and we can reuse this buffer as-is
722 		 */
723 		rx_buf->pagecnt_bias++;
724 	}
725 
726 	return skb;
727 }
728 
729 /**
730  * ice_put_rx_buf - Clean up used buffer and either recycle or free
731  * @rx_ring: Rx descriptor ring to transact packets on
732  * @rx_buf: Rx buffer to pull data from
733  *
734  * This function will clean up the contents of the rx_buf. It will
735  * either recycle the buffer or unmap it and free the associated resources.
736  */
737 static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
738 {
739 	/* hand second half of page back to the ring */
740 	if (ice_can_reuse_rx_page(rx_buf)) {
741 		ice_reuse_rx_page(rx_ring, rx_buf);
742 		rx_ring->rx_stats.page_reuse_count++;
743 	} else {
744 		/* we are not reusing the buffer so unmap it */
745 		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
746 				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
747 		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
748 	}
749 
750 	/* clear contents of buffer_info */
751 	rx_buf->page = NULL;
752 	rx_buf->skb = NULL;
753 }
754 
755 /**
756  * ice_cleanup_headers - Correct empty headers
757  * @skb: pointer to current skb being fixed
758  *
759  * Also address the case where we are pulling data in on pages only
760  * and as such no data is present in the skb header.
761  *
762  * In addition if skb is not at least 60 bytes we need to pad it so that
763  * it is large enough to qualify as a valid Ethernet frame.
764  *
765  * Returns true if an error was encountered and skb was freed.
766  */
767 static bool ice_cleanup_headers(struct sk_buff *skb)
768 {
769 	/* if eth_skb_pad returns an error the skb was freed */
770 	if (eth_skb_pad(skb))
771 		return true;
772 
773 	return false;
774 }
775 
776 /**
777  * ice_test_staterr - tests bits in Rx descriptor status and error fields
778  * @rx_desc: pointer to receive descriptor (in le64 format)
779  * @stat_err_bits: value to mask
780  *
781  * This function does some fast chicanery in order to return the
782  * value of the mask which is really only used for boolean tests.
783  * The status_error_len doesn't need to be shifted because it begins
784  * at offset zero.
785  */
786 static bool
787 ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
788 {
789 	return !!(rx_desc->wb.status_error0 &
790 		  cpu_to_le16(stat_err_bits));
791 }
792 
793 /**
794  * ice_is_non_eop - process handling of non-EOP buffers
795  * @rx_ring: Rx ring being processed
796  * @rx_desc: Rx descriptor for current buffer
797  * @skb: Current socket buffer containing buffer in progress
798  *
799  * This function updates next to clean. If the buffer is an EOP buffer
800  * this function exits returning false, otherwise it will place the
801  * sk_buff in the next buffer to be chained and return true indicating
802  * that this is in fact a non-EOP buffer.
803  */
804 static bool
805 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
806 	       struct sk_buff *skb)
807 {
808 	u32 ntc = rx_ring->next_to_clean + 1;
809 
810 	/* fetch, update, and store next to clean */
811 	ntc = (ntc < rx_ring->count) ? ntc : 0;
812 	rx_ring->next_to_clean = ntc;
813 
814 	prefetch(ICE_RX_DESC(rx_ring, ntc));
815 
816 	/* if we are the last buffer then there is nothing else to do */
817 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
818 	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
819 		return false;
820 
821 	/* place skb in next buffer to be received */
822 	rx_ring->rx_buf[ntc].skb = skb;
823 	rx_ring->rx_stats.non_eop_descs++;
824 
825 	return true;
826 }
827 
828 /**
829  * ice_ptype_to_htype - get a hash type
830  * @ptype: the ptype value from the descriptor
831  *
832  * Returns a hash type to be used by skb_set_hash
833  */
834 static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
835 {
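	/* decoding ptype to a more specific hash type is not implemented yet,
	 * so report that the hash type is unknown
	 */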
836 	return PKT_HASH_TYPE_NONE;
837 }
838 
839 /**
840  * ice_rx_hash - set the hash value in the skb
841  * @rx_ring: descriptor ring
842  * @rx_desc: specific descriptor
843  * @skb: pointer to current skb
844  * @rx_ptype: the ptype value from the descriptor
845  */
846 static void
847 ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
848 	    struct sk_buff *skb, u8 rx_ptype)
849 {
850 	struct ice_32b_rx_flex_desc_nic *nic_mdid;
851 	u32 hash;
852 
853 	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
854 		return;
855 
856 	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
857 		return;
858 
859 	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
860 	hash = le32_to_cpu(nic_mdid->rss_hash);
861 	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
862 }
863 
864 /**
865  * ice_rx_csum - Indicate in skb if checksum is good
866  * @vsi: the VSI we care about
867  * @skb: skb currently being received and modified
868  * @rx_desc: the receive descriptor
869  * @ptype: the packet type decoded by hardware
870  *
871  * skb->protocol must be set before this function is called
872  */
873 static void
874 ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
875 	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
876 {
877 	struct ice_rx_ptype_decoded decoded;
878 	u32 rx_error, rx_status;
879 	bool ipv4, ipv6;
880 
881 	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
882 	rx_error = rx_status;
883 
884 	decoded = ice_decode_rx_desc_ptype(ptype);
885 
886 	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
887 	skb->ip_summed = CHECKSUM_NONE;
888 	skb_checksum_none_assert(skb);
889 
890 	/* check if Rx checksum is enabled */
891 	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
892 		return;
893 
894 	/* check if HW has decoded the packet and checksum */
895 	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
896 		return;
897 
898 	if (!(decoded.known && decoded.outer_ip))
899 		return;
900 
901 	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
902 	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
903 	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
904 	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
905 
906 	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
907 				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
908 		goto checksum_fail;
909 	else if (ipv6 && (rx_status &
910 		 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
911 		goto checksum_fail;
912 
913 	/* check for L4 errors and handle packets that were not able to be
914 	 * checksummed due to arrival speed
915 	 */
916 	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
917 		goto checksum_fail;
918 
919 	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
920 	switch (decoded.inner_prot) {
921 	case ICE_RX_PTYPE_INNER_PROT_TCP:
922 	case ICE_RX_PTYPE_INNER_PROT_UDP:
923 	case ICE_RX_PTYPE_INNER_PROT_TCP:
924 		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
925 	default:
926 		break;
927 	}
928 	return;
929 
930 checksum_fail:
931 	vsi->back->hw_csum_rx_error++;
932 }
933 
934 /**
935  * ice_process_skb_fields - Populate skb header fields from Rx descriptor
936  * @rx_ring: Rx descriptor ring packet is being transacted on
937  * @rx_desc: pointer to the EOP Rx descriptor
938  * @skb: pointer to current skb being populated
939  * @ptype: the packet type decoded by hardware
940  *
941  * This function checks the ring, descriptor, and packet information in
942  * order to populate the hash, checksum, VLAN, protocol, and
943  * other fields within the skb.
944  */
945 static void
946 ice_process_skb_fields(struct ice_ring *rx_ring,
947 		       union ice_32b_rx_flex_desc *rx_desc,
948 		       struct sk_buff *skb, u8 ptype)
949 {
950 	ice_rx_hash(rx_ring, rx_desc, skb, ptype);
951 
952 	/* modifies the skb - consumes the enet header */
953 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
954 
955 	ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
956 }
957 
958 /**
959  * ice_receive_skb - Send a completed packet up the stack
960  * @rx_ring: Rx ring in play
961  * @skb: packet to send up
962  * @vlan_tag: vlan tag for packet
963  *
964  * This function sends the completed packet (via skb) up the stack using
965  * GRO receive functions (with/without VLAN tag)
966  */
967 static void
968 ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
969 {
970 	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
971 	    (vlan_tag & VLAN_VID_MASK))
972 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
973 	napi_gro_receive(&rx_ring->q_vector->napi, skb);
974 }
975 
976 /**
977  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
978  * @rx_ring: Rx descriptor ring to transact packets on
979  * @budget: Total limit on number of packets to process
980  *
981  * This function provides a "bounce buffer" approach to Rx interrupt
982  * processing. The advantage to this is that on systems that have
983  * expensive overhead for IOMMU access this provides a means of avoiding
984  * it by maintaining the mapping of the page to the system.
985  *
986  * Returns amount of work completed
987  */
988 static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
989 {
990 	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
991 	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
992 	bool failure = false;
993 
994 	/* start the loop to process RX packets bounded by 'budget' */
995 	while (likely(total_rx_pkts < (unsigned int)budget)) {
996 		union ice_32b_rx_flex_desc *rx_desc;
997 		struct ice_rx_buf *rx_buf;
998 		struct sk_buff *skb;
999 		unsigned int size;
1000 		u16 stat_err_bits;
1001 		u16 vlan_tag = 0;
1002 		u8 rx_ptype;
1003 
1004 		/* return some buffers to hardware, one at a time is too slow */
1005 		if (cleaned_count >= ICE_RX_BUF_WRITE) {
1006 			failure = failure ||
1007 				  ice_alloc_rx_bufs(rx_ring, cleaned_count);
1008 			cleaned_count = 0;
1009 		}
1010 
1011 		/* get the RX desc from RX ring based on 'next_to_clean' */
1012 		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1013 
1014 		/* status_error_len will always be zero for unused descriptors
1015 		 * because it's cleared in cleanup, and overlaps with hdr_addr
1016 		 * which is always zero because packet split isn't used. If the
1017 		 * hardware wrote DD then it will be non-zero
1018 		 */
1019 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1020 		if (!ice_test_staterr(rx_desc, stat_err_bits))
1021 			break;
1022 
1023 		/* This memory barrier is needed to keep us from reading
1024 		 * any other fields out of the rx_desc until we know the
1025 		 * DD bit is set.
1026 		 */
1027 		dma_rmb();
1028 
1029 		size = le16_to_cpu(rx_desc->wb.pkt_len) &
1030 			ICE_RX_FLX_DESC_PKT_LEN_M;
1031 
1032 		rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
1033 		/* allocate (if needed) and populate skb */
1034 		if (skb)
1035 			ice_add_rx_frag(rx_buf, skb, size);
1036 		else
1037 			skb = ice_construct_skb(rx_ring, rx_buf, size);
1038 
1039 		/* exit if we failed to retrieve a buffer */
1040 		if (!skb) {
1041 			rx_ring->rx_stats.alloc_buf_failed++;
1042 			rx_buf->pagecnt_bias++;
1043 			break;
1044 		}
1045 
1046 		ice_put_rx_buf(rx_ring, rx_buf);
1047 		cleaned_count++;
1048 
1049 		/* skip if it is NOP desc */
1050 		if (ice_is_non_eop(rx_ring, rx_desc, skb))
1051 			continue;
1052 
1053 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1054 		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1055 			dev_kfree_skb_any(skb);
1056 			continue;
1057 		}
1058 
1059 		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1060 			ICE_RX_FLEX_DESC_PTYPE_M;
1061 
1062 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1063 		if (ice_test_staterr(rx_desc, stat_err_bits))
1064 			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1065 
1066 		/* correct empty headers and pad skb if needed (to make valid
1067 		 * ethernet frame
1068 		 */
1069 		if (ice_cleanup_headers(skb)) {
1070 			skb = NULL;
1071 			continue;
1072 		}
1073 
1074 		/* probably a little skewed due to removing CRC */
1075 		total_rx_bytes += skb->len;
1076 
1077 		/* populate checksum, VLAN, and protocol */
1078 		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1079 
1080 		/* send completed skb up the stack */
1081 		ice_receive_skb(rx_ring, skb, vlan_tag);
1082 
1083 		/* update budget accounting */
1084 		total_rx_pkts++;
1085 	}
1086 
1087 	/* update queue and vector specific stats */
1088 	u64_stats_update_begin(&rx_ring->syncp);
1089 	rx_ring->stats.pkts += total_rx_pkts;
1090 	rx_ring->stats.bytes += total_rx_bytes;
1091 	u64_stats_update_end(&rx_ring->syncp);
1092 	rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
1093 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1094 
1095 	/* guarantee a trip back through this routine if there was a failure */
1096 	return failure ? budget : (int)total_rx_pkts;
1097 }
1098 
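/**
 * ice_itr_divisor - returns ITR divisor based on the negotiated link speed
 * @pi: port_info from which to read the current link speed
 */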
1099 static unsigned int ice_itr_divisor(struct ice_port_info *pi)
1100 {
1101 	switch (pi->phy.link_info.link_speed) {
1102 	case ICE_AQ_LINK_SPEED_40GB:
1103 		return ICE_ITR_ADAPTIVE_MIN_INC * 1024;
1104 	case ICE_AQ_LINK_SPEED_25GB:
1105 	case ICE_AQ_LINK_SPEED_20GB:
1106 		return ICE_ITR_ADAPTIVE_MIN_INC * 512;
1107 	case ICE_AQ_LINK_SPEED_100MB:
1108 		return ICE_ITR_ADAPTIVE_MIN_INC * 32;
1109 	default:
1110 		return ICE_ITR_ADAPTIVE_MIN_INC * 256;
1111 	}
1112 }
1113 
1114 /**
1115  * ice_update_itr - update the adaptive ITR value based on statistics
1116  * @q_vector: structure containing interrupt and ring information
1117  * @rc: structure containing ring performance data
1118  *
1119  * Stores a new ITR value based on packets and byte
1120  * counts during the last interrupt.  The advantage of per interrupt
1121  * computation is faster updates and more accurate ITR for the current
1122  * traffic pattern.  Constants in this function were computed
1123  * based on theoretical maximum wire speed and thresholds were set based
1124  * on testing data as well as attempting to minimize response time
1125  * while increasing bulk throughput.
1126  */
1127 static void
1128 ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
1129 {
1130 	unsigned int avg_wire_size, packets, bytes, itr;
1131 	unsigned long next_update = jiffies;
1132 	bool container_is_rx;
1133 
1134 	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
1135 		return;
1136 
1137 	/* If itr_countdown is set it means we programmed an ITR within
1138 	 * the last 4 interrupt cycles. This has a side effect of us
1139 	 * potentially firing an early interrupt. In order to work around
1140 	 * this we need to throw out any data received for a few
1141 	 * interrupts following the update.
1142 	 */
1143 	if (q_vector->itr_countdown) {
1144 		itr = rc->target_itr;
1145 		goto clear_counts;
1146 	}
1147 
1148 	container_is_rx = (&q_vector->rx == rc);
1149 	/* For Rx we want to push the delay up and default to low latency.
1150 	 * for Tx we want to pull the delay down and default to high latency.
1151 	 */
1152 	itr = container_is_rx ?
1153 		ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
1154 		ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
1155 
1156 	/* If we didn't update within up to 1 - 2 jiffies we can assume
1157 	 * that either packets are coming in so slow there hasn't been
1158 	 * any work, or that there is so much work that NAPI is dealing
1159 	 * with interrupt moderation and we don't need to do anything.
1160 	 */
1161 	if (time_after(next_update, rc->next_update))
1162 		goto clear_counts;
1163 
1164 	packets = rc->total_pkts;
1165 	bytes = rc->total_bytes;
1166 
1167 	if (container_is_rx) {
1168 		/* For Rx, if there are 1 to 4 packets and bytes are less than
1169 		 * 9000 assume insufficient data to use bulk rate limiting
1170 		 * approach unless Tx is already in bulk rate limiting. We
1171 		 * are likely latency driven.
1172 		 */
1173 		if (packets && packets < 4 && bytes < 9000 &&
1174 		    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
1175 			itr = ICE_ITR_ADAPTIVE_LATENCY;
1176 			goto adjust_by_size;
1177 		}
1178 	} else if (packets < 4) {
1179 		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
1180 		 * bulk mode and we are receiving 4 or fewer packets just
1181 		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1182 		 * that the Rx can relax.
1183 		 */
1184 		if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
1185 		    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
1186 		    ICE_ITR_ADAPTIVE_MAX_USECS)
1187 			goto clear_counts;
1188 	} else if (packets > 32) {
1189 		/* If we have processed over 32 packets in a single interrupt
1190 		 * for Tx assume we need to switch over to "bulk" mode.
1191 		 */
1192 		rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
1193 	}
1194 
1195 	/* We have no packets to actually measure against. This means
1196 	 * either one of the other queues on this vector is active or
1197 	 * we are a Tx queue doing TSO with too high of an interrupt rate.
1198 	 *
1199 	 * Between 4 and 56 we can assume that our current interrupt delay
1200 	 * is only slightly too low. As such we should increase it by a small
1201 	 * fixed amount.
1202 	 */
1203 	if (packets < 56) {
1204 		itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
1205 		if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1206 			itr &= ICE_ITR_ADAPTIVE_LATENCY;
1207 			itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1208 		}
1209 		goto clear_counts;
1210 	}
1211 
1212 	if (packets <= 256) {
1213 		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1214 		itr &= ICE_ITR_MASK;
1215 
1216 		/* Between 56 and 112 is our "goldilocks" zone where we are
1217 		 * working out "just right". Just report that our current
1218 		 * ITR is good for us.
1219 		 */
1220 		if (packets <= 112)
1221 			goto clear_counts;
1222 
1223 		/* If packet count is 128 or greater we are likely looking
1224 		 * at a slight overrun of the delay we want. Try halving
1225 		 * our delay to see if that will cut the number of packets
1226 		 * in half per interrupt.
1227 		 */
1228 		itr >>= 1;
1229 		itr &= ICE_ITR_MASK;
1230 		if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
1231 			itr = ICE_ITR_ADAPTIVE_MIN_USECS;
1232 
1233 		goto clear_counts;
1234 	}
1235 
1236 	/* The paths below assume we are dealing with a bulk ITR since
1237 	 * number of packets is greater than 256. We are just going to have
1238 	 * to compute a value and try to bring the count under control,
1239 	 * though for smaller packet sizes there isn't much we can do as
1240 	 * NAPI polling will likely be kicking in sooner rather than later.
1241 	 */
1242 	itr = ICE_ITR_ADAPTIVE_BULK;
1243 
1244 adjust_by_size:
1245 	/* If packet counts are 256 or greater we can assume we have a gross
1246 	 * overestimation of what the rate should be. Instead of trying to fine
1247 	 * tune it just use the formula below to try and dial in an exact value
1248 	 * given the current packet size of the frame.
1249 	 */
1250 	avg_wire_size = bytes / packets;
1251 
1252 	/* The following is a crude approximation of:
1253 	 *  wmem_default / (size + overhead) = desired_pkts_per_int
1254 	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1255 	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1256 	 *
1257 	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1258 	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1259 	 * formula down to
1260 	 *
1261 	 *  (170 * (size + 24)) / (size + 640) = ITR
1262 	 *
1263 	 * We first do some math on the packet size and then finally bitshift
1264 	 * by 8 after rounding up. We also have to account for PCIe link speed
1265 	 * difference as ITR scales based on this.
1266 	 */
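	/* Rough worked example (assuming ICE_ITR_ADAPTIVE_MIN_INC is 2 and a
	 * 25 Gb link, i.e. an ice_itr_divisor() result of 1024): small frames
	 * map to avg_wire_size = 4096 below, so the increment applied further
	 * down is DIV_ROUND_UP(4096, 1024) * 2 = 8 usecs (~125K ints/sec).
	 */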
1267 	if (avg_wire_size <= 60) {
1268 		/* Start at 250k ints/sec */
1269 		avg_wire_size = 4096;
1270 	} else if (avg_wire_size <= 380) {
1271 		/* 250K ints/sec to 60K ints/sec */
1272 		avg_wire_size *= 40;
1273 		avg_wire_size += 1696;
1274 	} else if (avg_wire_size <= 1084) {
1275 		/* 60K ints/sec to 36K ints/sec */
1276 		avg_wire_size *= 15;
1277 		avg_wire_size += 11452;
1278 	} else if (avg_wire_size <= 1980) {
1279 		/* 36K ints/sec to 30K ints/sec */
1280 		avg_wire_size *= 5;
1281 		avg_wire_size += 22420;
1282 	} else {
1283 		/* plateau at a limit of 30K ints/sec */
1284 		avg_wire_size = 32256;
1285 	}
1286 
1287 	/* If we are in low latency mode halve our delay which doubles the
1288 	 * rate to somewhere between 100K and 16K ints/sec
1289 	 */
1290 	if (itr & ICE_ITR_ADAPTIVE_LATENCY)
1291 		avg_wire_size >>= 1;
1292 
1293 	/* Resultant value is 256 times larger than it needs to be. This
1294 	 * gives us room to adjust the value as needed to either increase
1295 	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
1296 	 *
1297 	 * Use addition as we have already recorded the new latency flag
1298 	 * for the ITR value.
1299 	 */
1300 	itr += DIV_ROUND_UP(avg_wire_size,
1301 			    ice_itr_divisor(q_vector->vsi->port_info)) *
1302 	       ICE_ITR_ADAPTIVE_MIN_INC;
1303 
1304 	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1305 		itr &= ICE_ITR_ADAPTIVE_LATENCY;
1306 		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1307 	}
1308 
1309 clear_counts:
1310 	/* write back value */
1311 	rc->target_itr = itr;
1312 
1313 	/* next update should occur within next jiffy */
1314 	rc->next_update = next_update + 1;
1315 
1316 	rc->total_bytes = 0;
1317 	rc->total_pkts = 0;
1318 }
1319 
1320 /**
1321  * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1322  * @itr_idx: interrupt throttling index
1323  * @itr: interrupt throttling value in usecs
1324  */
1325 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1326 {
1327 	/* The itr value is reported in microseconds, and the register value is
1328 	 * recorded in 2 microsecond units. For this reason we only need to
1329 	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1330 	 * granularity as a shift instead of division. The mask makes sure the
1331 	 * ITR value is never odd so we don't accidentally write into the field
1332 	 * prior to the ITR field.
1333 	 */
1334 	itr &= ICE_ITR_MASK;
1335 
1336 	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1337 		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1338 		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1339 }
1340 
1341 /* The act of updating the ITR will cause it to immediately trigger. In order
1342  * to prevent this from throwing off adaptive update statistics we defer the
1343  * update so that it can only happen so often. So after either Tx or Rx are
1344  * updated we make the adaptive scheme wait until either the ITR completely
1345  * expires via the next_update expiration or we have been through at least
1346  * 3 interrupts.
1347  */
1348 #define ITR_COUNTDOWN_START 3
1349 
1350 /**
1351  * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
1352  * @vsi: the VSI associated with the q_vector
1353  * @q_vector: q_vector for which ITR is being updated and interrupt enabled
1354  */
1355 static void
1356 ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
1357 {
1358 	struct ice_ring_container *tx = &q_vector->tx;
1359 	struct ice_ring_container *rx = &q_vector->rx;
1360 	u32 itr_val;
1361 
1362 	/* This will do nothing if dynamic updates are not enabled */
1363 	ice_update_itr(q_vector, tx);
1364 	ice_update_itr(q_vector, rx);
1365 
1366 	/* This block of logic allows us to get away with only updating
1367 	 * one ITR value with each interrupt. The idea is to perform a
1368 	 * pseudo-lazy update with the following criteria.
1369 	 *
1370 	 * 1. Rx is given higher priority than Tx if both are in same state
1371 	 * 2. If we must reduce an ITR, that reduction is given highest priority.
1372 	 * 3. We then give priority to increasing ITR based on amount.
1373 	 */
1374 	if (rx->target_itr < rx->current_itr) {
1375 		/* Rx ITR needs to be reduced, this is highest priority */
1376 		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1377 		rx->current_itr = rx->target_itr;
1378 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
1379 	} else if ((tx->target_itr < tx->current_itr) ||
1380 		   ((rx->target_itr - rx->current_itr) <
1381 		    (tx->target_itr - tx->current_itr))) {
1382 		/* Tx ITR needs to be reduced, this is second priority
1383 		 * Tx ITR needs to be increased more than Rx, fourth priority
1384 		 */
1385 		itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
1386 		tx->current_itr = tx->target_itr;
1387 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
1388 	} else if (rx->current_itr != rx->target_itr) {
1389 		/* Rx ITR needs to be increased, third priority */
1390 		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1391 		rx->current_itr = rx->target_itr;
1392 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
1393 	} else {
1394 		/* Still have to re-enable the interrupts */
1395 		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1396 		if (q_vector->itr_countdown)
1397 			q_vector->itr_countdown--;
1398 	}
1399 
1400 	if (!test_bit(__ICE_DOWN, vsi->state))
1401 		wr32(&vsi->back->hw,
1402 		     GLINT_DYN_CTL(vsi->hw_base_vector + q_vector->v_idx),
1403 		     itr_val);
1404 }
1405 
1406 /**
1407  * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1408  * @napi: napi struct with our devices info in it
1409  * @budget: amount of work driver is allowed to do this pass, in packets
1410  *
1411  * This function will clean all queues associated with a q_vector.
1412  *
1413  * Returns the amount of work done
1414  */
1415 int ice_napi_poll(struct napi_struct *napi, int budget)
1416 {
1417 	struct ice_q_vector *q_vector =
1418 				container_of(napi, struct ice_q_vector, napi);
1419 	struct ice_vsi *vsi = q_vector->vsi;
1420 	struct ice_pf *pf = vsi->back;
1421 	bool clean_complete = true;
1422 	int budget_per_ring = 0;
1423 	struct ice_ring *ring;
1424 	int work_done = 0;
1425 
1426 	/* Since the actual Tx work is minimal, we can give the Tx a larger
1427 	 * budget and be more aggressive about cleaning up the Tx descriptors.
1428 	 */
1429 	ice_for_each_ring(ring, q_vector->tx)
1430 		if (!ice_clean_tx_irq(vsi, ring, budget))
1431 			clean_complete = false;
1432 
1433 	/* Handle case where we are called by netpoll with a budget of 0 */
1434 	if (budget <= 0)
1435 		return budget;
1436 
1437 	/* We attempt to distribute budget to each Rx queue fairly, but don't
1438 	 * allow the budget to go below 1 because that would exit polling early.
1439 	 */
1440 	if (q_vector->num_ring_rx)
1441 		budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
1442 
1443 	ice_for_each_ring(ring, q_vector->rx) {
1444 		int cleaned;
1445 
1446 		cleaned = ice_clean_rx_irq(ring, budget_per_ring);
1447 		work_done += cleaned;
1448 		/* if we clean as many as budgeted, we must not be done */
1449 		if (cleaned >= budget_per_ring)
1450 			clean_complete = false;
1451 	}
1452 
1453 	/* If work not completed, return budget and polling will return */
1454 	if (!clean_complete)
1455 		return budget;
1456 
1457 	/* Exit the polling mode, but don't re-enable interrupts if stack might
1458 	 * poll us due to busy-polling
1459 	 */
1460 	if (likely(napi_complete_done(napi, work_done)))
1461 		if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
1462 			ice_update_ena_itr(vsi, q_vector);
1463 
1464 	return min_t(int, work_done, budget - 1);
1465 }
1466 
1467 /* helper for building the Tx descriptor cmd/type/offset/size/tag quadword */
1468 static __le64
1469 build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
1470 {
1471 	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
1472 			   (td_cmd    << ICE_TXD_QW1_CMD_S) |
1473 			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
1474 			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
1475 			   (td_tag    << ICE_TXD_QW1_L2TAG1_S));
1476 }
1477 
1478 /**
1479  * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1480  * @tx_ring: the ring to be checked
1481  * @size: number of descriptors we want to assure are available
1482  *
1483  * Returns -EBUSY if a stop is needed, else 0
1484  */
1485 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1486 {
1487 	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1488 	/* Memory barrier before checking head and tail; pairs with the
	 * smp_mb() in ice_clean_tx_irq() after next_to_clean is updated.
	 */
1489 	smp_mb();
1490 
1491 	/* Check again in a case another CPU has just made room available. */
1492 	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1493 		return -EBUSY;
1494 
1495 	/* A reprieve! - use start_subqueue because it doesn't call schedule */
1496 	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1497 	++tx_ring->tx_stats.restart_q;
1498 	return 0;
1499 }
1500 
1501 /**
1502  * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1503  * @tx_ring: the ring to be checked
1504  * @size: number of descriptors we want to assure are available
1505  *
1506  * Returns 0 if stop is not needed
1507  */
1508 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1509 {
1510 	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1511 		return 0;
1512 
1513 	return __ice_maybe_stop_tx(tx_ring, size);
1514 }
1515 
1516 /**
1517  * ice_tx_map - Build the Tx descriptor
1518  * @tx_ring: ring to send buffer on
1519  * @first: first buffer info buffer to use
1520  * @off: pointer to struct that holds offload parameters
1521  *
1522  * This function loops over the skb data pointed to by *first
1523  * and gets a physical address for each memory location and programs
1524  * it and the length into the transmit descriptor.
1525  */
1526 static void
1527 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1528 	   struct ice_tx_offload_params *off)
1529 {
1530 	u64 td_offset, td_tag, td_cmd;
1531 	u16 i = tx_ring->next_to_use;
1532 	struct skb_frag_struct *frag;
1533 	unsigned int data_len, size;
1534 	struct ice_tx_desc *tx_desc;
1535 	struct ice_tx_buf *tx_buf;
1536 	struct sk_buff *skb;
1537 	dma_addr_t dma;
1538 
1539 	td_tag = off->td_l2tag1;
1540 	td_cmd = off->td_cmd;
1541 	td_offset = off->td_offset;
1542 	skb = first->skb;
1543 
1544 	data_len = skb->data_len;
1545 	size = skb_headlen(skb);
1546 
1547 	tx_desc = ICE_TX_DESC(tx_ring, i);
1548 
1549 	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1550 		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1551 		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1552 			  ICE_TX_FLAGS_VLAN_S;
1553 	}
1554 
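	/* obtain a DMA address for the skb linear (header) data; fragments
	 * are mapped with skb_frag_dma_map() in the loop below
	 */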
1555 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1556 
1557 	tx_buf = first;
1558 
1559 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1560 		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1561 
1562 		if (dma_mapping_error(tx_ring->dev, dma))
1563 			goto dma_error;
1564 
1565 		/* record length, and DMA address */
1566 		dma_unmap_len_set(tx_buf, len, size);
1567 		dma_unmap_addr_set(tx_buf, dma, dma);
1568 
1569 		/* align size to end of page */
1570 		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1571 		tx_desc->buf_addr = cpu_to_le64(dma);
1572 
1573 		/* account for data chunks larger than the hardware
1574 		 * can handle
1575 		 */
1576 		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1577 			tx_desc->cmd_type_offset_bsz =
1578 				build_ctob(td_cmd, td_offset, max_data, td_tag);
1579 
1580 			tx_desc++;
1581 			i++;
1582 
1583 			if (i == tx_ring->count) {
1584 				tx_desc = ICE_TX_DESC(tx_ring, 0);
1585 				i = 0;
1586 			}
1587 
1588 			dma += max_data;
1589 			size -= max_data;
1590 
1591 			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1592 			tx_desc->buf_addr = cpu_to_le64(dma);
1593 		}
1594 
1595 		if (likely(!data_len))
1596 			break;
1597 
1598 		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
1599 							  size, td_tag);
1600 
1601 		tx_desc++;
1602 		i++;
1603 
1604 		if (i == tx_ring->count) {
1605 			tx_desc = ICE_TX_DESC(tx_ring, 0);
1606 			i = 0;
1607 		}
1608 
1609 		size = skb_frag_size(frag);
1610 		data_len -= size;
1611 
1612 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1613 				       DMA_TO_DEVICE);
1614 
1615 		tx_buf = &tx_ring->tx_buf[i];
1616 	}
1617 
1618 	/* record bytecount for BQL */
1619 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1620 
1621 	/* record SW timestamp if HW timestamp is not available */
1622 	skb_tx_timestamp(first->skb);
1623 
1624 	i++;
1625 	if (i == tx_ring->count)
1626 		i = 0;
1627 
1628 	/* write last descriptor with RS and EOP bits */
1629 	td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
1630 	tx_desc->cmd_type_offset_bsz =
1631 			build_ctob(td_cmd, td_offset, size, td_tag);
1632 
1633 	/* Force memory writes to complete before letting h/w know there
1634 	 * are new descriptors to fetch.
1635 	 *
1636 	 * We also use this memory barrier to make certain all of the
1637 	 * status bits have been updated before next_to_watch is written.
1638 	 */
1639 	wmb();
1640 
1641 	/* set next_to_watch value indicating a packet is present */
1642 	first->next_to_watch = tx_desc;
1643 
1644 	tx_ring->next_to_use = i;
1645 
1646 	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1647 
1648 	/* notify HW of packet */
1649 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
1650 		writel(i, tx_ring->tail);
1651 
1652 		/* we need this if more than one processor can write to our tail
1653 		 * at a time, it synchronizes IO on IA64/Altix systems
1654 		 */
1655 		mmiowb();
1656 	}
1657 
1658 	return;
1659 
1660 dma_error:
1661 	/* clear dma mappings for failed tx_buf map */
1662 	for (;;) {
1663 		tx_buf = &tx_ring->tx_buf[i];
1664 		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1665 		if (tx_buf == first)
1666 			break;
1667 		if (i == 0)
1668 			i = tx_ring->count;
1669 		i--;
1670 	}
1671 
1672 	tx_ring->next_to_use = i;
1673 }
1674 
1675 /**
1676  * ice_tx_csum - Enable Tx checksum offloads
1677  * @first: pointer to the first descriptor
1678  * @off: pointer to struct that holds offload parameters
1679  *
1680  * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1681  */
1682 static
1683 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1684 {
1685 	u32 l4_len = 0, l3_len = 0, l2_len = 0;
1686 	struct sk_buff *skb = first->skb;
1687 	union {
1688 		struct iphdr *v4;
1689 		struct ipv6hdr *v6;
1690 		unsigned char *hdr;
1691 	} ip;
1692 	union {
1693 		struct tcphdr *tcp;
1694 		unsigned char *hdr;
1695 	} l4;
1696 	__be16 frag_off, protocol;
1697 	unsigned char *exthdr;
1698 	u32 offset, cmd = 0;
1699 	u8 l4_proto = 0;
1700 
1701 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1702 		return 0;
1703 
1704 	ip.hdr = skb_network_header(skb);
1705 	l4.hdr = skb_transport_header(skb);
1706 
1707 	/* compute outer L2 header size */
1708 	l2_len = ip.hdr - skb->data;
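	/* MACLEN is encoded in 2-byte words, e.g. a standard 14-byte Ethernet
	 * header becomes 7
	 */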
1709 	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1710 
1711 	if (skb->encapsulation)
1712 		return -1;
1713 
1714 	/* Enable IP checksum offloads */
1715 	protocol = vlan_get_protocol(skb);
1716 	if (protocol == htons(ETH_P_IP)) {
1717 		l4_proto = ip.v4->protocol;
1718 		/* the stack computes the IP header already, the only time we
1719 		 * need the hardware to recompute it is in the case of TSO.
1720 		 */
1721 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1722 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1723 		else
1724 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1725 
1726 	} else if (protocol == htons(ETH_P_IPV6)) {
1727 		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1728 		exthdr = ip.hdr + sizeof(*ip.v6);
1729 		l4_proto = ip.v6->nexthdr;
1730 		if (l4.hdr != exthdr)
1731 			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1732 					 &frag_off);
1733 	} else {
1734 		return -1;
1735 	}
1736 
1737 	/* compute inner L3 header size */
1738 	l3_len = l4.hdr - ip.hdr;
1739 	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1740 
1741 	/* Enable L4 checksum offloads */
1742 	switch (l4_proto) {
1743 	case IPPROTO_TCP:
1744 		/* enable checksum offloads */
1745 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1746 		l4_len = l4.tcp->doff;
1747 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1748 		break;
1749 	case IPPROTO_UDP:
1750 		/* enable UDP checksum offload */
1751 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1752 		l4_len = (sizeof(struct udphdr) >> 2);
1753 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1754 		break;
1755 	case IPPROTO_SCTP:
1756 		/* enable SCTP checksum offload */
1757 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1758 		l4_len = sizeof(struct sctphdr) >> 2;
1759 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1760 		break;
1761 
1762 	default:
1763 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1764 			return -1;
1765 		skb_checksum_help(skb);
1766 		return 0;
1767 	}
1768 
1769 	off->td_cmd |= cmd;
1770 	off->td_offset |= offset;
1771 	return 1;
1772 }
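
/* Worked example of the td_offset packing above (illustrative only): an
 * untagged IPv4/TCP frame with no TCP options has a 14-byte Ethernet
 * header (MACLEN = 14 / 2 = 7, counted in 2-byte words), a 20-byte IP
 * header (IPLEN = 20 / 4 = 5, in 4-byte words) and a TCP data offset of
 * 5 (L4_LEN = 5, also in 4-byte words), so td_offset carries 7/5/5 in
 * the MACLEN/IPLEN/L4_LEN fields.
 */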
1773 
1774 /**
1775  * ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1776  * @tx_ring: ring to send buffer on
1777  * @first: pointer to struct ice_tx_buf
1778  *
1779  * Checks the skb and sets up the generic transmit flags related to VLAN
1780  * tagging for the HW, such as VLAN, DCB, etc.
1781  *
1782  * Returns a negative error code if the frame should be dropped, otherwise
1783  * returns 0 to indicate the flags have been set properly.
1784  */
1785 static int
1786 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1787 {
1788 	struct sk_buff *skb = first->skb;
1789 	__be16 protocol = skb->protocol;
1790 
1791 	if (protocol == htons(ETH_P_8021Q) &&
1792 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1793 		/* when HW VLAN acceleration is turned off by the user the
1794 		 * stack sets the protocol to 8021q so that the driver
1795 		 * can take any steps required to support the SW only
1796 		 * VLAN handling. In our case the driver doesn't need
1797 		 * to take any further steps so just set the protocol
1798 		 * to the encapsulated ethertype.
1799 		 */
1800 		skb->protocol = vlan_get_protocol(skb);
1801 		goto out;
1802 	}
1803 
1804 	/* if we have a HW VLAN tag being added, default to the HW one */
1805 	if (skb_vlan_tag_present(skb)) {
1806 		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1807 		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1808 	} else if (protocol == htons(ETH_P_8021Q)) {
1809 		struct vlan_hdr *vhdr, _vhdr;
1810 
1811 		/* for SW VLAN, check the next protocol and store the tag */
1812 		vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
1813 							     sizeof(_vhdr),
1814 							     &_vhdr);
1815 		if (!vhdr)
1816 			return -EINVAL;
1817 
1818 		first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
1819 				   ICE_TX_FLAGS_VLAN_S;
1820 		first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
1821 	}
1822 
1823 out:
1824 	return 0;
1825 }
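
/* Note on the flag layout used above (assuming ICE_TX_FLAGS_VLAN_S is 16,
 * i.e. the tag lives in the upper half of tx_flags): a HW-offloaded tag
 * with VLAN ID 100 and priority 0 stores 100 << 16 in first->tx_flags
 * together with ICE_TX_FLAGS_HW_VLAN, and ice_tx_map later recovers it as
 * the td_tag written into the data descriptors.
 */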
1826 
1827 /**
1828  * ice_tso - computes MSS and TSO length to prepare for TSO
1829  * @first: pointer to struct ice_tx_buf
1830  * @off: pointer to struct that holds offload parameters
1831  *
1832  * Returns 1 if TSO was set up, 0 if not needed, or a negative error on failure.
1833  */
1834 static
1835 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1836 {
1837 	struct sk_buff *skb = first->skb;
1838 	union {
1839 		struct iphdr *v4;
1840 		struct ipv6hdr *v6;
1841 		unsigned char *hdr;
1842 	} ip;
1843 	union {
1844 		struct tcphdr *tcp;
1845 		unsigned char *hdr;
1846 	} l4;
1847 	u64 cd_mss, cd_tso_len;
1848 	u32 paylen, l4_start;
1849 	int err;
1850 
1851 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1852 		return 0;
1853 
1854 	if (!skb_is_gso(skb))
1855 		return 0;
1856 
1857 	err = skb_cow_head(skb, 0);
1858 	if (err < 0)
1859 		return err;
1860 
1861 	ip.hdr = skb_network_header(skb);
1862 	l4.hdr = skb_transport_header(skb);
1863 
1864 	/* initialize outer IP header fields */
1865 	if (ip.v4->version == 4) {
1866 		ip.v4->tot_len = 0;
1867 		ip.v4->check = 0;
1868 	} else {
1869 		ip.v6->payload_len = 0;
1870 	}
1871 
1872 	/* determine offset of transport header */
1873 	l4_start = l4.hdr - skb->data;
1874 
1875 	/* remove payload length from checksum */
1876 	paylen = skb->len - l4_start;
1877 	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1878 
1879 	/* compute length of segmentation header */
1880 	off->header_len = (l4.tcp->doff * 4) + l4_start;
1881 
1882 	/* update gso_segs and bytecount */
1883 	first->gso_segs = skb_shinfo(skb)->gso_segs;
1884 	first->bytecount += (first->gso_segs - 1) * off->header_len;
1885 
1886 	cd_tso_len = skb->len - off->header_len;
1887 	cd_mss = skb_shinfo(skb)->gso_size;
1888 
1889 	/* record cdesc_qw1 with TSO parameters */
1890 	off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
1891 			 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1892 			 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1893 			 (cd_mss << ICE_TXD_CTX_QW1_MSS_S);
1894 	first->tx_flags |= ICE_TX_FLAGS_TSO;
1895 	return 1;
1896 }
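
/* Illustrative example of the bytecount adjustment above (the sizes are
 * assumptions chosen only for the arithmetic): a 9000-byte TSO skb with
 * 54 bytes of Ethernet/IPv4/TCP headers and an MSS of 1460 carries 8946
 * payload bytes, so the stack reports gso_segs = 7. Adding
 * (7 - 1) * 54 = 324 replicated header bytes gives bytecount = 9324,
 * which is what actually goes on the wire (6 * 1514 + 240 bytes).
 */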
1897 
1898 /**
1899  * ice_txd_use_count  - estimate the number of descriptors needed for Tx
1900  * @size: transmit request size in bytes
1901  *
1902  * Due to hardware alignment restrictions (4K alignment), we need to
1903  * assume that we can have no more than 12K of data per descriptor, even
1904  * though each descriptor can take up to 16K - 1 bytes of aligned memory.
1905  * Thus, we need to divide by 12K. But division is slow! Instead,
1906  * we decompose the operation into shifts and one relatively cheap
1907  * multiply operation.
1908  *
1909  * To divide by 12K, we first divide by 4K, then divide by 3:
1910  *     To divide by 4K, shift right by 12 bits
1911  *     To divide by 3, multiply by 85, then divide by 256
1912  *     (Divide by 256 is done by shifting right by 8 bits)
1913  * Finally, we add one to round up. Because 256 isn't an exact multiple of
1914  * 3, we'll underestimate near each multiple of 12K. This is actually more
1915  * accurate as we have 4K - 1 of wiggle room that we can fit into the last
1916  * segment. For our purposes this is accurate out to 1M which is orders of
1917  * magnitude greater than our largest possible GSO size.
1918  *
1919  * This would then be implemented as:
1920  *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
1921  *
1922  * Since multiplication and division are commutative, we can reorder
1923  * operations into:
1924  *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1925  */
1926 static unsigned int ice_txd_use_count(unsigned int size)
1927 {
1928 	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1929 }
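
/* Worked example of the formula above (illustrative; assumes the rounding
 * term ICE_DESCS_FOR_SKB_DATA_PTR is 1, as the "add one to round up" text
 * implies): a 65536-byte request gives (65536 * 85) >> 20 = 5, plus 1 = 6
 * descriptors, which matches the exact 65536 / 12288 = 5.33 -> 6 required
 * by the 12K-per-descriptor limit.
 */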
1930 
1931 /**
1932  * ice_xmit_desc_count - calculate number of Tx descriptors needed
1933  * @skb: send buffer
1934  *
1935  * Returns number of data descriptors needed for this skb.
1936  */
1937 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
1938 {
1939 	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1940 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1941 	unsigned int count = 0, size = skb_headlen(skb);
1942 
1943 	for (;;) {
1944 		count += ice_txd_use_count(size);
1945 
1946 		if (!nr_frags--)
1947 			break;
1948 
1949 		size = skb_frag_size(frag++);
1950 	}
1951 
1952 	return count;
1953 }
1954 
1955 /**
1956  * __ice_chk_linearize - Check if there are more than 8 buffers per packet
1957  * @skb: send buffer
1958  *
1959  * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
1960  * and so we need to figure out the cases where we need to linearize the skb.
1961  *
1962  * For TSO we need to count the TSO header and segment payload separately.
1963  * As such we need to check cases where we have 7 fragments or more as we
1964  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
1965  * the segment payload in the first descriptor, and another 7 for the
1966  * fragments.
1967  */
1968 static bool __ice_chk_linearize(struct sk_buff *skb)
1969 {
1970 	const struct skb_frag_struct *frag, *stale;
1971 	int nr_frags, sum;
1972 
1973 	/* no need to check if number of frags is less than 7 */
1974 	nr_frags = skb_shinfo(skb)->nr_frags;
1975 	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
1976 		return false;
1977 
1978 	/* We need to walk through the list and validate that each group
1979 	 * of 6 fragments totals at least gso_size.
1980 	 */
1981 	nr_frags -= ICE_MAX_BUF_TXD - 2;
1982 	frag = &skb_shinfo(skb)->frags[0];
1983 
1984 	/* Initialize size to the negative value of gso_size minus 1. We
1985  * use this as the worst case scenario in which the frag ahead
1986 	 * of us only provides one byte which is why we are limited to 6
1987 	 * descriptors for a single transmit as the header and previous
1988 	 * fragment are already consuming 2 descriptors.
1989 	 */
1990 	sum = 1 - skb_shinfo(skb)->gso_size;
1991 
1992 	/* Add size of frags 0 through 4 to create our initial sum */
1993 	sum += skb_frag_size(frag++);
1994 	sum += skb_frag_size(frag++);
1995 	sum += skb_frag_size(frag++);
1996 	sum += skb_frag_size(frag++);
1997 	sum += skb_frag_size(frag++);
1998 
1999 	/* Walk through fragments adding latest fragment, testing it, and
2000 	 * then removing stale fragments from the sum.
2001 	 */
2002 	stale = &skb_shinfo(skb)->frags[0];
2003 	for (;;) {
2004 		sum += skb_frag_size(frag++);
2005 
2006 		/* if sum is negative we failed to make sufficient progress */
2007 		if (sum < 0)
2008 			return true;
2009 
2010 		if (!nr_frags--)
2011 			break;
2012 
2013 		sum -= skb_frag_size(stale++);
2014 	}
2015 
2016 	return false;
2017 }
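
/* Illustrative failure case for the check above (the sizes are assumptions
 * chosen only for the arithmetic): gso_size = 2000 with ten 300-byte
 * fragments. Any six consecutive fragments cover just 1800 bytes, so one
 * segment plus its header could exceed the 8-buffer limit; sum starts at
 * 1 - 2000, climbs to -499 after the first five frags, and only reaches
 * -199 on the first loop iteration, so the skb gets linearized.
 */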
2018 
2019 /**
2020  * ice_chk_linearize - Check if there are more than 8 fragments per packet
2021  * @skb:      send buffer
2022  * @count:    number of buffers used
2023  *
2024  * Note: Our HW can't scatter-gather more than 8 fragments to build
2025  * a packet on the wire and so we need to figure out the cases where we
2026  * need to linearize the skb.
2027  */
2028 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2029 {
2030 	/* Both TSO and single send will work if count is less than 8 */
2031 	if (likely(count < ICE_MAX_BUF_TXD))
2032 		return false;
2033 
2034 	if (skb_is_gso(skb))
2035 		return __ice_chk_linearize(skb);
2036 
2037 	/* we can support up to 8 data buffers for a single send */
2038 	return count != ICE_MAX_BUF_TXD;
2039 }
2040 
2041 /**
2042  * ice_xmit_frame_ring - Sends buffer on Tx ring
2043  * @skb: send buffer
2044  * @tx_ring: ring to send buffer on
2045  *
2046  * Returns NETDEV_TX_OK if sent (or dropped), NETDEV_TX_BUSY if the ring is full
2047  */
2048 static netdev_tx_t
2049 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2050 {
2051 	struct ice_tx_offload_params offload = { 0 };
2052 	struct ice_tx_buf *first;
2053 	unsigned int count;
2054 	int tso, csum;
2055 
2056 	count = ice_xmit_desc_count(skb);
2057 	if (ice_chk_linearize(skb, count)) {
2058 		if (__skb_linearize(skb))
2059 			goto out_drop;
2060 		count = ice_txd_use_count(skb->len);
2061 		tx_ring->tx_stats.tx_linearize++;
2062 	}
2063 
2064 	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2065 	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2066 	 *       + 4 desc gap to avoid the cache line where head is,
2067 	 *       + 1 desc for context descriptor,
2068 	 * otherwise try next time
2069 	 */
2070 	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2071 			      ICE_DESCS_FOR_CTX_DESC)) {
2072 		tx_ring->tx_stats.tx_busy++;
2073 		return NETDEV_TX_BUSY;
2074 	}
2075 
2076 	offload.tx_ring = tx_ring;
2077 
2078 	/* record the location of the first descriptor for this packet */
2079 	first = &tx_ring->tx_buf[tx_ring->next_to_use];
2080 	first->skb = skb;
2081 	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2082 	first->gso_segs = 1;
2083 	first->tx_flags = 0;
2084 
2085 	/* prepare the VLAN tagging flags for Tx */
2086 	if (ice_tx_prepare_vlan_flags(tx_ring, first))
2087 		goto out_drop;
2088 
2089 	/* set up TSO offload */
2090 	tso = ice_tso(first, &offload);
2091 	if (tso < 0)
2092 		goto out_drop;
2093 
2094 	/* always set up Tx checksum offload */
2095 	csum = ice_tx_csum(first, &offload);
2096 	if (csum < 0)
2097 		goto out_drop;
2098 
2099 	if (tso || offload.cd_tunnel_params) {
2100 		struct ice_tx_ctx_desc *cdesc;
2101 		int i = tx_ring->next_to_use;
2102 
2103 		/* grab the next descriptor */
2104 		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2105 		i++;
2106 		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2107 
2108 		/* setup context descriptor */
2109 		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2110 		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2111 		cdesc->rsvd = cpu_to_le16(0);
2112 		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2113 	}
2114 
2115 	ice_tx_map(tx_ring, first, &offload);
2116 	return NETDEV_TX_OK;
2117 
2118 out_drop:
2119 	dev_kfree_skb_any(skb);
2120 	return NETDEV_TX_OK;
2121 }
2122 
2123 /**
2124  * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2125  * @skb: send buffer
2126  * @netdev: network interface device structure
2127  *
2128  * Returns NETDEV_TX_OK if sent (or dropped), NETDEV_TX_BUSY if the ring is full
2129  */
2130 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2131 {
2132 	struct ice_netdev_priv *np = netdev_priv(netdev);
2133 	struct ice_vsi *vsi = np->vsi;
2134 	struct ice_ring *tx_ring;
2135 
2136 	tx_ring = vsi->tx_rings[skb->queue_mapping];
2137 
2138 	/* hardware can't handle really short frames, so pad them to ICE_MIN_TX_LEN
2139 	 * here; hardware padding works beyond this point
2140 	 */
2141 	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2142 		return NETDEV_TX_OK;
2143 
2144 	return ice_xmit_frame_ring(skb, tx_ring);
2145 }
2146