1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* The driver transmit and receive code */
5 
6 #include <linux/prefetch.h>
7 #include <linux/mm.h>
8 #include "ice.h"
9 #include "ice_dcb_lib.h"
10 
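/* cap on the number of packet header bytes ice_construct_skb() copies into
 * the skb linear area; the remainder of the buffer is attached as a page frag
 */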
11 #define ICE_RX_HDR_SIZE		256
12 
13 /**
14  * ice_unmap_and_free_tx_buf - Release a Tx buffer
15  * @ring: the ring that owns the buffer
16  * @tx_buf: the buffer to free
17  */
18 static void
19 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
20 {
21 	if (tx_buf->skb) {
22 		dev_kfree_skb_any(tx_buf->skb);
23 		if (dma_unmap_len(tx_buf, len))
24 			dma_unmap_single(ring->dev,
25 					 dma_unmap_addr(tx_buf, dma),
26 					 dma_unmap_len(tx_buf, len),
27 					 DMA_TO_DEVICE);
28 	} else if (dma_unmap_len(tx_buf, len)) {
29 		dma_unmap_page(ring->dev,
30 			       dma_unmap_addr(tx_buf, dma),
31 			       dma_unmap_len(tx_buf, len),
32 			       DMA_TO_DEVICE);
33 	}
34 
35 	tx_buf->next_to_watch = NULL;
36 	tx_buf->skb = NULL;
37 	dma_unmap_len_set(tx_buf, len, 0);
38 	/* tx_buf must be completely set up in the transmit path */
39 }
40 
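/**
 * txring_txq - helper to get the netdev_queue backing a Tx ring
 * @ring: Tx ring to find the netdev queue for
 */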
41 static struct netdev_queue *txring_txq(const struct ice_ring *ring)
42 {
43 	return netdev_get_tx_queue(ring->netdev, ring->q_index);
44 }
45 
46 /**
47  * ice_clean_tx_ring - Free any empty Tx buffers
48  * @tx_ring: ring to be cleaned
49  */
50 void ice_clean_tx_ring(struct ice_ring *tx_ring)
51 {
52 	u16 i;
53 
54 	/* ring already cleared, nothing to do */
55 	if (!tx_ring->tx_buf)
56 		return;
57 
58 	/* Free all the Tx ring sk_buffs */
59 	for (i = 0; i < tx_ring->count; i++)
60 		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
61 
62 	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
63 
64 	/* Zero out the descriptor ring */
65 	memset(tx_ring->desc, 0, tx_ring->size);
66 
67 	tx_ring->next_to_use = 0;
68 	tx_ring->next_to_clean = 0;
69 
70 	if (!tx_ring->netdev)
71 		return;
72 
73 	/* cleanup Tx queue statistics */
74 	netdev_tx_reset_queue(txring_txq(tx_ring));
75 }
76 
77 /**
78  * ice_free_tx_ring - Free Tx resources per queue
79  * @tx_ring: Tx descriptor ring for a specific queue
80  *
81  * Free all transmit software resources
82  */
83 void ice_free_tx_ring(struct ice_ring *tx_ring)
84 {
85 	ice_clean_tx_ring(tx_ring);
86 	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
87 	tx_ring->tx_buf = NULL;
88 
89 	if (tx_ring->desc) {
90 		dmam_free_coherent(tx_ring->dev, tx_ring->size,
91 				   tx_ring->desc, tx_ring->dma);
92 		tx_ring->desc = NULL;
93 	}
94 }
95 
96 /**
97  * ice_clean_tx_irq - Reclaim resources after transmit completes
98  * @vsi: the VSI we care about
99  * @tx_ring: Tx ring to clean
100  * @napi_budget: Used to determine if we are in netpoll
101  *
 * Returns true if there's any budget left (i.e. the clean is finished)
103  */
104 static bool
105 ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget)
106 {
107 	unsigned int total_bytes = 0, total_pkts = 0;
108 	unsigned int budget = vsi->work_lmt;
109 	s16 i = tx_ring->next_to_clean;
110 	struct ice_tx_desc *tx_desc;
111 	struct ice_tx_buf *tx_buf;
112 
113 	tx_buf = &tx_ring->tx_buf[i];
114 	tx_desc = ICE_TX_DESC(tx_ring, i);
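	/* bias the index negative so the ring-wrap checks below reduce to a
	 * cheap !i test instead of a compare against tx_ring->count
	 */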
115 	i -= tx_ring->count;
116 
117 	do {
118 		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
119 
120 		/* if next_to_watch is not set then there is no work pending */
121 		if (!eop_desc)
122 			break;
123 
124 		smp_rmb();	/* prevent any other reads prior to eop_desc */
125 
126 		/* if the descriptor isn't done, no work yet to do */
127 		if (!(eop_desc->cmd_type_offset_bsz &
128 		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
129 			break;
130 
131 		/* clear next_to_watch to prevent false hangs */
132 		tx_buf->next_to_watch = NULL;
133 
134 		/* update the statistics for this packet */
135 		total_bytes += tx_buf->bytecount;
136 		total_pkts += tx_buf->gso_segs;
137 
138 		/* free the skb */
139 		napi_consume_skb(tx_buf->skb, napi_budget);
140 
141 		/* unmap skb header data */
142 		dma_unmap_single(tx_ring->dev,
143 				 dma_unmap_addr(tx_buf, dma),
144 				 dma_unmap_len(tx_buf, len),
145 				 DMA_TO_DEVICE);
146 
147 		/* clear tx_buf data */
148 		tx_buf->skb = NULL;
149 		dma_unmap_len_set(tx_buf, len, 0);
150 
151 		/* unmap remaining buffers */
152 		while (tx_desc != eop_desc) {
153 			tx_buf++;
154 			tx_desc++;
155 			i++;
156 			if (unlikely(!i)) {
157 				i -= tx_ring->count;
158 				tx_buf = tx_ring->tx_buf;
159 				tx_desc = ICE_TX_DESC(tx_ring, 0);
160 			}
161 
162 			/* unmap any remaining paged data */
163 			if (dma_unmap_len(tx_buf, len)) {
164 				dma_unmap_page(tx_ring->dev,
165 					       dma_unmap_addr(tx_buf, dma),
166 					       dma_unmap_len(tx_buf, len),
167 					       DMA_TO_DEVICE);
168 				dma_unmap_len_set(tx_buf, len, 0);
169 			}
170 		}
171 
172 		/* move us one more past the eop_desc for start of next pkt */
173 		tx_buf++;
174 		tx_desc++;
175 		i++;
176 		if (unlikely(!i)) {
177 			i -= tx_ring->count;
178 			tx_buf = tx_ring->tx_buf;
179 			tx_desc = ICE_TX_DESC(tx_ring, 0);
180 		}
181 
182 		prefetch(tx_desc);
183 
184 		/* update budget accounting */
185 		budget--;
186 	} while (likely(budget));
187 
188 	i += tx_ring->count;
189 	tx_ring->next_to_clean = i;
190 	u64_stats_update_begin(&tx_ring->syncp);
191 	tx_ring->stats.bytes += total_bytes;
192 	tx_ring->stats.pkts += total_pkts;
193 	u64_stats_update_end(&tx_ring->syncp);
194 	tx_ring->q_vector->tx.total_bytes += total_bytes;
195 	tx_ring->q_vector->tx.total_pkts += total_pkts;
196 
197 	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
198 				  total_bytes);
199 
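	/* wake the queue only once there is room for at least two worst-case
	 * frames (DESC_NEEDED descriptors each)
	 */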
200 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
201 	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
202 		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
203 		/* Make sure that anybody stopping the queue after this
204 		 * sees the new next_to_clean.
205 		 */
206 		smp_mb();
207 		if (__netif_subqueue_stopped(tx_ring->netdev,
208 					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
210 			netif_wake_subqueue(tx_ring->netdev,
211 					    tx_ring->q_index);
212 			++tx_ring->tx_stats.restart_q;
213 		}
214 	}
215 
216 	return !!budget;
217 }
218 
219 /**
220  * ice_setup_tx_ring - Allocate the Tx descriptors
221  * @tx_ring: the Tx ring to set up
222  *
223  * Return 0 on success, negative on error
224  */
225 int ice_setup_tx_ring(struct ice_ring *tx_ring)
226 {
227 	struct device *dev = tx_ring->dev;
228 
229 	if (!dev)
230 		return -ENOMEM;
231 
232 	/* warn if we are about to overwrite the pointer */
233 	WARN_ON(tx_ring->tx_buf);
234 	tx_ring->tx_buf =
235 		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
236 			     GFP_KERNEL);
237 	if (!tx_ring->tx_buf)
238 		return -ENOMEM;
239 
240 	/* round up to nearest page */
241 	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
242 			      PAGE_SIZE);
243 	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
244 					    GFP_KERNEL);
245 	if (!tx_ring->desc) {
246 		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
247 			tx_ring->size);
248 		goto err;
249 	}
250 
251 	tx_ring->next_to_use = 0;
252 	tx_ring->next_to_clean = 0;
253 	tx_ring->tx_stats.prev_pkt = -1;
254 	return 0;
255 
256 err:
257 	devm_kfree(dev, tx_ring->tx_buf);
258 	tx_ring->tx_buf = NULL;
259 	return -ENOMEM;
260 }
261 
262 /**
263  * ice_clean_rx_ring - Free Rx buffers
264  * @rx_ring: ring to be cleaned
265  */
266 void ice_clean_rx_ring(struct ice_ring *rx_ring)
267 {
268 	struct device *dev = rx_ring->dev;
269 	u16 i;
270 
271 	/* ring already cleared, nothing to do */
272 	if (!rx_ring->rx_buf)
273 		return;
274 
275 	/* Free all the Rx ring sk_buffs */
276 	for (i = 0; i < rx_ring->count; i++) {
277 		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
278 
279 		if (rx_buf->skb) {
280 			dev_kfree_skb(rx_buf->skb);
281 			rx_buf->skb = NULL;
282 		}
283 		if (!rx_buf->page)
284 			continue;
285 
286 		/* Invalidate cache lines that may have been written to by
287 		 * device so that we avoid corrupting memory.
288 		 */
289 		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
290 					      rx_buf->page_offset,
291 					      ICE_RXBUF_2048, DMA_FROM_DEVICE);
292 
293 		/* free resources associated with mapping */
294 		dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
295 				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
296 		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
297 
298 		rx_buf->page = NULL;
299 		rx_buf->page_offset = 0;
300 	}
301 
302 	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
303 
304 	/* Zero out the descriptor ring */
305 	memset(rx_ring->desc, 0, rx_ring->size);
306 
307 	rx_ring->next_to_alloc = 0;
308 	rx_ring->next_to_clean = 0;
309 	rx_ring->next_to_use = 0;
310 }
311 
312 /**
313  * ice_free_rx_ring - Free Rx resources
314  * @rx_ring: ring to clean the resources from
315  *
316  * Free all receive software resources
317  */
318 void ice_free_rx_ring(struct ice_ring *rx_ring)
319 {
320 	ice_clean_rx_ring(rx_ring);
321 	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
322 	rx_ring->rx_buf = NULL;
323 
324 	if (rx_ring->desc) {
325 		dmam_free_coherent(rx_ring->dev, rx_ring->size,
326 				   rx_ring->desc, rx_ring->dma);
327 		rx_ring->desc = NULL;
328 	}
329 }
330 
331 /**
332  * ice_setup_rx_ring - Allocate the Rx descriptors
333  * @rx_ring: the Rx ring to set up
334  *
335  * Return 0 on success, negative on error
336  */
337 int ice_setup_rx_ring(struct ice_ring *rx_ring)
338 {
339 	struct device *dev = rx_ring->dev;
340 
341 	if (!dev)
342 		return -ENOMEM;
343 
344 	/* warn if we are about to overwrite the pointer */
345 	WARN_ON(rx_ring->rx_buf);
346 	rx_ring->rx_buf =
347 		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
348 			     GFP_KERNEL);
349 	if (!rx_ring->rx_buf)
350 		return -ENOMEM;
351 
352 	/* round up to nearest page */
353 	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
354 			      PAGE_SIZE);
355 	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
356 					    GFP_KERNEL);
357 	if (!rx_ring->desc) {
358 		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
359 			rx_ring->size);
360 		goto err;
361 	}
362 
363 	rx_ring->next_to_use = 0;
364 	rx_ring->next_to_clean = 0;
365 	return 0;
366 
367 err:
368 	devm_kfree(dev, rx_ring->rx_buf);
369 	rx_ring->rx_buf = NULL;
370 	return -ENOMEM;
371 }
372 
373 /**
374  * ice_release_rx_desc - Store the new tail and head values
375  * @rx_ring: ring to bump
376  * @val: new head index
377  */
378 static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
379 {
380 	u16 prev_ntu = rx_ring->next_to_use;
381 
382 	rx_ring->next_to_use = val;
383 
384 	/* update next to alloc since we have filled the ring */
385 	rx_ring->next_to_alloc = val;
386 
387 	/* QRX_TAIL will be updated with any tail value, but hardware ignores
388 	 * the lower 3 bits. This makes it so we only bump tail on meaningful
389 	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
390 	 * the budget depending on the current traffic load.
391 	 */
392 	val &= ~0x7;
393 	if (prev_ntu != val) {
394 		/* Force memory writes to complete before letting h/w
395 		 * know there are new descriptors to fetch. (Only
396 		 * applicable for weak-ordered memory model archs,
397 		 * such as IA-64).
398 		 */
399 		wmb();
400 		writel(val, rx_ring->tail);
401 	}
402 }
403 
404 /**
405  * ice_alloc_mapped_page - recycle or make a new page
406  * @rx_ring: ring to use
407  * @bi: rx_buf struct to modify
408  *
409  * Returns true if the page was successfully allocated or
410  * reused.
411  */
412 static bool
413 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
414 {
415 	struct page *page = bi->page;
416 	dma_addr_t dma;
417 
418 	/* since we are recycling buffers we should seldom need to alloc */
419 	if (likely(page)) {
420 		rx_ring->rx_stats.page_reuse_count++;
421 		return true;
422 	}
423 
424 	/* alloc new page for storage */
425 	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
426 	if (unlikely(!page)) {
427 		rx_ring->rx_stats.alloc_page_failed++;
428 		return false;
429 	}
430 
431 	/* map page for use */
432 	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
433 				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
434 
435 	/* if mapping failed free memory back to system since
436 	 * there isn't much point in holding memory we can't use
437 	 */
438 	if (dma_mapping_error(rx_ring->dev, dma)) {
439 		__free_pages(page, 0);
440 		rx_ring->rx_stats.alloc_page_failed++;
441 		return false;
442 	}
443 
444 	bi->dma = dma;
445 	bi->page = page;
446 	bi->page_offset = 0;
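	/* take a batch of page references up front so that reusing this buffer
	 * only needs to adjust the local pagecnt_bias instead of touching the
	 * atomic page refcount for every received frame
	 */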
447 	page_ref_add(page, USHRT_MAX - 1);
448 	bi->pagecnt_bias = USHRT_MAX;
449 
450 	return true;
451 }
452 
453 /**
454  * ice_alloc_rx_bufs - Replace used receive buffers
455  * @rx_ring: ring to place buffers on
456  * @cleaned_count: number of buffers to replace
457  *
458  * Returns false if all allocations were successful, true if any fail. Returning
459  * true signals to the caller that we didn't replace cleaned_count buffers and
460  * there is more work to do.
461  *
462  * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
463  * buffers. Then bump tail at most one time. Grouping like this lets us avoid
464  * multiple tail writes per call.
465  */
466 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
467 {
468 	union ice_32b_rx_flex_desc *rx_desc;
469 	u16 ntu = rx_ring->next_to_use;
470 	struct ice_rx_buf *bi;
471 
472 	/* do nothing if no valid netdev defined */
473 	if (!rx_ring->netdev || !cleaned_count)
474 		return false;
475 
476 	/* get the Rx descriptor and buffer based on next_to_use */
477 	rx_desc = ICE_RX_DESC(rx_ring, ntu);
478 	bi = &rx_ring->rx_buf[ntu];
479 
480 	do {
481 		/* if we fail here, we have work remaining */
482 		if (!ice_alloc_mapped_page(rx_ring, bi))
483 			break;
484 
485 		/* sync the buffer for use by the device */
486 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
487 						 bi->page_offset,
488 						 ICE_RXBUF_2048,
489 						 DMA_FROM_DEVICE);
490 
491 		/* Refresh the desc even if buffer_addrs didn't change
492 		 * because each write-back erases this info.
493 		 */
494 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
495 
496 		rx_desc++;
497 		bi++;
498 		ntu++;
499 		if (unlikely(ntu == rx_ring->count)) {
500 			rx_desc = ICE_RX_DESC(rx_ring, 0);
501 			bi = rx_ring->rx_buf;
502 			ntu = 0;
503 		}
504 
505 		/* clear the status bits for the next_to_use descriptor */
506 		rx_desc->wb.status_error0 = 0;
507 
508 		cleaned_count--;
509 	} while (cleaned_count);
510 
511 	if (rx_ring->next_to_use != ntu)
512 		ice_release_rx_desc(rx_ring, ntu);
513 
514 	return !!cleaned_count;
515 }
516 
517 /**
518  * ice_page_is_reserved - check if reuse is possible
519  * @page: page struct to check
520  */
521 static bool ice_page_is_reserved(struct page *page)
522 {
523 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
524 }
525 
526 /**
527  * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
528  * @rx_buf: Rx buffer to adjust
529  * @size: Size of adjustment
530  *
531  * Update the offset within page so that Rx buf will be ready to be reused.
532  * For systems with PAGE_SIZE < 8192 this function will flip the page offset
533  * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by @size bytes
535  */
536 static void
537 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
538 {
539 #if (PAGE_SIZE < 8192)
540 	/* flip page offset to other buffer */
541 	rx_buf->page_offset ^= size;
542 #else
543 	/* move offset up to the next cache line */
544 	rx_buf->page_offset += size;
545 #endif
546 }
547 
548 /**
549  * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
550  * @rx_buf: buffer containing the page
551  *
552  * If page is reusable, we have a green light for calling ice_reuse_rx_page,
553  * which will assign the current buffer to the buffer that next_to_alloc is
554  * pointing to; otherwise, the DMA mapping needs to be destroyed and
555  * page freed
556  */
557 static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
558 {
559 #if (PAGE_SIZE >= 8192)
560 	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
561 #endif
562 	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
563 	struct page *page = rx_buf->page;
564 
565 	/* avoid re-using remote pages */
566 	if (unlikely(ice_page_is_reserved(page)))
567 		return false;
568 
569 #if (PAGE_SIZE < 8192)
570 	/* if we are only owner of page we can reuse it */
571 	if (unlikely((page_count(page) - pagecnt_bias) > 1))
572 		return false;
573 #else
574 	if (rx_buf->page_offset > last_offset)
575 		return false;
#endif /* PAGE_SIZE < 8192 */
577 
578 	/* If we have drained the page fragment pool we need to update
579 	 * the pagecnt_bias and page count so that we fully restock the
580 	 * number of references the driver holds.
581 	 */
582 	if (unlikely(pagecnt_bias == 1)) {
583 		page_ref_add(page, USHRT_MAX - 1);
584 		rx_buf->pagecnt_bias = USHRT_MAX;
585 	}
586 
587 	return true;
588 }
589 
590 /**
591  * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
592  * @rx_buf: buffer containing page to add
593  * @skb: sk_buff to place the data into
594  * @size: packet length from rx_desc
595  *
596  * This function will add the data contained in rx_buf->page to the skb.
597  * It will just attach the page as a frag to the skb.
598  * The function will then update the page offset.
599  */
600 static void
601 ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
602 		unsigned int size)
603 {
604 #if (PAGE_SIZE >= 8192)
605 	unsigned int truesize = SKB_DATA_ALIGN(size);
606 #else
607 	unsigned int truesize = ICE_RXBUF_2048;
608 #endif
609 
610 	if (!size)
		return;

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
613 			rx_buf->page_offset, size, truesize);
614 
615 	/* page is being used so we must update the page offset */
616 	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
617 }
618 
619 /**
620  * ice_reuse_rx_page - page flip buffer and store it back on the ring
621  * @rx_ring: Rx descriptor ring to store buffers on
622  * @old_buf: donor buffer to have page reused
623  *
624  * Synchronizes page for reuse by the adapter
625  */
626 static void
627 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
628 {
629 	u16 nta = rx_ring->next_to_alloc;
630 	struct ice_rx_buf *new_buf;
631 
632 	new_buf = &rx_ring->rx_buf[nta];
633 
634 	/* update, and store next to alloc */
635 	nta++;
636 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
637 
638 	/* Transfer page from old buffer to new buffer.
639 	 * Move each member individually to avoid possible store
640 	 * forwarding stalls and unnecessary copy of skb.
641 	 */
642 	new_buf->dma = old_buf->dma;
643 	new_buf->page = old_buf->page;
644 	new_buf->page_offset = old_buf->page_offset;
645 	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
646 }
647 
648 /**
649  * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
650  * @rx_ring: Rx descriptor ring to transact packets on
651  * @skb: skb to be used
652  * @size: size of buffer to add to skb
653  *
654  * This function will pull an Rx buffer from the ring and synchronize it
655  * for use by the CPU.
656  */
657 static struct ice_rx_buf *
658 ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
659 	       const unsigned int size)
660 {
661 	struct ice_rx_buf *rx_buf;
662 
663 	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
664 	prefetchw(rx_buf->page);
665 	*skb = rx_buf->skb;
666 
667 	if (!size)
		return rx_buf;

	/* we are reusing so sync this buffer for CPU use */
670 	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
671 				      rx_buf->page_offset, size,
672 				      DMA_FROM_DEVICE);
673 
674 	/* We have pulled a buffer for use, so decrement pagecnt_bias */
675 	rx_buf->pagecnt_bias--;
676 
677 	return rx_buf;
678 }
679 
680 /**
681  * ice_construct_skb - Allocate skb and populate it
682  * @rx_ring: Rx descriptor ring to transact packets on
683  * @rx_buf: Rx buffer to pull data from
684  * @size: the length of the packet
685  *
686  * This function allocates an skb. It then populates it with the page
687  * data from the current receive descriptor, taking care to set up the
688  * skb correctly.
689  */
690 static struct sk_buff *
691 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
692 		  unsigned int size)
693 {
694 	void *va = page_address(rx_buf->page) + rx_buf->page_offset;
695 	unsigned int headlen;
696 	struct sk_buff *skb;
697 
698 	/* prefetch first cache line of first page */
699 	prefetch(va);
700 #if L1_CACHE_BYTES < 128
701 	prefetch((u8 *)va + L1_CACHE_BYTES);
702 #endif /* L1_CACHE_BYTES */
703 
704 	/* allocate a skb to store the frags */
705 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
706 			       GFP_ATOMIC | __GFP_NOWARN);
707 	if (unlikely(!skb))
708 		return NULL;
709 
710 	skb_record_rx_queue(skb, rx_ring->q_index);
711 	/* Determine available headroom for copy */
712 	headlen = size;
713 	if (headlen > ICE_RX_HDR_SIZE)
714 		headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);
715 
716 	/* align pull length to size of long to optimize memcpy performance */
717 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
718 
719 	/* if we exhaust the linear part then add what is left as a frag */
720 	size -= headlen;
721 	if (size) {
722 #if (PAGE_SIZE >= 8192)
723 		unsigned int truesize = SKB_DATA_ALIGN(size);
724 #else
725 		unsigned int truesize = ICE_RXBUF_2048;
726 #endif
727 		skb_add_rx_frag(skb, 0, rx_buf->page,
728 				rx_buf->page_offset + headlen, size, truesize);
729 		/* buffer is used by skb, update page_offset */
730 		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
731 	} else {
732 		/* buffer is unused, reset bias back to rx_buf; data was copied
733 		 * onto skb's linear part so there's no need for adjusting
734 		 * page offset and we can reuse this buffer as-is
735 		 */
736 		rx_buf->pagecnt_bias++;
737 	}
738 
739 	return skb;
740 }
741 
742 /**
743  * ice_put_rx_buf - Clean up used buffer and either recycle or free
744  * @rx_ring: Rx descriptor ring to transact packets on
745  * @rx_buf: Rx buffer to pull data from
746  *
 * This function will clean up the contents of the rx_buf. It will
748  * either recycle the buffer or unmap it and free the associated resources.
749  */
750 static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
751 {
752 	if (!rx_buf)
753 		return;
754 
755 	if (ice_can_reuse_rx_page(rx_buf)) {
756 		/* hand second half of page back to the ring */
757 		ice_reuse_rx_page(rx_ring, rx_buf);
758 		rx_ring->rx_stats.page_reuse_count++;
759 	} else {
760 		/* we are not reusing the buffer so unmap it */
761 		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
762 				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
763 		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
764 	}
765 
766 	/* clear contents of buffer_info */
767 	rx_buf->page = NULL;
768 	rx_buf->skb = NULL;
769 }
770 
771 /**
772  * ice_cleanup_headers - Correct empty headers
773  * @skb: pointer to current skb being fixed
774  *
775  * Also address the case where we are pulling data in on pages only
776  * and as such no data is present in the skb header.
777  *
778  * In addition if skb is not at least 60 bytes we need to pad it so that
779  * it is large enough to qualify as a valid Ethernet frame.
780  *
781  * Returns true if an error was encountered and skb was freed.
782  */
783 static bool ice_cleanup_headers(struct sk_buff *skb)
784 {
785 	/* if eth_skb_pad returns an error the skb was freed */
786 	if (eth_skb_pad(skb))
787 		return true;
788 
789 	return false;
790 }
791 
792 /**
793  * ice_test_staterr - tests bits in Rx descriptor status and error fields
794  * @rx_desc: pointer to receive descriptor (in le64 format)
795  * @stat_err_bits: value to mask
796  *
797  * This function does some fast chicanery in order to return the
798  * value of the mask which is really only used for boolean tests.
799  * The status_error_len doesn't need to be shifted because it begins
800  * at offset zero.
801  */
802 static bool
803 ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
804 {
805 	return !!(rx_desc->wb.status_error0 &
806 		  cpu_to_le16(stat_err_bits));
807 }
808 
809 /**
810  * ice_is_non_eop - process handling of non-EOP buffers
811  * @rx_ring: Rx ring being processed
812  * @rx_desc: Rx descriptor for current buffer
813  * @skb: Current socket buffer containing buffer in progress
814  *
815  * This function updates next to clean. If the buffer is an EOP buffer
816  * this function exits returning false, otherwise it will place the
817  * sk_buff in the next buffer to be chained and return true indicating
818  * that this is in fact a non-EOP buffer.
819  */
820 static bool
821 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
822 	       struct sk_buff *skb)
823 {
824 	u32 ntc = rx_ring->next_to_clean + 1;
825 
826 	/* fetch, update, and store next to clean */
827 	ntc = (ntc < rx_ring->count) ? ntc : 0;
828 	rx_ring->next_to_clean = ntc;
829 
830 	prefetch(ICE_RX_DESC(rx_ring, ntc));
831 
832 	/* if we are the last buffer then there is nothing else to do */
833 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
834 	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
835 		return false;
836 
837 	/* place skb in next buffer to be received */
838 	rx_ring->rx_buf[ntc].skb = skb;
839 	rx_ring->rx_stats.non_eop_descs++;
840 
841 	return true;
842 }
843 
844 /**
845  * ice_ptype_to_htype - get a hash type
846  * @ptype: the ptype value from the descriptor
847  *
848  * Returns a hash type to be used by skb_set_hash
849  */
850 static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
851 {
852 	return PKT_HASH_TYPE_NONE;
853 }
854 
855 /**
856  * ice_rx_hash - set the hash value in the skb
857  * @rx_ring: descriptor ring
858  * @rx_desc: specific descriptor
859  * @skb: pointer to current skb
860  * @rx_ptype: the ptype value from the descriptor
861  */
862 static void
863 ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
864 	    struct sk_buff *skb, u8 rx_ptype)
865 {
866 	struct ice_32b_rx_flex_desc_nic *nic_mdid;
867 	u32 hash;
868 
869 	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
870 		return;
871 
872 	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
873 		return;
874 
875 	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
876 	hash = le32_to_cpu(nic_mdid->rss_hash);
877 	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
878 }
879 
880 /**
881  * ice_rx_csum - Indicate in skb if checksum is good
882  * @vsi: the VSI we care about
883  * @skb: skb currently being received and modified
884  * @rx_desc: the receive descriptor
885  * @ptype: the packet type decoded by hardware
886  *
887  * skb->protocol must be set before this function is called
888  */
889 static void
890 ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
891 	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
892 {
893 	struct ice_rx_ptype_decoded decoded;
894 	u32 rx_error, rx_status;
895 	bool ipv4, ipv6;
896 
897 	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
898 	rx_error = rx_status;
899 
900 	decoded = ice_decode_rx_desc_ptype(ptype);
901 
902 	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
903 	skb->ip_summed = CHECKSUM_NONE;
904 	skb_checksum_none_assert(skb);
905 
906 	/* check if Rx checksum is enabled */
907 	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
908 		return;
909 
910 	/* check if HW has decoded the packet and checksum */
911 	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
912 		return;
913 
914 	if (!(decoded.known && decoded.outer_ip))
915 		return;
916 
917 	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
918 	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
919 	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
920 	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
921 
922 	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
923 				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
924 		goto checksum_fail;
925 	else if (ipv6 && (rx_status &
926 		 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
927 		goto checksum_fail;
928 
929 	/* check for L4 errors and handle packets that were not able to be
930 	 * checksummed due to arrival speed
931 	 */
932 	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
933 		goto checksum_fail;
934 
935 	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
936 	switch (decoded.inner_prot) {
937 	case ICE_RX_PTYPE_INNER_PROT_TCP:
938 	case ICE_RX_PTYPE_INNER_PROT_UDP:
939 	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
942 		break;
943 	}
944 	return;
945 
946 checksum_fail:
947 	vsi->back->hw_csum_rx_error++;
948 }
949 
950 /**
951  * ice_process_skb_fields - Populate skb header fields from Rx descriptor
952  * @rx_ring: Rx descriptor ring packet is being transacted on
953  * @rx_desc: pointer to the EOP Rx descriptor
954  * @skb: pointer to current skb being populated
955  * @ptype: the packet type decoded by hardware
956  *
957  * This function checks the ring, descriptor, and packet information in
958  * order to populate the hash, checksum, VLAN, protocol, and
959  * other fields within the skb.
960  */
961 static void
962 ice_process_skb_fields(struct ice_ring *rx_ring,
963 		       union ice_32b_rx_flex_desc *rx_desc,
964 		       struct sk_buff *skb, u8 ptype)
965 {
966 	ice_rx_hash(rx_ring, rx_desc, skb, ptype);
967 
968 	/* modifies the skb - consumes the enet header */
969 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
970 
971 	ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
972 }
973 
974 /**
975  * ice_receive_skb - Send a completed packet up the stack
976  * @rx_ring: Rx ring in play
977  * @skb: packet to send up
978  * @vlan_tag: VLAN tag for packet
979  *
 * This function sends the completed packet (via skb) up the stack using
981  * gro receive functions (with/without VLAN tag)
982  */
983 static void
984 ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
985 {
986 	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
987 	    (vlan_tag & VLAN_VID_MASK))
988 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
989 	napi_gro_receive(&rx_ring->q_vector->napi, skb);
990 }
991 
992 /**
993  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
994  * @rx_ring: Rx descriptor ring to transact packets on
995  * @budget: Total limit on number of packets to process
996  *
997  * This function provides a "bounce buffer" approach to Rx interrupt
998  * processing. The advantage to this is that on systems that have
999  * expensive overhead for IOMMU access this provides a means of avoiding
1000  * it by maintaining the mapping of the page to the system.
1001  *
1002  * Returns amount of work completed
1003  */
1004 static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
1005 {
1006 	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
1007 	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1008 	bool failure;
1009 
1010 	/* start the loop to process Rx packets bounded by 'budget' */
1011 	while (likely(total_rx_pkts < (unsigned int)budget)) {
1012 		union ice_32b_rx_flex_desc *rx_desc;
1013 		struct ice_rx_buf *rx_buf;
1014 		struct sk_buff *skb;
1015 		unsigned int size;
1016 		u16 stat_err_bits;
1017 		u16 vlan_tag = 0;
1018 		u8 rx_ptype;
1019 
1020 		/* get the Rx desc from Rx ring based on 'next_to_clean' */
1021 		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1022 
1023 		/* status_error_len will always be zero for unused descriptors
1024 		 * because it's cleared in cleanup, and overlaps with hdr_addr
1025 		 * which is always zero because packet split isn't used, if the
1026 		 * hardware wrote DD then it will be non-zero
1027 		 */
1028 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1029 		if (!ice_test_staterr(rx_desc, stat_err_bits))
1030 			break;
1031 
1032 		/* This memory barrier is needed to keep us from reading
1033 		 * any other fields out of the rx_desc until we know the
1034 		 * DD bit is set.
1035 		 */
1036 		dma_rmb();
1037 
1038 		size = le16_to_cpu(rx_desc->wb.pkt_len) &
1039 			ICE_RX_FLX_DESC_PKT_LEN_M;
1040 
1041 		/* retrieve a buffer from the ring */
1042 		rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
1043 
1044 		if (skb)
1045 			ice_add_rx_frag(rx_buf, skb, size);
1046 		else
1047 			skb = ice_construct_skb(rx_ring, rx_buf, size);
1048 
1049 		/* exit if we failed to retrieve a buffer */
1050 		if (!skb) {
1051 			rx_ring->rx_stats.alloc_buf_failed++;
1052 			if (rx_buf)
1053 				rx_buf->pagecnt_bias++;
1054 			break;
1055 		}
1056 
1057 		ice_put_rx_buf(rx_ring, rx_buf);
1058 		cleaned_count++;
1059 
1060 		/* skip if it is NOP desc */
1061 		if (ice_is_non_eop(rx_ring, rx_desc, skb))
1062 			continue;
1063 
1064 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1065 		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1066 			dev_kfree_skb_any(skb);
1067 			continue;
1068 		}
1069 
1070 		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1071 			ICE_RX_FLEX_DESC_PTYPE_M;
1072 
1073 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1074 		if (ice_test_staterr(rx_desc, stat_err_bits))
1075 			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1076 
		/* correct empty headers and pad skb if needed (to make valid
		 * ethernet frame)
		 */
1080 		if (ice_cleanup_headers(skb)) {
1081 			skb = NULL;
1082 			continue;
1083 		}
1084 
1085 		/* probably a little skewed due to removing CRC */
1086 		total_rx_bytes += skb->len;
1087 
1088 		/* populate checksum, VLAN, and protocol */
1089 		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1090 
1091 		/* send completed skb up the stack */
1092 		ice_receive_skb(rx_ring, skb, vlan_tag);
1093 
1094 		/* update budget accounting */
1095 		total_rx_pkts++;
1096 	}
1097 
1098 	/* return up to cleaned_count buffers to hardware */
1099 	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1100 
1101 	/* update queue and vector specific stats */
1102 	u64_stats_update_begin(&rx_ring->syncp);
1103 	rx_ring->stats.pkts += total_rx_pkts;
1104 	rx_ring->stats.bytes += total_rx_bytes;
1105 	u64_stats_update_end(&rx_ring->syncp);
1106 	rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
1107 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1108 
1109 	/* guarantee a trip back through this routine if there was a failure */
1110 	return failure ? budget : (int)total_rx_pkts;
1111 }
1112 
1113 /**
1114  * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
1115  * @port_info: port_info structure containing the current link speed
1116  * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
1117  * @itr: ITR value to update
1118  *
1119  * Calculate how big of an increment should be applied to the ITR value passed
1120  * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
1121  * link speed.
1122  *
1123  * The following is a calculation derived from:
1124  *  wmem_default / (size + overhead) = desired_pkts_per_int
1125  *  rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
1126  *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1127  *
1128  * Assuming wmem_default is 212992 and overhead is 640 bytes per
1129  * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1130  * formula down to:
1131  *
1132  *	 wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
1133  * ITR = -------------------------------------------- * --------------
1134  *			     rate			pkt_size + 640
1135  */
1136 static unsigned int
1137 ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
1138 				 unsigned int avg_pkt_size,
1139 				 unsigned int itr)
1140 {
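	/* The per-speed multipliers below are the leading term of the formula
	 * above (wmem_default * bits_per_byte * usecs_per_sec / rate)
	 * precomputed: 212992 * 8 * 1000000 / 100e9 is ~17 for 100 Gbps,
	 * scaling up to ~170 for the default 10 Gbps case.
	 */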
1141 	switch (port_info->phy.link_info.link_speed) {
1142 	case ICE_AQ_LINK_SPEED_100GB:
1143 		itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
1144 				    avg_pkt_size + 640);
1145 		break;
1146 	case ICE_AQ_LINK_SPEED_50GB:
1147 		itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
1148 				    avg_pkt_size + 640);
1149 		break;
1150 	case ICE_AQ_LINK_SPEED_40GB:
1151 		itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
1152 				    avg_pkt_size + 640);
1153 		break;
1154 	case ICE_AQ_LINK_SPEED_25GB:
1155 		itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
1156 				    avg_pkt_size + 640);
1157 		break;
1158 	case ICE_AQ_LINK_SPEED_20GB:
1159 		itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
1160 				    avg_pkt_size + 640);
1161 		break;
1162 	case ICE_AQ_LINK_SPEED_10GB:
1163 		/* fall through */
1164 	default:
1165 		itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
1166 				    avg_pkt_size + 640);
1167 		break;
1168 	}
1169 
1170 	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1171 		itr &= ICE_ITR_ADAPTIVE_LATENCY;
1172 		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1173 	}
1174 
1175 	return itr;
1176 }
1177 
1178 /**
1179  * ice_update_itr - update the adaptive ITR value based on statistics
1180  * @q_vector: structure containing interrupt and ring information
1181  * @rc: structure containing ring performance data
1182  *
1183  * Stores a new ITR value based on packets and byte
1184  * counts during the last interrupt.  The advantage of per interrupt
1185  * computation is faster updates and more accurate ITR for the current
1186  * traffic pattern.  Constants in this function were computed
1187  * based on theoretical maximum wire speed and thresholds were set based
1188  * on testing data as well as attempting to minimize response time
1189  * while increasing bulk throughput.
1190  */
1191 static void
1192 ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
1193 {
1194 	unsigned long next_update = jiffies;
1195 	unsigned int packets, bytes, itr;
1196 	bool container_is_rx;
1197 
1198 	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
1199 		return;
1200 
1201 	/* If itr_countdown is set it means we programmed an ITR within
1202 	 * the last 4 interrupt cycles. This has a side effect of us
1203 	 * potentially firing an early interrupt. In order to work around
1204 	 * this we need to throw out any data received for a few
1205 	 * interrupts following the update.
1206 	 */
1207 	if (q_vector->itr_countdown) {
1208 		itr = rc->target_itr;
1209 		goto clear_counts;
1210 	}
1211 
1212 	container_is_rx = (&q_vector->rx == rc);
1213 	/* For Rx we want to push the delay up and default to low latency.
1214 	 * for Tx we want to pull the delay down and default to high latency.
1215 	 */
1216 	itr = container_is_rx ?
1217 		ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
1218 		ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
1219 
1220 	/* If we didn't update within up to 1 - 2 jiffies we can assume
1221 	 * that either packets are coming in so slow there hasn't been
1222 	 * any work, or that there is so much work that NAPI is dealing
1223 	 * with interrupt moderation and we don't need to do anything.
1224 	 */
1225 	if (time_after(next_update, rc->next_update))
1226 		goto clear_counts;
1227 
1228 	packets = rc->total_pkts;
1229 	bytes = rc->total_bytes;
1230 
1231 	if (container_is_rx) {
1232 		/* If Rx there are 1 to 4 packets and bytes are less than
1233 		 * 9000 assume insufficient data to use bulk rate limiting
1234 		 * approach unless Tx is already in bulk rate limiting. We
1235 		 * are likely latency driven.
1236 		 */
1237 		if (packets && packets < 4 && bytes < 9000 &&
1238 		    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
1239 			itr = ICE_ITR_ADAPTIVE_LATENCY;
1240 			goto adjust_by_size_and_speed;
1241 		}
1242 	} else if (packets < 4) {
1243 		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
1244 		 * bulk mode and we are receiving 4 or fewer packets just
1245 		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1246 		 * that the Rx can relax.
1247 		 */
1248 		if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
1249 		    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
1250 		    ICE_ITR_ADAPTIVE_MAX_USECS)
1251 			goto clear_counts;
1252 	} else if (packets > 32) {
1253 		/* If we have processed over 32 packets in a single interrupt
1254 		 * for Tx assume we need to switch over to "bulk" mode.
1255 		 */
1256 		rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
1257 	}
1258 
1259 	/* We have no packets to actually measure against. This means
1260 	 * either one of the other queues on this vector is active or
1261 	 * we are a Tx queue doing TSO with too high of an interrupt rate.
1262 	 *
1263 	 * Between 4 and 56 we can assume that our current interrupt delay
1264 	 * is only slightly too low. As such we should increase it by a small
1265 	 * fixed amount.
1266 	 */
1267 	if (packets < 56) {
1268 		itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
1269 		if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1270 			itr &= ICE_ITR_ADAPTIVE_LATENCY;
1271 			itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1272 		}
1273 		goto clear_counts;
1274 	}
1275 
1276 	if (packets <= 256) {
1277 		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1278 		itr &= ICE_ITR_MASK;
1279 
1280 		/* Between 56 and 112 is our "goldilocks" zone where we are
1281 		 * working out "just right". Just report that our current
1282 		 * ITR is good for us.
1283 		 */
1284 		if (packets <= 112)
1285 			goto clear_counts;
1286 
1287 		/* If packet count is 128 or greater we are likely looking
1288 		 * at a slight overrun of the delay we want. Try halving
1289 		 * our delay to see if that will cut the number of packets
1290 		 * in half per interrupt.
1291 		 */
1292 		itr >>= 1;
1293 		itr &= ICE_ITR_MASK;
1294 		if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
1295 			itr = ICE_ITR_ADAPTIVE_MIN_USECS;
1296 
1297 		goto clear_counts;
1298 	}
1299 
1300 	/* The paths below assume we are dealing with a bulk ITR since
1301 	 * number of packets is greater than 256. We are just going to have
1302 	 * to compute a value and try to bring the count under control,
1303 	 * though for smaller packet sizes there isn't much we can do as
1304 	 * NAPI polling will likely be kicking in sooner rather than later.
1305 	 */
1306 	itr = ICE_ITR_ADAPTIVE_BULK;
1307 
1308 adjust_by_size_and_speed:
1309 
1310 	/* based on checks above packets cannot be 0 so division is safe */
1311 	itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
1312 					       bytes / packets, itr);
1313 
1314 clear_counts:
1315 	/* write back value */
1316 	rc->target_itr = itr;
1317 
1318 	/* next update should occur within next jiffy */
1319 	rc->next_update = next_update + 1;
1320 
1321 	rc->total_bytes = 0;
1322 	rc->total_pkts = 0;
1323 }
1324 
1325 /**
1326  * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1327  * @itr_idx: interrupt throttling index
1328  * @itr: interrupt throttling value in usecs
1329  */
1330 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1331 {
1332 	/* The ITR value is reported in microseconds, and the register value is
1333 	 * recorded in 2 microsecond units. For this reason we only need to
1334 	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1335 	 * granularity as a shift instead of division. The mask makes sure the
1336 	 * ITR value is never odd so we don't accidentally write into the field
1337 	 * prior to the ITR field.
1338 	 */
1339 	itr &= ICE_ITR_MASK;
1340 
1341 	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1342 		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1343 		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1344 }
1345 
1346 /* The act of updating the ITR will cause it to immediately trigger. In order
1347  * to prevent this from throwing off adaptive update statistics we defer the
1348  * update so that it can only happen so often. So after either Tx or Rx are
1349  * updated we make the adaptive scheme wait until either the ITR completely
1350  * expires via the next_update expiration or we have been through at least
1351  * 3 interrupts.
1352  */
1353 #define ITR_COUNTDOWN_START 3
1354 
1355 /**
1356  * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
1357  * @vsi: the VSI associated with the q_vector
1358  * @q_vector: q_vector for which ITR is being updated and interrupt enabled
1359  */
1360 static void
1361 ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
1362 {
1363 	struct ice_ring_container *tx = &q_vector->tx;
1364 	struct ice_ring_container *rx = &q_vector->rx;
1365 	u32 itr_val;
1366 
1367 	/* when exiting WB_ON_ITR lets set a low ITR value and trigger
1368 	 * interrupts to expire right away in case we have more work ready to go
1369 	 * already
1370 	 */
1371 	if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
1372 		itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
1373 		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1374 		/* set target back to last user set value */
1375 		rx->target_itr = rx->itr_setting;
1376 		/* set current to what we just wrote and dynamic if needed */
1377 		rx->current_itr = ICE_WB_ON_ITR_USECS |
1378 			(rx->itr_setting & ICE_ITR_DYNAMIC);
1379 		/* allow normal interrupt flow to start */
1380 		q_vector->itr_countdown = 0;
1381 		return;
1382 	}
1383 
1384 	/* This will do nothing if dynamic updates are not enabled */
1385 	ice_update_itr(q_vector, tx);
1386 	ice_update_itr(q_vector, rx);
1387 
1388 	/* This block of logic allows us to get away with only updating
1389 	 * one ITR value with each interrupt. The idea is to perform a
1390 	 * pseudo-lazy update with the following criteria.
1391 	 *
1392 	 * 1. Rx is given higher priority than Tx if both are in same state
1393 	 * 2. If we must reduce an ITR that is given highest priority.
1394 	 * 3. We then give priority to increasing ITR based on amount.
1395 	 */
1396 	if (rx->target_itr < rx->current_itr) {
1397 		/* Rx ITR needs to be reduced, this is highest priority */
1398 		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1399 		rx->current_itr = rx->target_itr;
1400 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
1401 	} else if ((tx->target_itr < tx->current_itr) ||
1402 		   ((rx->target_itr - rx->current_itr) <
1403 		    (tx->target_itr - tx->current_itr))) {
1404 		/* Tx ITR needs to be reduced, this is second priority
1405 		 * Tx ITR needs to be increased more than Rx, fourth priority
1406 		 */
1407 		itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
1408 		tx->current_itr = tx->target_itr;
1409 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
1410 	} else if (rx->current_itr != rx->target_itr) {
1411 		/* Rx ITR needs to be increased, third priority */
1412 		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1413 		rx->current_itr = rx->target_itr;
1414 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
1415 	} else {
1416 		/* Still have to re-enable the interrupts */
1417 		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1418 		if (q_vector->itr_countdown)
1419 			q_vector->itr_countdown--;
1420 	}
1421 
1422 	if (!test_bit(__ICE_DOWN, vsi->state))
1423 		wr32(&vsi->back->hw,
1424 		     GLINT_DYN_CTL(q_vector->reg_idx),
1425 		     itr_val);
1426 }
1427 
1428 /**
1429  * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1430  * @vsi: pointer to the VSI structure
1431  * @q_vector: q_vector to set WB_ON_ITR on
1432  *
1433  * We need to tell hardware to write-back completed descriptors even when
1434  * interrupts are disabled. Descriptors will be written back on cache line
1435  * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1436  * descriptors may not be written back if they don't fill a cache line until the
1437  * next interrupt.
1438  *
1439  * This sets the write-back frequency to 2 microseconds as that is the minimum
1440  * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
1441  * make sure hardware knows we aren't meddling with the INTENA_M bit.
1442  */
1443 static void
1444 ice_set_wb_on_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
1445 {
1446 	/* already in WB_ON_ITR mode no need to change it */
1447 	if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
1448 		return;
1449 
1450 	if (q_vector->num_ring_rx)
1451 		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1452 		     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1453 						 ICE_RX_ITR));
1454 
1455 	if (q_vector->num_ring_tx)
1456 		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1457 		     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1458 						 ICE_TX_ITR));
1459 
1460 	q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
1461 }
1462 
1463 /**
1464  * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1465  * @napi: napi struct with our devices info in it
1466  * @budget: amount of work driver is allowed to do this pass, in packets
1467  *
1468  * This function will clean all queues associated with a q_vector.
1469  *
1470  * Returns the amount of work done
1471  */
1472 int ice_napi_poll(struct napi_struct *napi, int budget)
1473 {
1474 	struct ice_q_vector *q_vector =
1475 				container_of(napi, struct ice_q_vector, napi);
1476 	struct ice_vsi *vsi = q_vector->vsi;
1477 	bool clean_complete = true;
1478 	struct ice_ring *ring;
1479 	int budget_per_ring;
1480 	int work_done = 0;
1481 
1482 	/* Since the actual Tx work is minimal, we can give the Tx a larger
1483 	 * budget and be more aggressive about cleaning up the Tx descriptors.
1484 	 */
1485 	ice_for_each_ring(ring, q_vector->tx)
1486 		if (!ice_clean_tx_irq(vsi, ring, budget))
1487 			clean_complete = false;
1488 
1489 	/* Handle case where we are called by netpoll with a budget of 0 */
1490 	if (budget <= 0)
1491 		return budget;
1492 
1493 	/* normally we have 1 Rx ring per q_vector */
1494 	if (unlikely(q_vector->num_ring_rx > 1))
1495 		/* We attempt to distribute budget to each Rx queue fairly, but
1496 		 * don't allow the budget to go below 1 because that would exit
1497 		 * polling early.
1498 		 */
1499 		budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
1500 	else
1501 		/* Max of 1 Rx ring in this q_vector so give it the budget */
1502 		budget_per_ring = budget;
1503 
1504 	ice_for_each_ring(ring, q_vector->rx) {
1505 		int cleaned;
1506 
1507 		cleaned = ice_clean_rx_irq(ring, budget_per_ring);
1508 		work_done += cleaned;
1509 		/* if we clean as many as budgeted, we must not be done */
1510 		if (cleaned >= budget_per_ring)
1511 			clean_complete = false;
1512 	}
1513 
1514 	/* If work not completed, return budget and polling will return */
1515 	if (!clean_complete)
1516 		return budget;
1517 
1518 	/* Exit the polling mode, but don't re-enable interrupts if stack might
1519 	 * poll us due to busy-polling
1520 	 */
1521 	if (likely(napi_complete_done(napi, work_done)))
1522 		ice_update_ena_itr(vsi, q_vector);
1523 	else
1524 		ice_set_wb_on_itr(vsi, q_vector);
1525 
1526 	return min_t(int, work_done, budget - 1);
1527 }
1528 
/**
 * build_ctob - build the cmd/type/offset/size quadword of a Tx descriptor
 * @td_cmd: Tx descriptor command bits
 * @td_offset: Tx descriptor header offsets
 * @size: size of the buffer the descriptor points to
 * @td_tag: L2 tag 1 (VLAN tag) to insert
 */
1530 static __le64
1531 build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
1532 {
1533 	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
1534 			   (td_cmd    << ICE_TXD_QW1_CMD_S) |
1535 			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
1536 			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
1537 			   (td_tag    << ICE_TXD_QW1_L2TAG1_S));
1538 }
1539 
1540 /**
1541  * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1542  * @tx_ring: the ring to be checked
1543  * @size: the size buffer we want to assure is available
1544  *
1545  * Returns -EBUSY if a stop is needed, else 0
1546  */
1547 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1548 {
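	/* stop the queue before re-checking the ring so we cannot race with
	 * the wake in ice_clean_tx_irq(); the barrier below pairs with the
	 * smp_mb() taken there before waking the queue
	 */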
1549 	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1550 	/* Memory barrier before checking head and tail */
1551 	smp_mb();
1552 
1553 	/* Check again in a case another CPU has just made room available. */
1554 	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1555 		return -EBUSY;
1556 
1557 	/* A reprieve! - use start_subqueue because it doesn't call schedule */
1558 	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1559 	++tx_ring->tx_stats.restart_q;
1560 	return 0;
1561 }
1562 
1563 /**
1564  * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1565  * @tx_ring: the ring to be checked
1566  * @size:    the size buffer we want to assure is available
1567  *
1568  * Returns 0 if stop is not needed
1569  */
1570 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1571 {
1572 	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1573 		return 0;
1574 
1575 	return __ice_maybe_stop_tx(tx_ring, size);
1576 }
1577 
1578 /**
1579  * ice_tx_map - Build the Tx descriptor
1580  * @tx_ring: ring to send buffer on
1581  * @first: first buffer info buffer to use
1582  * @off: pointer to struct that holds offload parameters
1583  *
1584  * This function loops over the skb data pointed to by *first
1585  * and gets a physical address for each memory location and programs
1586  * it and the length into the transmit descriptor.
1587  */
1588 static void
1589 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1590 	   struct ice_tx_offload_params *off)
1591 {
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	skb_frag_t *frag;
	dma_addr_t dma;
1600 
1601 	td_tag = off->td_l2tag1;
1602 	td_cmd = off->td_cmd;
1603 	td_offset = off->td_offset;
1604 	skb = first->skb;
1605 
1606 	data_len = skb->data_len;
1607 	size = skb_headlen(skb);
1608 
1609 	tx_desc = ICE_TX_DESC(tx_ring, i);
1610 
1611 	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1612 		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1613 		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1614 			  ICE_TX_FLAGS_VLAN_S;
1615 	}
1616 
1617 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1618 
1619 	tx_buf = first;
1620 
1621 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1622 		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1623 
1624 		if (dma_mapping_error(tx_ring->dev, dma))
1625 			goto dma_error;
1626 
1627 		/* record length, and DMA address */
1628 		dma_unmap_len_set(tx_buf, len, size);
1629 		dma_unmap_addr_set(tx_buf, dma, dma);
1630 
1631 		/* align size to end of page */
1632 		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1633 		tx_desc->buf_addr = cpu_to_le64(dma);
1634 
1635 		/* account for data chunks larger than the hardware
1636 		 * can handle
1637 		 */
1638 		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1639 			tx_desc->cmd_type_offset_bsz =
1640 				build_ctob(td_cmd, td_offset, max_data, td_tag);
1641 
1642 			tx_desc++;
1643 			i++;
1644 
1645 			if (i == tx_ring->count) {
1646 				tx_desc = ICE_TX_DESC(tx_ring, 0);
1647 				i = 0;
1648 			}
1649 
1650 			dma += max_data;
1651 			size -= max_data;
1652 
1653 			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1654 			tx_desc->buf_addr = cpu_to_le64(dma);
1655 		}
1656 
1657 		if (likely(!data_len))
1658 			break;
1659 
1660 		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
1661 							  size, td_tag);
1662 
1663 		tx_desc++;
1664 		i++;
1665 
1666 		if (i == tx_ring->count) {
1667 			tx_desc = ICE_TX_DESC(tx_ring, 0);
1668 			i = 0;
1669 		}
1670 
1671 		size = skb_frag_size(frag);
1672 		data_len -= size;
1673 
1674 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1675 				       DMA_TO_DEVICE);
1676 
1677 		tx_buf = &tx_ring->tx_buf[i];
1678 	}
1679 
1680 	/* record bytecount for BQL */
1681 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1682 
1683 	/* record SW timestamp if HW timestamp is not available */
1684 	skb_tx_timestamp(first->skb);
1685 
1686 	i++;
1687 	if (i == tx_ring->count)
1688 		i = 0;
1689 
1690 	/* write last descriptor with RS and EOP bits */
1691 	td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
1692 	tx_desc->cmd_type_offset_bsz =
1693 			build_ctob(td_cmd, td_offset, size, td_tag);
1694 
1695 	/* Force memory writes to complete before letting h/w know there
1696 	 * are new descriptors to fetch.
1697 	 *
1698 	 * We also use this memory barrier to make certain all of the
1699 	 * status bits have been updated before next_to_watch is written.
1700 	 */
1701 	wmb();
1702 
1703 	/* set next_to_watch value indicating a packet is present */
1704 	first->next_to_watch = tx_desc;
1705 
1706 	tx_ring->next_to_use = i;
1707 
1708 	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1709 
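	/* Tail writes are batched: when the stack reports more frames queued
	 * behind this one (netdev_xmit_more()) and the queue has not been
	 * stopped, the doorbell below is skipped and a later packet's tail
	 * bump covers these descriptors too, saving MMIO writes.
	 */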
1710 	/* notify HW of packet */
1711 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
1712 		writel(i, tx_ring->tail);
1714 
1715 	return;
1716 
1717 dma_error:
1718 	/* clear DMA mappings for failed tx_buf map */
1719 	for (;;) {
1720 		tx_buf = &tx_ring->tx_buf[i];
1721 		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1722 		if (tx_buf == first)
1723 			break;
1724 		if (i == 0)
1725 			i = tx_ring->count;
1726 		i--;
1727 	}
1728 
1729 	tx_ring->next_to_use = i;
1730 }
1731 
1732 /**
1733  * ice_tx_csum - Enable Tx checksum offloads
1734  * @first: pointer to the first descriptor
1735  * @off: pointer to struct that holds offload parameters
1736  *
1737  * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1738  */
1739 static
1740 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1741 {
1742 	u32 l4_len = 0, l3_len = 0, l2_len = 0;
1743 	struct sk_buff *skb = first->skb;
1744 	union {
1745 		struct iphdr *v4;
1746 		struct ipv6hdr *v6;
1747 		unsigned char *hdr;
1748 	} ip;
1749 	union {
1750 		struct tcphdr *tcp;
1751 		unsigned char *hdr;
1752 	} l4;
1753 	__be16 frag_off, protocol;
1754 	unsigned char *exthdr;
1755 	u32 offset, cmd = 0;
1756 	u8 l4_proto = 0;
1757 
1758 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1759 		return 0;
1760 
1761 	ip.hdr = skb_network_header(skb);
1762 	l4.hdr = skb_transport_header(skb);
1763 
1764 	/* compute outer L2 header size */
1765 	l2_len = ip.hdr - skb->data;
1766 	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
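	/* For example, a plain 14-byte Ethernet header gives l2_len = 14 and
	 * a MACLEN field of 7; the divide by 2 above is because the
	 * descriptor expresses the MAC header length in 2-byte words.
	 */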
1767 
1768 	if (skb->encapsulation)
1769 		return -1;
1770 
1771 	/* Enable IP checksum offloads */
1772 	protocol = vlan_get_protocol(skb);
1773 	if (protocol == htons(ETH_P_IP)) {
1774 		l4_proto = ip.v4->protocol;
1775 		/* the stack computes the IP header already, the only time we
1776 		 * need the hardware to recompute it is in the case of TSO.
1777 		 */
1778 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1779 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1780 		else
1781 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1782 
1783 	} else if (protocol == htons(ETH_P_IPV6)) {
1784 		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1785 		exthdr = ip.hdr + sizeof(*ip.v6);
1786 		l4_proto = ip.v6->nexthdr;
1787 		if (l4.hdr != exthdr)
1788 			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1789 					 &frag_off);
1790 	} else {
1791 		return -1;
1792 	}
1793 
1794 	/* compute inner L3 header size */
1795 	l3_len = l4.hdr - ip.hdr;
1796 	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
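	/* For example, a 20-byte IPv4 header with no options gives l3_len =
	 * 20 and an IPLEN field of 5; the divide by 4 above is because the
	 * descriptor expresses the IP header length in 4-byte words.
	 */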
1797 
1798 	/* Enable L4 checksum offloads */
1799 	switch (l4_proto) {
1800 	case IPPROTO_TCP:
1801 		/* enable checksum offloads */
1802 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1803 		l4_len = l4.tcp->doff;
1804 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
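		/* tcp->doff already counts 32-bit words (5 for a 20-byte
		 * header with no options), so it maps directly onto the
		 * L4LEN field without further scaling.
		 */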
1805 		break;
1806 	case IPPROTO_UDP:
1807 		/* enable UDP checksum offload */
1808 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1809 		l4_len = (sizeof(struct udphdr) >> 2);
1810 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1811 		break;
1812 	case IPPROTO_SCTP:
1813 		/* enable SCTP checksum offload */
1814 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1815 		l4_len = sizeof(struct sctphdr) >> 2;
1816 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1817 		break;
1818 
1819 	default:
1820 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1821 			return -1;
1822 		skb_checksum_help(skb);
1823 		return 0;
1824 	}
1825 
1826 	off->td_cmd |= cmd;
1827 	off->td_offset |= offset;
1828 	return 1;
1829 }
1830 
1831 /**
1832  * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1833  * @tx_ring: ring to send buffer on
1834  * @first: pointer to struct ice_tx_buf
1835  *
1836  * Checks the skb and sets up the corresponding generic transmit flags
1837  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1838  *
1839  * Returns an error code to indicate the frame should be dropped upon error,
1840  * otherwise returns 0 to indicate the flags have been set properly.
1841  */
1842 static int
1843 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1844 {
1845 	struct sk_buff *skb = first->skb;
1846 	__be16 protocol = skb->protocol;
1847 
1848 	if (protocol == htons(ETH_P_8021Q) &&
1849 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1850 		/* when HW VLAN acceleration is turned off by the user the
1851 		 * stack sets the protocol to 8021q so that the driver
1852 		 * can take any steps required to support the SW only
1853 		 * VLAN handling. In our case the driver doesn't need
1854 		 * to take any further steps so just set the protocol
1855 		 * to the encapsulated ethertype.
1856 		 */
1857 		skb->protocol = vlan_get_protocol(skb);
1858 		return 0;
1859 	}
1860 
1861 	/* if we have a HW VLAN tag being added, default to the HW one */
1862 	if (skb_vlan_tag_present(skb)) {
1863 		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1864 		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1865 	} else if (protocol == htons(ETH_P_8021Q)) {
1866 		struct vlan_hdr *vhdr, _vhdr;
1867 
1868 		/* for SW VLAN, check the next protocol and store the tag */
1869 		vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
1870 							     sizeof(_vhdr),
1871 							     &_vhdr);
1872 		if (!vhdr)
1873 			return -EINVAL;
1874 
1875 		first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
1876 				   ICE_TX_FLAGS_VLAN_S;
1877 		first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
1878 	}
1879 
1880 	return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1881 }
1882 
1883 /**
1884  * ice_tso - computes mss and TSO length to prepare for TSO
1885  * @first: pointer to struct ice_tx_buf
1886  * @off: pointer to struct that holds offload parameters
1887  *
1888  * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
1889  */
1890 static
1891 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1892 {
1893 	struct sk_buff *skb = first->skb;
1894 	union {
1895 		struct iphdr *v4;
1896 		struct ipv6hdr *v6;
1897 		unsigned char *hdr;
1898 	} ip;
1899 	union {
1900 		struct tcphdr *tcp;
1901 		unsigned char *hdr;
1902 	} l4;
1903 	u64 cd_mss, cd_tso_len;
1904 	u32 paylen, l4_start;
1905 	int err;
1906 
1907 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1908 		return 0;
1909 
1910 	if (!skb_is_gso(skb))
1911 		return 0;
1912 
1913 	err = skb_cow_head(skb, 0);
1914 	if (err < 0)
1915 		return err;
1916 
1917 	/* cppcheck-suppress unreadVariable */
1918 	ip.hdr = skb_network_header(skb);
1919 	l4.hdr = skb_transport_header(skb);
1920 
1921 	/* initialize outer IP header fields */
1922 	if (ip.v4->version == 4) {
1923 		ip.v4->tot_len = 0;
1924 		ip.v4->check = 0;
1925 	} else {
1926 		ip.v6->payload_len = 0;
1927 	}
1928 
1929 	/* determine offset of transport header */
1930 	l4_start = l4.hdr - skb->data;
1931 
1932 	/* remove payload length from checksum */
1933 	paylen = skb->len - l4_start;
1934 	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
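	/* The stack seeded tcp->check with a pseudo-header checksum that
	 * includes the full payload length; every TSO segment carries a
	 * different length, so the total payload is backed out here and the
	 * hardware folds in each segment's own length when it segments.
	 */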
1935 
1936 	/* compute length of segmentation header */
1937 	off->header_len = (l4.tcp->doff * 4) + l4_start;
1938 
1939 	/* update gso_segs and bytecount */
1940 	first->gso_segs = skb_shinfo(skb)->gso_segs;
1941 	first->bytecount += (first->gso_segs - 1) * off->header_len;
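	/* Hedged example: a TSO skb split into 7 segments with a 66-byte
	 * header (14 L2 + 20 L3 + 32 L4) adds 6 * 66 = 396 bytes here,
	 * matching the headers replicated on the additional wire frames.
	 */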
1942 
1943 	cd_tso_len = skb->len - off->header_len;
1944 	cd_mss = skb_shinfo(skb)->gso_size;
1945 
1946 	/* record cdesc_qw1 with TSO parameters */
1947 	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
1948 			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1949 			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1950 			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
1951 	first->tx_flags |= ICE_TX_FLAGS_TSO;
1952 	return 1;
1953 }
1954 
1955 /**
1956  * ice_txd_use_count - estimate the number of descriptors needed for Tx
1957  * @size: transmit request size in bytes
1958  *
1959  * Due to hardware alignment restrictions (4K alignment), we need to
1960  * assume that we can have no more than 12K of data per descriptor, even
1961  * though each descriptor can take up to 16K - 1 bytes of aligned memory.
1962  * Thus, we need to divide by 12K. But division is slow! Instead,
1963  * we decompose the operation into shifts and one relatively cheap
1964  * multiply operation.
1965  *
1966  * To divide by 12K, we first divide by 4K, then divide by 3:
1967  *     To divide by 4K, shift right by 12 bits
1968  *     To divide by 3, multiply by 85, then divide by 256
1969  *     (Divide by 256 is done by shifting right by 8 bits)
1970  * Finally, we add one to round up. Because 256 isn't an exact multiple of
1971  * 3, we'll underestimate near each multiple of 12K. This is actually more
1972  * accurate as we have 4K - 1 of wiggle room that we can fit into the last
1973  * segment. For our purposes this is accurate out to 1M which is orders of
1974  * magnitude greater than our largest possible GSO size.
1975  *
1976  * This would then be implemented as:
1977  *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
1978  *
1979  * Since multiplication and division are commutative, we can reorder
1980  * operations into:
1981  *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
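 *
 * As a rough sanity check (assuming ICE_DESCS_FOR_SKB_DATA_PTR is 1):
 * a 60000-byte chunk gives (60000 * 85) >> 20 = 4, so 5 descriptors,
 * matching ceil(60000 / 12K); 4096 bytes gives 0 + 1 = 1; and exactly
 * 12288 bytes also gives 1, the deliberate underestimate near multiples
 * of 12K described above.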
1982  */
1983 static unsigned int ice_txd_use_count(unsigned int size)
1984 {
1985 	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1986 }
1987 
1988 /**
1989  * ice_xmit_desc_count - calculate number of Tx descriptors needed
1990  * @skb: send buffer
1991  *
1992  * Returns number of data descriptors needed for this skb.
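 *
 * For instance, under the 12K-per-descriptor estimate in
 * ice_txd_use_count() (and assuming ICE_DESCS_FOR_SKB_DATA_PTR is 1), a
 * linear 1500-byte skb needs 1 descriptor, while a 256-byte head plus
 * three 32K frags needs 1 + 3 * 3 = 10.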
1993  */
1994 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
1995 {
1996 	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1997 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1998 	unsigned int count = 0, size = skb_headlen(skb);
1999 
2000 	for (;;) {
2001 		count += ice_txd_use_count(size);
2002 
2003 		if (!nr_frags--)
2004 			break;
2005 
2006 		size = skb_frag_size(frag++);
2007 	}
2008 
2009 	return count;
2010 }
2011 
2012 /**
2013  * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2014  * @skb: send buffer
2015  *
2016  * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2017  * and so we need to figure out the cases where we need to linearize the skb.
2018  *
2019  * For TSO we need to count the TSO header and segment payload separately.
2020  * As such we need to check cases where we have 7 fragments or more as we
2021  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2022  * the segment payload in the first descriptor, and another 7 for the
2023  * fragments.
2024  */
2025 static bool __ice_chk_linearize(struct sk_buff *skb)
2026 {
2027 	const skb_frag_t *frag, *stale;
2028 	int nr_frags, sum;
2029 
2030 	/* no need to check if number of frags is less than 7 */
2031 	nr_frags = skb_shinfo(skb)->nr_frags;
2032 	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2033 		return false;
2034 
2035 	/* We need to walk through the list and validate that each group
2036 	 * of 6 fragments totals at least gso_size.
2037 	 */
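	/* Hedged illustration: with gso_size = 1448 and seven 200-byte frags
	 * at the head of the list, any six of them carry only 1200 bytes, so
	 * one segment's payload would need a seventh frag on top of the
	 * header and the frag carried over from the previous segment, i.e.
	 * more than eight buffers; the walk below catches this when sum goes
	 * negative and the caller linearizes the skb.
	 */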
2038 	nr_frags -= ICE_MAX_BUF_TXD - 2;
2039 	frag = &skb_shinfo(skb)->frags[0];
2040 
2041 	/* Initialize size to the negative value of gso_size minus 1. We
2042 	 * use this as the worst case scenario in which the frag ahead
2043 	 * of us only provides one byte which is why we are limited to 6
2044 	 * descriptors for a single transmit as the header and previous
2045 	 * fragment are already consuming 2 descriptors.
2046 	 */
2047 	sum = 1 - skb_shinfo(skb)->gso_size;
2048 
2049 	/* Add size of frags 0 through 4 to create our initial sum */
2050 	sum += skb_frag_size(frag++);
2051 	sum += skb_frag_size(frag++);
2052 	sum += skb_frag_size(frag++);
2053 	sum += skb_frag_size(frag++);
2054 	sum += skb_frag_size(frag++);
2055 
2056 	/* Walk through fragments adding latest fragment, testing it, and
2057 	 * then removing stale fragments from the sum.
2058 	 */
2059 	stale = &skb_shinfo(skb)->frags[0];
2060 	for (;;) {
2061 		sum += skb_frag_size(frag++);
2062 
2063 		/* if sum is negative we failed to make sufficient progress */
2064 		if (sum < 0)
2065 			return true;
2066 
2067 		if (!nr_frags--)
2068 			break;
2069 
2070 		sum -= skb_frag_size(stale++);
2071 	}
2072 
2073 	return false;
2074 }
2075 
2076 /**
2077  * ice_chk_linearize - Check if there are more than 8 fragments per packet
2078  * @skb:      send buffer
2079  * @count:    number of buffers used
2080  *
2081  * Note: Our HW can't scatter-gather more than 8 fragments to build
2082  * a packet on the wire and so we need to figure out the cases where we
2083  * need to linearize the skb.
2084  */
2085 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2086 {
2087 	/* Both TSO and single send will work if count is less than 8 */
2088 	if (likely(count < ICE_MAX_BUF_TXD))
2089 		return false;
2090 
2091 	if (skb_is_gso(skb))
2092 		return __ice_chk_linearize(skb);
2093 
2094 	/* we can support up to 8 data buffers for a single send */
2095 	return count != ICE_MAX_BUF_TXD;
2096 }
2097 
2098 /**
2099  * ice_xmit_frame_ring - Sends buffer on Tx ring
2100  * @skb: send buffer
2101  * @tx_ring: ring to send buffer on
2102  *
2103  * Returns NETDEV_TX_OK if sent, else an error code
2104  */
2105 static netdev_tx_t
2106 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2107 {
2108 	struct ice_tx_offload_params offload = { 0 };
2109 	struct ice_vsi *vsi = tx_ring->vsi;
2110 	struct ice_tx_buf *first;
2111 	unsigned int count;
2112 	int tso, csum;
2113 
2114 	count = ice_xmit_desc_count(skb);
2115 	if (ice_chk_linearize(skb, count)) {
2116 		if (__skb_linearize(skb))
2117 			goto out_drop;
2118 		count = ice_txd_use_count(skb->len);
2119 		tx_ring->tx_stats.tx_linearize++;
2120 	}
2121 
2122 	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2123 	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2124 	 *       + 4 desc gap to avoid the cache line where head is,
2125 	 *       + 1 desc for context descriptor,
2126 	 * otherwise try next time
2127 	 */
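	/* Loose example, assuming ICE_DESCS_PER_CACHE_LINE covers the
	 * 4-descriptor gap above and ICE_DESCS_FOR_CTX_DESC the single
	 * context descriptor: a linear 1500-byte frame has count = 1, so
	 * roughly 1 + 4 + 1 = 6 descriptors must be free before we commit
	 * to this transmit.
	 */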
2128 	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2129 			      ICE_DESCS_FOR_CTX_DESC)) {
2130 		tx_ring->tx_stats.tx_busy++;
2131 		return NETDEV_TX_BUSY;
2132 	}
2133 
2134 	offload.tx_ring = tx_ring;
2135 
2136 	/* record the location of the first descriptor for this packet */
2137 	first = &tx_ring->tx_buf[tx_ring->next_to_use];
2138 	first->skb = skb;
2139 	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2140 	first->gso_segs = 1;
2141 	first->tx_flags = 0;
2142 
2143 	/* prepare the VLAN tagging flags for Tx */
2144 	if (ice_tx_prepare_vlan_flags(tx_ring, first))
2145 		goto out_drop;
2146 
2147 	/* set up TSO offload */
2148 	tso = ice_tso(first, &offload);
2149 	if (tso < 0)
2150 		goto out_drop;
2151 
2152 	/* always set up Tx checksum offload */
2153 	csum = ice_tx_csum(first, &offload);
2154 	if (csum < 0)
2155 		goto out_drop;
2156 
2157 	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2158 	if (unlikely(skb->priority == TC_PRIO_CONTROL &&
2159 		     vsi->type == ICE_VSI_PF &&
2160 		     vsi->port_info->is_sw_lldp))
2161 		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2162 					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2163 					ICE_TXD_CTX_QW1_CMD_S);
2164 
2165 	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2166 		struct ice_tx_ctx_desc *cdesc;
2167 		int i = tx_ring->next_to_use;
2168 
2169 		/* grab the next descriptor */
2170 		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2171 		i++;
2172 		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2173 
2174 		/* setup context descriptor */
2175 		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2176 		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2177 		cdesc->rsvd = cpu_to_le16(0);
2178 		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2179 	}
2180 
2181 	ice_tx_map(tx_ring, first, &offload);
2182 	return NETDEV_TX_OK;
2183 
2184 out_drop:
2185 	dev_kfree_skb_any(skb);
2186 	return NETDEV_TX_OK;
2187 }
2188 
2189 /**
2190  * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2191  * @skb: send buffer
2192  * @netdev: network interface device structure
2193  *
2194  * Returns NETDEV_TX_OK if sent, else an error code
2195  */
2196 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2197 {
2198 	struct ice_netdev_priv *np = netdev_priv(netdev);
2199 	struct ice_vsi *vsi = np->vsi;
2200 	struct ice_ring *tx_ring;
2201 
2202 	tx_ring = vsi->tx_rings[skb->queue_mapping];
2203 
2204 	/* hardware can't handle really short frames, hardware padding works
2205 	 * beyond this point
2206 	 */
2207 	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2208 		return NETDEV_TX_OK;
2209 
2210 	return ice_xmit_frame_ring(skb, tx_ring);
2211 }
2212