// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */

#include <linux/types.h>
#include <linux/module.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/if_macvlan.h>
#include <linux/prefetch.h>

#include "fm10k.h"

#define DRV_SUMMARY	"Intel(R) Ethernet Switch Host Interface Driver"
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
	"Copyright(c) 2013 - 2019 Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");

/* single workqueue for entire fm10k driver */
struct workqueue_struct *fm10k_workqueue;

/**
 * fm10k_init_module - Driver Registration Routine
 *
 * fm10k_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init fm10k_init_module(void)
{
	int ret;

	pr_info("%s\n", fm10k_driver_string);
	pr_info("%s\n", fm10k_copyright);

	/* create driver workqueue */
	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
					  fm10k_driver_name);
	if (!fm10k_workqueue)
		return -ENOMEM;

	fm10k_dbg_init();

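	/* register the PCI driver last, so the workqueue and debugfs
	 * directory already exist by the time a probe can run; on failure
	 * the error path below unwinds in the reverse order of creation
	 */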
	ret = fm10k_register_pci_driver();
	if (ret) {
		fm10k_dbg_exit();
		destroy_workqueue(fm10k_workqueue);
	}

	return ret;
}
module_init(fm10k_init_module);

/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit fm10k_exit_module(void)
{
	fm10k_unregister_pci_driver();

	fm10k_dbg_exit();

	/* destroy driver workqueue */
	destroy_workqueue(fm10k_workqueue);
}
module_exit(fm10k_exit_module);

static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
				    struct fm10k_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* Only page will be NULL if buffer was consumed */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{
	union fm10k_rx_desc *rx_desc;
	struct fm10k_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = FM10K_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer[i];
	i -= rx_ring->count;
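	/* i is now a negative offset from the end of the ring; it reaches
	 * zero exactly when the index wraps, so the loop below can detect
	 * the wrap with a single !i test
	 */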

	do {
		if (!fm10k_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = FM10K_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->d.staterr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();

		/* notify hardware of new descriptors */
		writel(i, rx_ring->tail);
	}
}

/**
 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the interface
 **/
static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
				struct fm10k_rx_buffer *old_buff)
{
	struct fm10k_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
					 FM10K_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
				    struct page *page,
				    unsigned int __maybe_unused truesize)
{
	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
		return false;
#endif

	/* Even if we own the page, we are not allowed to use atomic_set().
	 * This would break get_page_unless_zero() users.
	 */
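	/* bump the reference count so the half of the page we keep stays
	 * valid after the networking stack releases the half just handed
	 * to the skb
	 */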
	page_ref_inc(page);

	return true;
}

/**
 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buffer: buffer containing page to add
 * @size: packet size from rx_desc
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done through a direct copy if the data in the buffer is less
 * than the skb header size; otherwise the page is attached as a frag to
 * the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the interface.
 **/
static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
			      unsigned int size,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = FM10K_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, 512);
#endif
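	/* on systems with 4K pages each page is split into two fixed
	 * FM10K_RX_BUFSZ halves that are flipped between; on larger pages
	 * the buffer offset instead advances by the 512-byte-aligned
	 * packet size
	 */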
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= FM10K_RX_HDR_LEN)) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is reusable, we can reuse buffer as-is */
		if (dev_page_is_reusable(page))
			return true;

		/* this page cannot be reused so discard it */
		__free_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(skb->dev, va, FM10K_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}

static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int size = le16_to_cpu(rx_desc->w.length);
	struct fm10k_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		net_prefetch(page_addr);

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
				     FM10K_RX_HDR_LEN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (fm10k_add_rx_frag(rx_buffer, size, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		fm10k_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;

	return skb;
}

static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
				     union fm10k_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (fm10k_test_staterr(rx_desc,
			       FM10K_RXD_STATUS_L4E |
			       FM10K_RXD_STATUS_L4E2 |
			       FM10K_RXD_STATUS_IPE |
			       FM10K_RXD_STATUS_IPE2)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
		skb->encapsulation = true;
	else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS))
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	ring->rx_stats.csum_good++;
}

#define FM10K_RSS_L4_TYPES_MASK \
	(BIT(FM10K_RSSTYPE_IPV4_TCP) | \
	 BIT(FM10K_RSSTYPE_IPV4_UDP) | \
	 BIT(FM10K_RSSTYPE_IPV6_TCP) | \
	 BIT(FM10K_RSSTYPE_IPV6_UDP))

static inline void fm10k_rx_hash(struct fm10k_ring *ring,
				 union fm10k_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
		     (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

static void fm10k_type_trans(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc __maybe_unused *rx_desc,
			     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);

	/* check to see if DGLORT belongs to a MACVLAN */
	if (l2_accel) {
		u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;

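		/* MACVLAN DGLORTs start one past the l2_accel base DGLORT,
		 * so subtracting (base + 1) yields the macvlan table index
		 */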
		idx -= l2_accel->dglort;
		if (idx < l2_accel->size && l2_accel->macvlan[idx])
			dev = l2_accel->macvlan[idx];
		else
			l2_accel = NULL;
	}

	/* Record Rx queue, or update macvlan statistics */
	if (!l2_accel)
		skb_record_rx_queue(skb, rx_ring->queue_index);
	else
		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
				 false);

	skb->protocol = eth_type_trans(skb, dev);
}

/**
 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int len = skb->len;

	fm10k_rx_hash(rx_ring, rx_desc, skb);

	fm10k_rx_checksum(rx_ring, rx_desc, skb);

	FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;

	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;

	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;

	if (rx_desc->w.vlan) {
		u16 vid = le16_to_cpu(rx_desc->w.vlan);

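		/* only tag the skb when the VID differs from the ring's
		 * default VLAN, or when priority bits are set on the
		 * default VLAN
		 */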
		if ((vid & VLAN_VID_MASK) != rx_ring->vid)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		else if (vid & VLAN_PRIO_MASK)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vid & VLAN_PRIO_MASK);
	}

	fm10k_type_trans(rx_ring, rx_desc, skb);

	return len;
}

/**
 * fm10k_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(FM10K_RX_DESC(rx_ring, ntc));

	if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
		return false;

	return true;
}

/**
 * fm10k_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
				  union fm10k_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	if (unlikely((fm10k_test_staterr(rx_desc,
					 FM10K_RXD_STATUS_RXE)))) {
#define FM10K_TEST_RXD_BIT(rxd, bit) \
	((rxd)->w.csum_err & cpu_to_le16(bit))
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
			rx_ring->rx_stats.switch_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
			rx_ring->rx_stats.drops++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
			rx_ring->rx_stats.pp_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
			rx_ring->rx_stats.link_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
			rx_ring->rx_stats.length_errors++;
		dev_kfree_skb_any(skb);
		rx_ring->rx_stats.errors++;
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * fm10k_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
			      struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}

static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
			      struct fm10k_ring *rx_ring,
			      int budget)
{
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = fm10k_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union fm10k_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
			fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!rx_desc->d.staterr)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		/* retrieve a buffer from the ring */
		skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (fm10k_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* populate checksum, timestamp, VLAN, and protocol */
		total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);

		fm10k_receive_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_packets;
	rx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	return total_packets;
}

#define VXLAN_HLEN (sizeof(struct udphdr) + 8)
static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
{
	struct fm10k_intfc *interface = netdev_priv(skb->dev);

	if (interface->vxlan_port != udp_hdr(skb)->dest)
		return NULL;

	/* return offset of udp_hdr plus 8 bytes for VXLAN header */
	return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
}

#define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF)
#define NVGRE_TNI htons(0x2000)
struct fm10k_nvgre_hdr {
	__be16 flags;
	__be16 proto;
	__be32 tni;
};

static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
{
	struct fm10k_nvgre_hdr *nvgre_hdr;
	int hlen = ip_hdrlen(skb);

	/* currently only IPv4 is supported due to hlen above */
	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return NULL;

	/* our transport header should be NVGRE */
	nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);

	/* verify all reserved flags are 0 */
	if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
		return NULL;

	/* report start of ethernet header */
	if (nvgre_hdr->flags & NVGRE_TNI)
		return (struct ethhdr *)(nvgre_hdr + 1);

	return (struct ethhdr *)(&nvgre_hdr->tni);
}

__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
{
	u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
	struct ethhdr *eth_hdr;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		eth_hdr = fm10k_port_is_vxlan(skb);
		break;
	case IPPROTO_GRE:
		eth_hdr = fm10k_gre_is_nvgre(skb);
		break;
	default:
		return 0;
	}

	if (!eth_hdr)
		return 0;

	switch (eth_hdr->h_proto) {
	case htons(ETH_P_IP):
		inner_l4_hdr = inner_ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (inner_l4_hdr) {
	case IPPROTO_TCP:
		inner_l4_hlen = inner_tcp_hdrlen(skb);
		break;
	case IPPROTO_UDP:
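		/* UDP has a fixed 8-byte header */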
		inner_l4_hlen = 8;
		break;
	default:
		return 0;
	}

	/* The hardware allows tunnel offloads only if the combined inner and
	 * outer header is 184 bytes or less
	 */
	if (skb_inner_transport_header(skb) + inner_l4_hlen -
	    skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
		return 0;

	return eth_hdr->h_proto;
}

static int fm10k_tso(struct fm10k_ring *tx_ring,
		     struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	unsigned char *th;
	u8 hdrlen;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	/* compute header lengths */
	if (skb->encapsulation) {
		if (!fm10k_tx_encap_offload(skb))
			goto err_vxlan;
		th = skb_inner_transport_header(skb);
	} else {
		th = skb_transport_header(skb);
	}

	/* compute offset from SOF to transport header and add header len */
	hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);

	first->tx_flags |= FM10K_TX_FLAGS_CSUM;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * hdrlen;

	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = hdrlen;
	tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);

	return 1;

err_vxlan:
	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
	if (net_ratelimit())
		netdev_err(tx_ring->netdev,
			   "TSO requested for unsupported tunnel, disabling offload\n");
	return -1;
}

static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
			  struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	union {
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		u8 *raw;
	} network_hdr;
	u8 *transport_hdr;
	__be16 frag_off;
	__be16 protocol;
	u8 l4_hdr = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto no_csum;

	if (skb->encapsulation) {
		protocol = fm10k_tx_encap_offload(skb);
		if (!protocol) {
			if (skb_checksum_help(skb)) {
				dev_warn(tx_ring->dev,
					 "failed to offload encap csum!\n");
				tx_ring->tx_stats.csum_err++;
			}
			goto no_csum;
		}
		network_hdr.raw = skb_inner_network_header(skb);
		transport_hdr = skb_inner_transport_header(skb);
	} else {
		protocol = vlan_get_protocol(skb);
		network_hdr.raw = skb_network_header(skb);
		transport_hdr = skb_transport_header(skb);
	}

	switch (protocol) {
	case htons(ETH_P_IP):
		l4_hdr = network_hdr.ipv4->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = network_hdr.ipv6->nexthdr;
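		/* if extension headers sit between the IPv6 header and the
		 * transport header, walk past them to find the real L4
		 * protocol; a fragment header rules out offload via the
		 * switch below
		 */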
		if (likely((transport_hdr - network_hdr.raw) ==
			   sizeof(struct ipv6hdr)))
			break;
		ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
				      sizeof(struct ipv6hdr),
				 &l4_hdr, &frag_off);
		if (unlikely(frag_off))
			l4_hdr = NEXTHDR_FRAGMENT;
		break;
	default:
		break;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		break;
	case IPPROTO_GRE:
		if (skb->encapsulation)
			break;
		fallthrough;
	default:
		if (unlikely(net_ratelimit())) {
			dev_warn(tx_ring->dev,
				 "partial checksum, version=%d l4 proto=%x\n",
				 protocol, l4_hdr);
		}
		skb_checksum_help(skb);
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
	}

	/* update TX checksum flag */
	first->tx_flags |= FM10K_TX_FLAGS_CSUM;
	tx_ring->tx_stats.csum_good++;

no_csum:
	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = 0;
	tx_desc->mss = 0;
}

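/* translate a flag bit in _input to the corresponding bit position in the
 * result; both constants are powers of two, so the multiply or divide by
 * their ratio reduces to a constant shift
 */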
#define FM10K_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 desc_flags = 0;

	/* set checksum offload bits */
	desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
				     FM10K_TXD_FLAG_CSUM);

	return desc_flags;
}

static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
			       struct fm10k_tx_desc *tx_desc, u16 i,
			       dma_addr_t dma, unsigned int size, u8 desc_flags)
{
	/* set RS and INT for last frame in a cache line */
	if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
		desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;

	/* record values to descriptor */
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->flags = desc_flags;
	tx_desc->buflen = cpu_to_le16(size);

	/* return true if we just wrapped the ring */
	return i == tx_ring->count;
}

static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in case another CPU has just made room available */
	if (likely(fm10k_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	if (likely(fm10k_desc_unused(tx_ring) >= size))
		return 0;
	return __fm10k_maybe_stop_tx(tx_ring, size);
}

static void fm10k_tx_map(struct fm10k_ring *tx_ring,
			 struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	skb_frag_t *frag;
	unsigned char *data;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;
	u8 flags = fm10k_tx_desc_flags(skb, tx_flags);

	tx_desc = FM10K_TX_DESC(tx_ring, i);

	/* add HW VLAN tag */
	if (skb_vlan_tag_present(skb))
		tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	else
		tx_desc->vlan = 0;

	size = skb_headlen(skb);
	data = skb->data;

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);

	data_len = skb->data_len;
	tx_buffer = first;
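	/* the first pass through this loop consumes the mapping of the
	 * linear skb data; each later pass maps one page fragment, with the
	 * inner while loop splitting any mapping larger than
	 * FM10K_MAX_DATA_PER_TXD across multiple descriptors
	 */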
963b101c962SAlexander Duyck 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
964b101c962SAlexander Duyck 		if (dma_mapping_error(tx_ring->dev, dma))
965b101c962SAlexander Duyck 			goto dma_error;
966b101c962SAlexander Duyck 
967b101c962SAlexander Duyck 		/* record length, and DMA address */
968b101c962SAlexander Duyck 		dma_unmap_len_set(tx_buffer, len, size);
969b101c962SAlexander Duyck 		dma_unmap_addr_set(tx_buffer, dma, dma);
970b101c962SAlexander Duyck 
971b101c962SAlexander Duyck 		while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
972b101c962SAlexander Duyck 			if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
973b101c962SAlexander Duyck 					       FM10K_MAX_DATA_PER_TXD, flags)) {
974b101c962SAlexander Duyck 				tx_desc = FM10K_TX_DESC(tx_ring, 0);
975b101c962SAlexander Duyck 				i = 0;
976b101c962SAlexander Duyck 			}
977b101c962SAlexander Duyck 
978b101c962SAlexander Duyck 			dma += FM10K_MAX_DATA_PER_TXD;
979b101c962SAlexander Duyck 			size -= FM10K_MAX_DATA_PER_TXD;
980b101c962SAlexander Duyck 		}
981b101c962SAlexander Duyck 
982b101c962SAlexander Duyck 		if (likely(!data_len))
983b101c962SAlexander Duyck 			break;
984b101c962SAlexander Duyck 
985b101c962SAlexander Duyck 		if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
986b101c962SAlexander Duyck 				       dma, size, flags)) {
987b101c962SAlexander Duyck 			tx_desc = FM10K_TX_DESC(tx_ring, 0);
988b101c962SAlexander Duyck 			i = 0;
989b101c962SAlexander Duyck 		}
990b101c962SAlexander Duyck 
991b101c962SAlexander Duyck 		size = skb_frag_size(frag);
992b101c962SAlexander Duyck 		data_len -= size;
993b101c962SAlexander Duyck 
994b101c962SAlexander Duyck 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
995b101c962SAlexander Duyck 				       DMA_TO_DEVICE);
996b101c962SAlexander Duyck 
997b101c962SAlexander Duyck 		tx_buffer = &tx_ring->tx_buffer[i];
998b101c962SAlexander Duyck 	}
999b101c962SAlexander Duyck 
1000b101c962SAlexander Duyck 	/* write last descriptor with LAST bit set */
1001b101c962SAlexander Duyck 	flags |= FM10K_TXD_FLAG_LAST;
1002b101c962SAlexander Duyck 
1003b101c962SAlexander Duyck 	if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
1004b101c962SAlexander Duyck 		i = 0;
1005b101c962SAlexander Duyck 
1006b101c962SAlexander Duyck 	/* record bytecount for BQL */
1007b101c962SAlexander Duyck 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1008b101c962SAlexander Duyck 
1009b101c962SAlexander Duyck 	/* record SW timestamp if HW timestamp is not available */
1010b101c962SAlexander Duyck 	skb_tx_timestamp(first->skb);
1011b101c962SAlexander Duyck 
1012b101c962SAlexander Duyck 	/* Force memory writes to complete before letting h/w know there
1013b101c962SAlexander Duyck 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
1014b101c962SAlexander Duyck 	 * memory model archs, such as IA-64).
1015b101c962SAlexander Duyck 	 *
1016b101c962SAlexander Duyck 	 * We also need this memory barrier to make certain all of the
1017b101c962SAlexander Duyck 	 * status bits have been updated before next_to_watch is written.
1018b101c962SAlexander Duyck 	 */
1019b101c962SAlexander Duyck 	wmb();
1020b101c962SAlexander Duyck 
1021b101c962SAlexander Duyck 	/* set next_to_watch value indicating a packet is present */
1022b101c962SAlexander Duyck 	first->next_to_watch = tx_desc;
1023b101c962SAlexander Duyck 
1024b101c962SAlexander Duyck 	tx_ring->next_to_use = i;
1025b101c962SAlexander Duyck 
10262c2b2f0cSAlexander Duyck 	/* Make sure there is space in the ring for the next send. */
10272c2b2f0cSAlexander Duyck 	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
10282c2b2f0cSAlexander Duyck 
1029b101c962SAlexander Duyck 	/* notify HW of packet */
10306b16f9eeSFlorian Westphal 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
1031b101c962SAlexander Duyck 		writel(i, tx_ring->tail);
1033b101c962SAlexander Duyck 
1034b101c962SAlexander Duyck 	return;
1035b101c962SAlexander Duyck dma_error:
1036b101c962SAlexander Duyck 	dev_err(tx_ring->dev, "TX DMA map failed\n");
1037b101c962SAlexander Duyck 
1038b101c962SAlexander Duyck 	/* clear dma mappings for failed tx_buffer map */
1039b101c962SAlexander Duyck 	for (;;) {
1040b101c962SAlexander Duyck 		tx_buffer = &tx_ring->tx_buffer[i];
1041b101c962SAlexander Duyck 		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
1042b101c962SAlexander Duyck 		if (tx_buffer == first)
1043b101c962SAlexander Duyck 			break;
1044b101c962SAlexander Duyck 		if (i == 0)
1045b101c962SAlexander Duyck 			i = tx_ring->count;
1046b101c962SAlexander Duyck 		i--;
1047b101c962SAlexander Duyck 	}
1048b101c962SAlexander Duyck 
1049b101c962SAlexander Duyck 	tx_ring->next_to_use = i;
1050b101c962SAlexander Duyck }
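
/* Illustrative note (added for clarity, not part of the original file):
 * the wmb() above pairs with the smp_rmb() in fm10k_clean_tx_irq().
 *
 *	producer:  build descriptors -> wmb() -> write next_to_watch
 *	consumer:  read next_to_watch -> smp_rmb() -> read DONE flag
 *
 * A non-NULL next_to_watch therefore guarantees the consumer sees a
 * fully written descriptor chain.
 */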
1051b101c962SAlexander Duyck 
1052b101c962SAlexander Duyck netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
1053b101c962SAlexander Duyck 				  struct fm10k_ring *tx_ring)
1054b101c962SAlexander Duyck {
1055b101c962SAlexander Duyck 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
105603d13a51SJacob Keller 	struct fm10k_tx_buffer *first;
105703d13a51SJacob Keller 	unsigned short f;
105803d13a51SJacob Keller 	u32 tx_flags = 0;
105903d13a51SJacob Keller 	int tso;
1060b101c962SAlexander Duyck 
1061b101c962SAlexander Duyck 	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
1062b101c962SAlexander Duyck 	 *       + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
1063b101c962SAlexander Duyck 	 *       + 2 desc gap to keep tail from touching head
1064b101c962SAlexander Duyck 	 * otherwise try next time
1065b101c962SAlexander Duyck 	 */
10660ea7e88dSJacob Keller 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
10670ea7e88dSJacob Keller 		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
10680ea7e88dSJacob Keller 
10690ea7e88dSJacob Keller 		count += TXD_USE_COUNT(skb_frag_size(frag));
10700ea7e88dSJacob Keller 	}
1071aae072e3SAlexander Duyck 
1072b101c962SAlexander Duyck 	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
1073b101c962SAlexander Duyck 		tx_ring->tx_stats.tx_busy++;
1074b101c962SAlexander Duyck 		return NETDEV_TX_BUSY;
1075b101c962SAlexander Duyck 	}
1076b101c962SAlexander Duyck 
1077b101c962SAlexander Duyck 	/* record the location of the first descriptor for this packet */
1078b101c962SAlexander Duyck 	first = &tx_ring->tx_buffer[tx_ring->next_to_use];
1079b101c962SAlexander Duyck 	first->skb = skb;
1080b101c962SAlexander Duyck 	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
1081b101c962SAlexander Duyck 	first->gso_segs = 1;
1082b101c962SAlexander Duyck 
1083b101c962SAlexander Duyck 	/* record initial flags and protocol */
1084b101c962SAlexander Duyck 	first->tx_flags = tx_flags;
1085b101c962SAlexander Duyck 
108676a540d4SAlexander Duyck 	tso = fm10k_tso(tx_ring, first);
108776a540d4SAlexander Duyck 	if (tso < 0)
108876a540d4SAlexander Duyck 		goto out_drop;
108976a540d4SAlexander Duyck 	else if (!tso)
109076a540d4SAlexander Duyck 		fm10k_tx_csum(tx_ring, first);
109176a540d4SAlexander Duyck 
1092b101c962SAlexander Duyck 	fm10k_tx_map(tx_ring, first);
1093b101c962SAlexander Duyck 
1094b101c962SAlexander Duyck 	return NETDEV_TX_OK;
109576a540d4SAlexander Duyck 
109676a540d4SAlexander Duyck out_drop:
109776a540d4SAlexander Duyck 	dev_kfree_skb_any(first->skb);
109876a540d4SAlexander Duyck 	first->skb = NULL;
109976a540d4SAlexander Duyck 
110076a540d4SAlexander Duyck 	return NETDEV_TX_OK;
1101b101c962SAlexander Duyck }
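
/* Illustrative sketch (added for clarity, not part of the original
 * file): the descriptor estimate above relies on TXD_USE_COUNT(),
 * which is a DIV_ROUND_UP() of the buffer length over
 * FM10K_MAX_DATA_PER_TXD.  A stand-in version, assuming a 16 KB
 * per-descriptor limit:
 */
static inline unsigned int example_txd_use_count(unsigned int size)
{
	const unsigned int max_per_txd = 1u << 14;	/* assumed limit */

	/* equivalent to DIV_ROUND_UP(size, max_per_txd) */
	return (size + max_per_txd - 1) / max_per_txd;
}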
1102b101c962SAlexander Duyck 
1103b101c962SAlexander Duyck static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
1104b101c962SAlexander Duyck {
1105b101c962SAlexander Duyck 	return ring->stats.packets;
1106b101c962SAlexander Duyck }
1107b101c962SAlexander Duyck 
11085b9e4432SJacob Keller /**
11095b9e4432SJacob Keller  * fm10k_get_tx_pending - how many Tx descriptors not processed
11105b9e4432SJacob Keller  * @ring: the ring structure
11115b9e4432SJacob Keller  * @in_sw: is tx_pending being checked in SW or in HW?
11125b9e4432SJacob Keller  */
11135b9e4432SJacob Keller u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw)
1114b101c962SAlexander Duyck {
111534bad71cSJacob Keller 	struct fm10k_intfc *interface = ring->q_vector->interface;
111634bad71cSJacob Keller 	struct fm10k_hw *hw = &interface->hw;
11175b9e4432SJacob Keller 	u32 head, tail;
111834bad71cSJacob Keller 
11195b9e4432SJacob Keller 	if (likely(in_sw)) {
11205b9e4432SJacob Keller 		head = ring->next_to_clean;
11215b9e4432SJacob Keller 		tail = ring->next_to_use;
11225b9e4432SJacob Keller 	} else {
11235b9e4432SJacob Keller 		head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
11245b9e4432SJacob Keller 		tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
11255b9e4432SJacob Keller 	}
1126b101c962SAlexander Duyck 
1127b101c962SAlexander Duyck 	return ((head <= tail) ? tail : tail + ring->count) - head;
1128b101c962SAlexander Duyck }
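
/* Worked example (added for clarity, not part of the original file):
 * the expression above is wrap-aware.  With a 256-entry ring,
 * head = 250 and tail = 10 means tail has wrapped, so the pending
 * count is (10 + 256) - 250 = 16; without a wrap (head = 4,
 * tail = 10) it is simply 10 - 4 = 6.
 */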
1129b101c962SAlexander Duyck 
1130b101c962SAlexander Duyck bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
1131b101c962SAlexander Duyck {
1132b101c962SAlexander Duyck 	u32 tx_done = fm10k_get_tx_completed(tx_ring);
1133b101c962SAlexander Duyck 	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
11345b9e4432SJacob Keller 	u32 tx_pending = fm10k_get_tx_pending(tx_ring, true);
1135b101c962SAlexander Duyck 
1136b101c962SAlexander Duyck 	clear_check_for_tx_hang(tx_ring);
1137b101c962SAlexander Duyck 
1138b101c962SAlexander Duyck 	/* Check for a hung queue, but be thorough. This verifies
1139b101c962SAlexander Duyck 	 * that a transmit has been completed since the previous
1140b101c962SAlexander Duyck 	 * check AND there is at least one packet pending. By
1141b101c962SAlexander Duyck 	 * requiring this to fail twice we avoid races with
1142b101c962SAlexander Duyck 	 * clearing the ARMED bit and conditions where we
1143b101c962SAlexander Duyck 	 * run the check_tx_hang logic with a transmit completion
1144b101c962SAlexander Duyck 	 * pending but without time to complete it yet.
1145b101c962SAlexander Duyck 	 */
1146b101c962SAlexander Duyck 	if (!tx_pending || (tx_done_old != tx_done)) {
1147b101c962SAlexander Duyck 		/* update completed stats and continue */
1148b101c962SAlexander Duyck 		tx_ring->tx_stats.tx_done_old = tx_done;
1149b101c962SAlexander Duyck 		/* reset the countdown */
115046929557SJacob Keller 		clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
1151b101c962SAlexander Duyck 
1152b101c962SAlexander Duyck 		return false;
1153b101c962SAlexander Duyck 	}
1154b101c962SAlexander Duyck 
1155b101c962SAlexander Duyck 	/* make sure it is true for two checks in a row */
115646929557SJacob Keller 	return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
1157b101c962SAlexander Duyck }
1158b101c962SAlexander Duyck 
1159b101c962SAlexander Duyck /**
1160b101c962SAlexander Duyck  * fm10k_tx_timeout_reset - initiate reset due to Tx timeout
1161b101c962SAlexander Duyck  * @interface: driver private struct
1162b101c962SAlexander Duyck  **/
1163b101c962SAlexander Duyck void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
1164b101c962SAlexander Duyck {
1165b101c962SAlexander Duyck 	/* Do the reset outside of interrupt context */
116646929557SJacob Keller 	if (!test_bit(__FM10K_DOWN, interface->state)) {
1167b101c962SAlexander Duyck 		interface->tx_timeout_count++;
11683ee7b3a3SJacob Keller 		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
1169b101c962SAlexander Duyck 		fm10k_service_event_schedule(interface);
1170b101c962SAlexander Duyck 	}
1171b101c962SAlexander Duyck }
1172b101c962SAlexander Duyck 
1173b101c962SAlexander Duyck /**
1174b101c962SAlexander Duyck  * fm10k_clean_tx_irq - Reclaim resources after transmit completes
1175b101c962SAlexander Duyck  * @q_vector: structure containing interrupt and ring information
1176b101c962SAlexander Duyck  * @tx_ring: tx ring to clean
1177144d8305SAlexander Duyck  * @napi_budget: Used to determine if we are in netpoll
1178b101c962SAlexander Duyck  **/
1179b101c962SAlexander Duyck static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
1180144d8305SAlexander Duyck 			       struct fm10k_ring *tx_ring, int napi_budget)
1181b101c962SAlexander Duyck {
1182b101c962SAlexander Duyck 	struct fm10k_intfc *interface = q_vector->interface;
1183b101c962SAlexander Duyck 	struct fm10k_tx_buffer *tx_buffer;
1184b101c962SAlexander Duyck 	struct fm10k_tx_desc *tx_desc;
1185b101c962SAlexander Duyck 	unsigned int total_bytes = 0, total_packets = 0;
1186b101c962SAlexander Duyck 	unsigned int budget = q_vector->tx.work_limit;
1187b101c962SAlexander Duyck 	unsigned int i = tx_ring->next_to_clean;
1188b101c962SAlexander Duyck 
118946929557SJacob Keller 	if (test_bit(__FM10K_DOWN, interface->state))
1190b101c962SAlexander Duyck 		return true;
1191b101c962SAlexander Duyck 
1192b101c962SAlexander Duyck 	tx_buffer = &tx_ring->tx_buffer[i];
1193b101c962SAlexander Duyck 	tx_desc = FM10K_TX_DESC(tx_ring, i);
1194b101c962SAlexander Duyck 	i -= tx_ring->count;
1195b101c962SAlexander Duyck 
1196b101c962SAlexander Duyck 	do {
1197b101c962SAlexander Duyck 		struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch;
1198b101c962SAlexander Duyck 
1199b101c962SAlexander Duyck 		/* if next_to_watch is not set then there is no work pending */
1200b101c962SAlexander Duyck 		if (!eop_desc)
1201b101c962SAlexander Duyck 			break;
1202b101c962SAlexander Duyck 
1203b101c962SAlexander Duyck 		/* prevent any other reads prior to eop_desc */
12047b8edcc6SBrian King 		smp_rmb();
1205b101c962SAlexander Duyck 
1206b101c962SAlexander Duyck 		/* if DD is not set pending work has not been completed */
1207b101c962SAlexander Duyck 		if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
1208b101c962SAlexander Duyck 			break;
1209b101c962SAlexander Duyck 
1210b101c962SAlexander Duyck 		/* clear next_to_watch to prevent false hangs */
1211b101c962SAlexander Duyck 		tx_buffer->next_to_watch = NULL;
1212b101c962SAlexander Duyck 
1213b101c962SAlexander Duyck 		/* update the statistics for this packet */
1214b101c962SAlexander Duyck 		total_bytes += tx_buffer->bytecount;
1215b101c962SAlexander Duyck 		total_packets += tx_buffer->gso_segs;
1216b101c962SAlexander Duyck 
1217b101c962SAlexander Duyck 		/* free the skb */
1218144d8305SAlexander Duyck 		napi_consume_skb(tx_buffer->skb, napi_budget);
1219b101c962SAlexander Duyck 
1220b101c962SAlexander Duyck 		/* unmap skb header data */
1221b101c962SAlexander Duyck 		dma_unmap_single(tx_ring->dev,
1222b101c962SAlexander Duyck 				 dma_unmap_addr(tx_buffer, dma),
1223b101c962SAlexander Duyck 				 dma_unmap_len(tx_buffer, len),
1224b101c962SAlexander Duyck 				 DMA_TO_DEVICE);
1225b101c962SAlexander Duyck 
1226b101c962SAlexander Duyck 		/* clear tx_buffer data */
1227b101c962SAlexander Duyck 		tx_buffer->skb = NULL;
1228b101c962SAlexander Duyck 		dma_unmap_len_set(tx_buffer, len, 0);
1229b101c962SAlexander Duyck 
1230b101c962SAlexander Duyck 		/* unmap remaining buffers */
1231b101c962SAlexander Duyck 		while (tx_desc != eop_desc) {
1232b101c962SAlexander Duyck 			tx_buffer++;
1233b101c962SAlexander Duyck 			tx_desc++;
1234b101c962SAlexander Duyck 			i++;
1235b101c962SAlexander Duyck 			if (unlikely(!i)) {
1236b101c962SAlexander Duyck 				i -= tx_ring->count;
1237b101c962SAlexander Duyck 				tx_buffer = tx_ring->tx_buffer;
1238b101c962SAlexander Duyck 				tx_desc = FM10K_TX_DESC(tx_ring, 0);
1239b101c962SAlexander Duyck 			}
1240b101c962SAlexander Duyck 
1241b101c962SAlexander Duyck 			/* unmap any remaining paged data */
1242b101c962SAlexander Duyck 			if (dma_unmap_len(tx_buffer, len)) {
1243b101c962SAlexander Duyck 				dma_unmap_page(tx_ring->dev,
1244b101c962SAlexander Duyck 					       dma_unmap_addr(tx_buffer, dma),
1245b101c962SAlexander Duyck 					       dma_unmap_len(tx_buffer, len),
1246b101c962SAlexander Duyck 					       DMA_TO_DEVICE);
1247b101c962SAlexander Duyck 				dma_unmap_len_set(tx_buffer, len, 0);
1248b101c962SAlexander Duyck 			}
1249b101c962SAlexander Duyck 		}
1250b101c962SAlexander Duyck 
1251b101c962SAlexander Duyck 		/* move us one more past the eop_desc for start of next pkt */
1252b101c962SAlexander Duyck 		tx_buffer++;
1253b101c962SAlexander Duyck 		tx_desc++;
1254b101c962SAlexander Duyck 		i++;
1255b101c962SAlexander Duyck 		if (unlikely(!i)) {
1256b101c962SAlexander Duyck 			i -= tx_ring->count;
1257b101c962SAlexander Duyck 			tx_buffer = tx_ring->tx_buffer;
1258b101c962SAlexander Duyck 			tx_desc = FM10K_TX_DESC(tx_ring, 0);
1259b101c962SAlexander Duyck 		}
1260b101c962SAlexander Duyck 
1261b101c962SAlexander Duyck 		/* issue prefetch for next Tx descriptor */
1262b101c962SAlexander Duyck 		prefetch(tx_desc);
1263b101c962SAlexander Duyck 
1264b101c962SAlexander Duyck 		/* update budget accounting */
1265b101c962SAlexander Duyck 		budget--;
1266b101c962SAlexander Duyck 	} while (likely(budget));
1267b101c962SAlexander Duyck 
1268b101c962SAlexander Duyck 	i += tx_ring->count;
1269b101c962SAlexander Duyck 	tx_ring->next_to_clean = i;
1270b101c962SAlexander Duyck 	u64_stats_update_begin(&tx_ring->syncp);
1271b101c962SAlexander Duyck 	tx_ring->stats.bytes += total_bytes;
1272b101c962SAlexander Duyck 	tx_ring->stats.packets += total_packets;
1273b101c962SAlexander Duyck 	u64_stats_update_end(&tx_ring->syncp);
1274b101c962SAlexander Duyck 	q_vector->tx.total_bytes += total_bytes;
1275b101c962SAlexander Duyck 	q_vector->tx.total_packets += total_packets;
1276b101c962SAlexander Duyck 
1277b101c962SAlexander Duyck 	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
1278b101c962SAlexander Duyck 		/* schedule immediate reset if we believe we hung */
1279b101c962SAlexander Duyck 		struct fm10k_hw *hw = &interface->hw;
1280b101c962SAlexander Duyck 
1281b101c962SAlexander Duyck 		netif_err(interface, drv, tx_ring->netdev,
1282b101c962SAlexander Duyck 			  "Detected Tx Unit Hang\n"
1283b101c962SAlexander Duyck 			  "  Tx Queue             <%d>\n"
1284b101c962SAlexander Duyck 			  "  TDH, TDT             <%x>, <%x>\n"
1285b101c962SAlexander Duyck 			  "  next_to_use          <%x>\n"
1286b101c962SAlexander Duyck 			  "  next_to_clean        <%x>\n",
1287b101c962SAlexander Duyck 			  tx_ring->queue_index,
1288b101c962SAlexander Duyck 			  fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
1289b101c962SAlexander Duyck 			  fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
1290b101c962SAlexander Duyck 			  tx_ring->next_to_use, i);
1291b101c962SAlexander Duyck 
1292b101c962SAlexander Duyck 		netif_stop_subqueue(tx_ring->netdev,
1293b101c962SAlexander Duyck 				    tx_ring->queue_index);
1294b101c962SAlexander Duyck 
1295b101c962SAlexander Duyck 		netif_info(interface, probe, tx_ring->netdev,
1296b101c962SAlexander Duyck 			   "tx hang %d detected on queue %d, resetting interface\n",
1297b101c962SAlexander Duyck 			   interface->tx_timeout_count + 1,
1298b101c962SAlexander Duyck 			   tx_ring->queue_index);
1299b101c962SAlexander Duyck 
1300b101c962SAlexander Duyck 		fm10k_tx_timeout_reset(interface);
1301b101c962SAlexander Duyck 
1302b101c962SAlexander Duyck 		/* the netdev is about to reset, no point in enabling stuff */
1303b101c962SAlexander Duyck 		return true;
1304b101c962SAlexander Duyck 	}
1305b101c962SAlexander Duyck 
1306b101c962SAlexander Duyck 	/* notify netdev of completed buffers */
1307b101c962SAlexander Duyck 	netdev_tx_completed_queue(txring_txq(tx_ring),
1308b101c962SAlexander Duyck 				  total_packets, total_bytes);
1309b101c962SAlexander Duyck 
1310b101c962SAlexander Duyck #define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2)
1311b101c962SAlexander Duyck 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1312b101c962SAlexander Duyck 		     (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1313b101c962SAlexander Duyck 		/* Make sure that anybody stopping the queue after this
1314b101c962SAlexander Duyck 		 * sees the new next_to_clean.
1315b101c962SAlexander Duyck 		 */
1316b101c962SAlexander Duyck 		smp_mb();
1317b101c962SAlexander Duyck 		if (__netif_subqueue_stopped(tx_ring->netdev,
1318b101c962SAlexander Duyck 					     tx_ring->queue_index) &&
131946929557SJacob Keller 		    !test_bit(__FM10K_DOWN, interface->state)) {
1320b101c962SAlexander Duyck 			netif_wake_subqueue(tx_ring->netdev,
1321b101c962SAlexander Duyck 					    tx_ring->queue_index);
1322b101c962SAlexander Duyck 			++tx_ring->tx_stats.restart_queue;
1323b101c962SAlexander Duyck 		}
1324b101c962SAlexander Duyck 	}
1325b101c962SAlexander Duyck 
1326b101c962SAlexander Duyck 	return !!budget;
1327b101c962SAlexander Duyck }
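
/* Illustrative note (added for clarity, not part of the original
 * file): the netdev_tx_completed_queue() call above closes the BQL
 * loop opened at transmit time:
 *
 *	xmit path:   netdev_tx_sent_queue(txq, first->bytecount);
 *	clean path:  netdev_tx_completed_queue(txq, packets, bytes);
 *
 * Byte Queue Limits uses this pairing to bound the number of
 * in-flight bytes per queue and keep latency low under load.
 */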
1328b101c962SAlexander Duyck 
132918283cadSAlexander Duyck /**
133018283cadSAlexander Duyck  * fm10k_update_itr - update the dynamic ITR value based on packet size
133118283cadSAlexander Duyck  * @ring_container: Container for rings to have ITR updated
133218283cadSAlexander Duyck  *
133318283cadSAlexander Duyck  * Stores a new ITR value based strictly on packet size.  The
133418283cadSAlexander Duyck  * divisors and thresholds used by this function were determined based
133518283cadSAlexander Duyck  * on theoretical maximum wire speed and testing data, in order to
133618283cadSAlexander Duyck  * minimize response time while increasing bulk throughput.
133718283cadSAlexander Duyck  **/
133918283cadSAlexander Duyck static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
134018283cadSAlexander Duyck {
1341242722ddSJacob Keller 	unsigned int avg_wire_size, packets, itr_round;
134218283cadSAlexander Duyck 
134318283cadSAlexander Duyck 	/* Only update ITR if we are using adaptive setting */
1344584373f5SJacob Keller 	if (!ITR_IS_ADAPTIVE(ring_container->itr))
134518283cadSAlexander Duyck 		goto clear_counts;
134618283cadSAlexander Duyck 
134718283cadSAlexander Duyck 	packets = ring_container->total_packets;
134818283cadSAlexander Duyck 	if (!packets)
134918283cadSAlexander Duyck 		goto clear_counts;
135018283cadSAlexander Duyck 
135118283cadSAlexander Duyck 	avg_wire_size = ring_container->total_bytes / packets;
135218283cadSAlexander Duyck 
1353242722ddSJacob Keller 	/* The following is a crude approximation of:
1354242722ddSJacob Keller 	 *  wmem_default / (size + overhead) = desired_pkts_per_int
1355242722ddSJacob Keller 	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1356242722ddSJacob Keller 	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1357242722ddSJacob Keller 	 *
1358242722ddSJacob Keller 	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1359242722ddSJacob Keller 	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1360242722ddSJacob Keller 	 * formula down to
1361242722ddSJacob Keller 	 *
1362242722ddSJacob Keller 	 *  (34 * (size + 24)) / (size + 640) = ITR
1363242722ddSJacob Keller 	 *
1364242722ddSJacob Keller 	 * We first do some math on the packet size and then finally bitshift
1365242722ddSJacob Keller 	 * by 8 after rounding up. We also have to account for PCIe link speed
1366242722ddSJacob Keller 	 * difference as ITR scales based on this.
1367242722ddSJacob Keller 	 */
1368242722ddSJacob Keller 	if (avg_wire_size <= 360) {
1369242722ddSJacob Keller 		/* Start at 250K ints/sec and gradually drop to 77K ints/sec */
1370242722ddSJacob Keller 		avg_wire_size *= 8;
1371242722ddSJacob Keller 		avg_wire_size += 376;
1372242722ddSJacob Keller 	} else if (avg_wire_size <= 1152) {
1373242722ddSJacob Keller 		/* 77K ints/sec to 45K ints/sec */
1374242722ddSJacob Keller 		avg_wire_size *= 3;
1375242722ddSJacob Keller 		avg_wire_size += 2176;
1376242722ddSJacob Keller 	} else if (avg_wire_size <= 1920) {
1377242722ddSJacob Keller 		/* 45K ints/sec to 38K ints/sec */
1378242722ddSJacob Keller 		avg_wire_size += 4480;
1379242722ddSJacob Keller 	} else {
1380242722ddSJacob Keller 		/* plateau at a limit of 38K ints/sec */
1381242722ddSJacob Keller 		avg_wire_size = 6656;
1382242722ddSJacob Keller 	}
138318283cadSAlexander Duyck 
1384242722ddSJacob Keller 	/* Perform final bitshift for division after rounding up to ensure
1385242722ddSJacob Keller 	 * that the calculation will never get below a 1. The bit shift
1386242722ddSJacob Keller 	 * accounts for changes in the ITR due to PCIe link speed.
1387242722ddSJacob Keller 	 */
1388ce4dad2cSJacob Keller 	itr_round = READ_ONCE(ring_container->itr_scale) + 8;
1389fcdb0a99SBruce Allan 	avg_wire_size += BIT(itr_round) - 1;
1390242722ddSJacob Keller 	avg_wire_size >>= itr_round;
139118283cadSAlexander Duyck 
139218283cadSAlexander Duyck 	/* write back value and retain adaptive flag */
139318283cadSAlexander Duyck 	ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE;
139418283cadSAlexander Duyck 
139518283cadSAlexander Duyck clear_counts:
139618283cadSAlexander Duyck 	ring_container->total_bytes = 0;
139718283cadSAlexander Duyck 	ring_container->total_packets = 0;
139818283cadSAlexander Duyck }
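
/* Worked example (added for clarity, not part of the original file):
 * for an average packet of 64 bytes the first branch above gives
 * 64 * 8 + 376 = 888.  Assuming itr_scale = 0, itr_round = 8 and the
 * stored value is (888 + 255) >> 8 = 4, which is then OR'd with
 * FM10K_ITR_ADAPTIVE to keep the adaptive flag set.
 */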
139918283cadSAlexander Duyck 
140018283cadSAlexander Duyck static void fm10k_qv_enable(struct fm10k_q_vector *q_vector)
140118283cadSAlexander Duyck {
140218283cadSAlexander Duyck 	/* Enable auto-mask and clear the current mask */
140318283cadSAlexander Duyck 	u32 itr = FM10K_ITR_ENABLE;
140418283cadSAlexander Duyck 
140518283cadSAlexander Duyck 	/* Update Tx ITR */
140618283cadSAlexander Duyck 	fm10k_update_itr(&q_vector->tx);
140718283cadSAlexander Duyck 
140818283cadSAlexander Duyck 	/* Update Rx ITR */
140918283cadSAlexander Duyck 	fm10k_update_itr(&q_vector->rx);
141018283cadSAlexander Duyck 
141118283cadSAlexander Duyck 	/* Store Tx itr in timer slot 0 */
141218283cadSAlexander Duyck 	itr |= (q_vector->tx.itr & FM10K_ITR_MAX);
141318283cadSAlexander Duyck 
141418283cadSAlexander Duyck 	/* Shift Rx itr to timer slot 1 */
141518283cadSAlexander Duyck 	itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT;
141618283cadSAlexander Duyck 
141718283cadSAlexander Duyck 	/* Write the final value to the ITR register */
141818283cadSAlexander Duyck 	writel(itr, q_vector->itr);
141918283cadSAlexander Duyck }
142018283cadSAlexander Duyck 
142118283cadSAlexander Duyck static int fm10k_poll(struct napi_struct *napi, int budget)
142218283cadSAlexander Duyck {
142318283cadSAlexander Duyck 	struct fm10k_q_vector *q_vector =
142418283cadSAlexander Duyck 			       container_of(napi, struct fm10k_q_vector, napi);
1425b101c962SAlexander Duyck 	struct fm10k_ring *ring;
142632b3e08fSJesse Brandeburg 	int per_ring_budget, work_done = 0;
1427b101c962SAlexander Duyck 	bool clean_complete = true;
1428b101c962SAlexander Duyck 
1429144d8305SAlexander Duyck 	fm10k_for_each_ring(ring, q_vector->tx) {
1430144d8305SAlexander Duyck 		if (!fm10k_clean_tx_irq(q_vector, ring, budget))
1431144d8305SAlexander Duyck 			clean_complete = false;
1432144d8305SAlexander Duyck 	}
1433b101c962SAlexander Duyck 
14349f872986SAlexander Duyck 	/* Handle case where we are called by netpoll with a budget of 0 */
14359f872986SAlexander Duyck 	if (budget <= 0)
14369f872986SAlexander Duyck 		return budget;
14379f872986SAlexander Duyck 
1438b101c962SAlexander Duyck 	/* attempt to distribute budget to each queue fairly, but don't
1439b101c962SAlexander Duyck 	 * allow the budget to go below 1 because we'll exit polling
1440b101c962SAlexander Duyck 	 */
1441b101c962SAlexander Duyck 	if (q_vector->rx.count > 1)
1442b101c962SAlexander Duyck 		per_ring_budget = max(budget / q_vector->rx.count, 1);
1443b101c962SAlexander Duyck 	else
1444b101c962SAlexander Duyck 		per_ring_budget = budget;
1445b101c962SAlexander Duyck 
144632b3e08fSJesse Brandeburg 	fm10k_for_each_ring(ring, q_vector->rx) {
144732b3e08fSJesse Brandeburg 		int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
144832b3e08fSJesse Brandeburg 
144932b3e08fSJesse Brandeburg 		work_done += work;
1450144d8305SAlexander Duyck 		if (work >= per_ring_budget)
1451144d8305SAlexander Duyck 			clean_complete = false;
145232b3e08fSJesse Brandeburg 	}
1453b101c962SAlexander Duyck 
1454b101c962SAlexander Duyck 	/* If all work not completed, return budget and keep polling */
1455b101c962SAlexander Duyck 	if (!clean_complete)
1456b101c962SAlexander Duyck 		return budget;
145718283cadSAlexander Duyck 
14580bcd952fSJesse Brandeburg 	/* Exit the polling mode, but don't re-enable interrupts if stack might
14590bcd952fSJesse Brandeburg 	 * poll us due to busy-polling
14600bcd952fSJesse Brandeburg 	 */
14610bcd952fSJesse Brandeburg 	if (likely(napi_complete_done(napi, work_done)))
146218283cadSAlexander Duyck 		fm10k_qv_enable(q_vector);
146318283cadSAlexander Duyck 
1464e5fbfb78SJacob Keller 	return min(work_done, budget - 1);
146518283cadSAlexander Duyck }
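
/* Worked example (added for clarity, not part of the original file):
 * with a NAPI budget of 64 and three Rx rings on this q_vector,
 * per_ring_budget = max(64 / 3, 1) = 21, so each ring may clean at
 * most 21 packets before the poll loop reports incomplete work and
 * keeps the vector in polling mode.
 */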
146618283cadSAlexander Duyck 
146718283cadSAlexander Duyck /**
1468aa3ac822SAlexander Duyck  * fm10k_set_qos_queues: Allocate queues for a QoS-enabled device
1469aa3ac822SAlexander Duyck  * @interface: board private structure to initialize
1470aa3ac822SAlexander Duyck  *
1471aa3ac822SAlexander Duyck  * When QoS (Quality of Service) is enabled, allocate queues for
1472aa3ac822SAlexander Duyck  * each traffic class.  If multiqueue isn't available, then abort QoS
1473aa3ac822SAlexander Duyck  * initialization.
1474aa3ac822SAlexander Duyck  *
1475aa3ac822SAlexander Duyck  * This function handles all combinations of QoS and RSS.
1476aa3ac822SAlexander Duyck  *
1477aa3ac822SAlexander Duyck  **/
1478aa3ac822SAlexander Duyck static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
1479aa3ac822SAlexander Duyck {
1480aa3ac822SAlexander Duyck 	struct net_device *dev = interface->netdev;
1481aa3ac822SAlexander Duyck 	struct fm10k_ring_feature *f;
1482aa3ac822SAlexander Duyck 	int rss_i, i;
1483aa3ac822SAlexander Duyck 	int pcs;
1484aa3ac822SAlexander Duyck 
1485aa3ac822SAlexander Duyck 	/* Map queue offset and counts onto allocated tx queues */
1486aa3ac822SAlexander Duyck 	pcs = netdev_get_num_tc(dev);
1487aa3ac822SAlexander Duyck 
1488aa3ac822SAlexander Duyck 	if (pcs <= 1)
1489aa3ac822SAlexander Duyck 		return false;
1490aa3ac822SAlexander Duyck 
1491aa3ac822SAlexander Duyck 	/* set QoS mask and indices */
1492aa3ac822SAlexander Duyck 	f = &interface->ring_feature[RING_F_QOS];
1493aa3ac822SAlexander Duyck 	f->indices = pcs;
1494fcdb0a99SBruce Allan 	f->mask = BIT(fls(pcs - 1)) - 1;
1495aa3ac822SAlexander Duyck 
1496aa3ac822SAlexander Duyck 	/* determine the upper limit for our current DCB mode */
1497aa3ac822SAlexander Duyck 	rss_i = interface->hw.mac.max_queues / pcs;
1498fcdb0a99SBruce Allan 	rss_i = BIT(fls(rss_i) - 1);
1499aa3ac822SAlexander Duyck 
1500aa3ac822SAlexander Duyck 	/* set RSS mask and indices */
1501aa3ac822SAlexander Duyck 	f = &interface->ring_feature[RING_F_RSS];
1502aa3ac822SAlexander Duyck 	rss_i = min_t(u16, rss_i, f->limit);
1503aa3ac822SAlexander Duyck 	f->indices = rss_i;
1504fcdb0a99SBruce Allan 	f->mask = BIT(fls(rss_i - 1)) - 1;
1505aa3ac822SAlexander Duyck 
1506aa3ac822SAlexander Duyck 	/* configure pause class to queue mapping */
1507aa3ac822SAlexander Duyck 	for (i = 0; i < pcs; i++)
1508aa3ac822SAlexander Duyck 		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
1509aa3ac822SAlexander Duyck 
1510aa3ac822SAlexander Duyck 	interface->num_rx_queues = rss_i * pcs;
1511aa3ac822SAlexander Duyck 	interface->num_tx_queues = rss_i * pcs;
1512aa3ac822SAlexander Duyck 
1513aa3ac822SAlexander Duyck 	return true;
1514aa3ac822SAlexander Duyck }
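
/* Worked example (added for clarity, not part of the original file):
 * with pcs = 3 traffic classes and 128 hardware queues, the QoS mask
 * is BIT(fls(2)) - 1 = 3; the per-class RSS count rounds down to a
 * power of two, 128 / 3 = 42 -> BIT(fls(42) - 1) = 32, so the device
 * ends up with 32 * 3 = 96 Tx and 96 Rx queues (assuming f->limit
 * does not cap rss_i first).
 */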
1515aa3ac822SAlexander Duyck 
1516aa3ac822SAlexander Duyck /**
1517aa3ac822SAlexander Duyck  * fm10k_set_rss_queues: Allocate queues for RSS
1518aa3ac822SAlexander Duyck  * @interface: board private structure to initialize
1519aa3ac822SAlexander Duyck  *
1520aa3ac822SAlexander Duyck  * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
1521aa3ac822SAlexander Duyck  * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
1522aa3ac822SAlexander Duyck  *
1523aa3ac822SAlexander Duyck  **/
1524aa3ac822SAlexander Duyck static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
1525aa3ac822SAlexander Duyck {
1526aa3ac822SAlexander Duyck 	struct fm10k_ring_feature *f;
1527aa3ac822SAlexander Duyck 	u16 rss_i;
1528aa3ac822SAlexander Duyck 
1529aa3ac822SAlexander Duyck 	f = &interface->ring_feature[RING_F_RSS];
1530aa3ac822SAlexander Duyck 	rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit);
1531aa3ac822SAlexander Duyck 
1532aa3ac822SAlexander Duyck 	/* record indices and power of 2 mask for RSS */
1533aa3ac822SAlexander Duyck 	f->indices = rss_i;
1534fcdb0a99SBruce Allan 	f->mask = BIT(fls(rss_i - 1)) - 1;
1535aa3ac822SAlexander Duyck 
1536aa3ac822SAlexander Duyck 	interface->num_rx_queues = rss_i;
1537aa3ac822SAlexander Duyck 	interface->num_tx_queues = rss_i;
1538aa3ac822SAlexander Duyck 
1539aa3ac822SAlexander Duyck 	return true;
1540aa3ac822SAlexander Duyck }
1541aa3ac822SAlexander Duyck 
1542aa3ac822SAlexander Duyck /**
154318283cadSAlexander Duyck  * fm10k_set_num_queues: Allocate queues for device, feature dependent
154418283cadSAlexander Duyck  * @interface: board private structure to initialize
154518283cadSAlexander Duyck  *
154618283cadSAlexander Duyck  * This is the top level queue allocation routine.  The order here is very
154718283cadSAlexander Duyck  * important, starting with the largest combination of features turned on
154818283cadSAlexander Duyck  * at once, and ending with the smallest set of features.  This way large
154918283cadSAlexander Duyck  * combinations can be allocated if they're turned on, and smaller
155018283cadSAlexander Duyck  * combinations are the fall through conditions.
155118283cadSAlexander Duyck  *
155218283cadSAlexander Duyck  **/
155318283cadSAlexander Duyck static void fm10k_set_num_queues(struct fm10k_intfc *interface)
155418283cadSAlexander Duyck {
1555b3525696SJacob Keller 	/* Attempt to setup QoS and RSS first */
1556aa3ac822SAlexander Duyck 	if (fm10k_set_qos_queues(interface))
1557aa3ac822SAlexander Duyck 		return;
1558aa3ac822SAlexander Duyck 
1559b3525696SJacob Keller 	/* If we don't have QoS, just fallback to only RSS. */
1560aa3ac822SAlexander Duyck 	fm10k_set_rss_queues(interface);
156118283cadSAlexander Duyck }
156218283cadSAlexander Duyck 
156318283cadSAlexander Duyck /**
15644be37c42SJacob Keller  * fm10k_reset_num_queues - Reset the number of queues to zero
15654be37c42SJacob Keller  * @interface: board private structure
15664be37c42SJacob Keller  *
15674be37c42SJacob Keller  * This function should be called whenever we need to reset the number of
15684be37c42SJacob Keller  * queues after an error condition.
15694be37c42SJacob Keller  */
15704be37c42SJacob Keller static void fm10k_reset_num_queues(struct fm10k_intfc *interface)
15714be37c42SJacob Keller {
15724be37c42SJacob Keller 	interface->num_tx_queues = 0;
15734be37c42SJacob Keller 	interface->num_rx_queues = 0;
15744be37c42SJacob Keller 	interface->num_q_vectors = 0;
15754be37c42SJacob Keller }
15764be37c42SJacob Keller 
15774be37c42SJacob Keller /**
157818283cadSAlexander Duyck  * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
157918283cadSAlexander Duyck  * @interface: board private structure to initialize
158018283cadSAlexander Duyck  * @v_count: q_vectors allocated on interface, used for ring interleaving
158118283cadSAlexander Duyck  * @v_idx: index of vector in interface struct
158218283cadSAlexander Duyck  * @txr_count: total number of Tx rings to allocate
158318283cadSAlexander Duyck  * @txr_idx: index of first Tx ring to allocate
158418283cadSAlexander Duyck  * @rxr_count: total number of Rx rings to allocate
158518283cadSAlexander Duyck  * @rxr_idx: index of first Rx ring to allocate
158618283cadSAlexander Duyck  *
158718283cadSAlexander Duyck  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
158818283cadSAlexander Duyck  **/
158918283cadSAlexander Duyck static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
159018283cadSAlexander Duyck 				unsigned int v_count, unsigned int v_idx,
159118283cadSAlexander Duyck 				unsigned int txr_count, unsigned int txr_idx,
159218283cadSAlexander Duyck 				unsigned int rxr_count, unsigned int rxr_idx)
159318283cadSAlexander Duyck {
159418283cadSAlexander Duyck 	struct fm10k_q_vector *q_vector;
1595e27ef599SAlexander Duyck 	struct fm10k_ring *ring;
15969a00536cSGustavo A. R. Silva 	int ring_count;
159718283cadSAlexander Duyck 
159818283cadSAlexander Duyck 	ring_count = txr_count + rxr_count;
159918283cadSAlexander Duyck 
160018283cadSAlexander Duyck 	/* allocate q_vector and rings */
16019a00536cSGustavo A. R. Silva 	q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL);
160218283cadSAlexander Duyck 	if (!q_vector)
160318283cadSAlexander Duyck 		return -ENOMEM;
160418283cadSAlexander Duyck 
160518283cadSAlexander Duyck 	/* initialize NAPI */
1606b48b89f9SJakub Kicinski 	netif_napi_add(interface->netdev, &q_vector->napi, fm10k_poll);
160718283cadSAlexander Duyck 
160818283cadSAlexander Duyck 	/* tie q_vector and interface together */
160918283cadSAlexander Duyck 	interface->q_vector[v_idx] = q_vector;
161018283cadSAlexander Duyck 	q_vector->interface = interface;
161118283cadSAlexander Duyck 	q_vector->v_idx = v_idx;
161218283cadSAlexander Duyck 
1613e27ef599SAlexander Duyck 	/* initialize pointer to rings */
1614e27ef599SAlexander Duyck 	ring = q_vector->ring;
1615e27ef599SAlexander Duyck 
161618283cadSAlexander Duyck 	/* save Tx ring container info */
1617e27ef599SAlexander Duyck 	q_vector->tx.ring = ring;
1618e27ef599SAlexander Duyck 	q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK;
161918283cadSAlexander Duyck 	q_vector->tx.itr = interface->tx_itr;
1620242722ddSJacob Keller 	q_vector->tx.itr_scale = interface->hw.mac.itr_scale;
162118283cadSAlexander Duyck 	q_vector->tx.count = txr_count;
162218283cadSAlexander Duyck 
1623e27ef599SAlexander Duyck 	while (txr_count) {
1624e27ef599SAlexander Duyck 		/* assign generic ring traits */
1625e27ef599SAlexander Duyck 		ring->dev = &interface->pdev->dev;
1626e27ef599SAlexander Duyck 		ring->netdev = interface->netdev;
1627e27ef599SAlexander Duyck 
1628e27ef599SAlexander Duyck 		/* configure backlink on ring */
1629e27ef599SAlexander Duyck 		ring->q_vector = q_vector;
1630e27ef599SAlexander Duyck 
1631e27ef599SAlexander Duyck 		/* apply Tx specific ring traits */
1632e27ef599SAlexander Duyck 		ring->count = interface->tx_ring_count;
1633e27ef599SAlexander Duyck 		ring->queue_index = txr_idx;
1634e27ef599SAlexander Duyck 
1635e27ef599SAlexander Duyck 		/* assign ring to interface */
1636e27ef599SAlexander Duyck 		interface->tx_ring[txr_idx] = ring;
1637e27ef599SAlexander Duyck 
1638e27ef599SAlexander Duyck 		/* update count and index */
1639e27ef599SAlexander Duyck 		txr_count--;
1640e27ef599SAlexander Duyck 		txr_idx += v_count;
1641e27ef599SAlexander Duyck 
1642e27ef599SAlexander Duyck 		/* push pointer to next ring */
1643e27ef599SAlexander Duyck 		ring++;
1644e27ef599SAlexander Duyck 	}
1645e27ef599SAlexander Duyck 
164618283cadSAlexander Duyck 	/* save Rx ring container info */
1647e27ef599SAlexander Duyck 	q_vector->rx.ring = ring;
164818283cadSAlexander Duyck 	q_vector->rx.itr = interface->rx_itr;
1649242722ddSJacob Keller 	q_vector->rx.itr_scale = interface->hw.mac.itr_scale;
165018283cadSAlexander Duyck 	q_vector->rx.count = rxr_count;
165118283cadSAlexander Duyck 
1652e27ef599SAlexander Duyck 	while (rxr_count) {
1653e27ef599SAlexander Duyck 		/* assign generic ring traits */
1654e27ef599SAlexander Duyck 		ring->dev = &interface->pdev->dev;
1655e27ef599SAlexander Duyck 		ring->netdev = interface->netdev;
16565cd5e2e9SAlexander Duyck 		rcu_assign_pointer(ring->l2_accel, interface->l2_accel);
1657e27ef599SAlexander Duyck 
1658e27ef599SAlexander Duyck 		/* configure backlink on ring */
1659e27ef599SAlexander Duyck 		ring->q_vector = q_vector;
1660e27ef599SAlexander Duyck 
1661e27ef599SAlexander Duyck 		/* apply Rx specific ring traits */
1662e27ef599SAlexander Duyck 		ring->count = interface->rx_ring_count;
1663e27ef599SAlexander Duyck 		ring->queue_index = rxr_idx;
1664e27ef599SAlexander Duyck 
1665e27ef599SAlexander Duyck 		/* assign ring to interface */
1666e27ef599SAlexander Duyck 		interface->rx_ring[rxr_idx] = ring;
1667e27ef599SAlexander Duyck 
1668e27ef599SAlexander Duyck 		/* update count and index */
1669e27ef599SAlexander Duyck 		rxr_count--;
1670e27ef599SAlexander Duyck 		rxr_idx += v_count;
1671e27ef599SAlexander Duyck 
1672e27ef599SAlexander Duyck 		/* push pointer to next ring */
1673e27ef599SAlexander Duyck 		ring++;
1674e27ef599SAlexander Duyck 	}
1675e27ef599SAlexander Duyck 
16767461fd91SAlexander Duyck 	fm10k_dbg_q_vector_init(q_vector);
16777461fd91SAlexander Duyck 
167818283cadSAlexander Duyck 	return 0;
167918283cadSAlexander Duyck }
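
/* Worked example (added for clarity, not part of the original file):
 * rings are interleaved across vectors by stepping the ring index by
 * v_count.  With v_count = 4 q_vectors, the vector at v_idx = 1 that
 * owns txr_count = 2 Tx rings starting at txr_idx = 1 is assigned
 * tx_ring[1] and tx_ring[5].
 */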
168018283cadSAlexander Duyck 
168118283cadSAlexander Duyck /**
168218283cadSAlexander Duyck  * fm10k_free_q_vector - Free memory allocated for specific interrupt vector
168318283cadSAlexander Duyck  * @interface: board private structure to initialize
168418283cadSAlexander Duyck  * @v_idx: Index of vector to be freed
168518283cadSAlexander Duyck  *
168618283cadSAlexander Duyck  * This function frees the memory allocated to the q_vector.  In addition if
168718283cadSAlexander Duyck  * NAPI is enabled it will delete any references to the NAPI struct prior
168818283cadSAlexander Duyck  * to freeing the q_vector.
168918283cadSAlexander Duyck  **/
169018283cadSAlexander Duyck static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx)
169118283cadSAlexander Duyck {
169218283cadSAlexander Duyck 	struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];
1693e27ef599SAlexander Duyck 	struct fm10k_ring *ring;
1694e27ef599SAlexander Duyck 
16957461fd91SAlexander Duyck 	fm10k_dbg_q_vector_exit(q_vector);
16967461fd91SAlexander Duyck 
1697e27ef599SAlexander Duyck 	fm10k_for_each_ring(ring, q_vector->tx)
1698e27ef599SAlexander Duyck 		interface->tx_ring[ring->queue_index] = NULL;
1699e27ef599SAlexander Duyck 
1700e27ef599SAlexander Duyck 	fm10k_for_each_ring(ring, q_vector->rx)
1701e27ef599SAlexander Duyck 		interface->rx_ring[ring->queue_index] = NULL;
170218283cadSAlexander Duyck 
170318283cadSAlexander Duyck 	interface->q_vector[v_idx] = NULL;
170418283cadSAlexander Duyck 	netif_napi_del(&q_vector->napi);
170518283cadSAlexander Duyck 	kfree_rcu(q_vector, rcu);
170618283cadSAlexander Duyck }
170718283cadSAlexander Duyck 
170818283cadSAlexander Duyck /**
170918283cadSAlexander Duyck  * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors
171018283cadSAlexander Duyck  * @interface: board private structure to initialize
171118283cadSAlexander Duyck  *
171218283cadSAlexander Duyck  * We allocate one q_vector per queue interrupt.  If allocation fails we
171318283cadSAlexander Duyck  * return -ENOMEM.
171418283cadSAlexander Duyck  **/
171518283cadSAlexander Duyck static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
171618283cadSAlexander Duyck {
171718283cadSAlexander Duyck 	unsigned int q_vectors = interface->num_q_vectors;
171818283cadSAlexander Duyck 	unsigned int rxr_remaining = interface->num_rx_queues;
171918283cadSAlexander Duyck 	unsigned int txr_remaining = interface->num_tx_queues;
172018283cadSAlexander Duyck 	unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
172118283cadSAlexander Duyck 	int err;
172218283cadSAlexander Duyck 
172318283cadSAlexander Duyck 	if (q_vectors >= (rxr_remaining + txr_remaining)) {
172418283cadSAlexander Duyck 		for (; rxr_remaining; v_idx++) {
172518283cadSAlexander Duyck 			err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
172618283cadSAlexander Duyck 						   0, 0, 1, rxr_idx);
172718283cadSAlexander Duyck 			if (err)
172818283cadSAlexander Duyck 				goto err_out;
172918283cadSAlexander Duyck 
173018283cadSAlexander Duyck 			/* update counts and index */
173118283cadSAlexander Duyck 			rxr_remaining--;
173218283cadSAlexander Duyck 			rxr_idx++;
173318283cadSAlexander Duyck 		}
173418283cadSAlexander Duyck 	}
173518283cadSAlexander Duyck 
173618283cadSAlexander Duyck 	for (; v_idx < q_vectors; v_idx++) {
173718283cadSAlexander Duyck 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
173818283cadSAlexander Duyck 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
173918283cadSAlexander Duyck 
174018283cadSAlexander Duyck 		err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
174118283cadSAlexander Duyck 					   tqpv, txr_idx,
174218283cadSAlexander Duyck 					   rqpv, rxr_idx);
174318283cadSAlexander Duyck 
174418283cadSAlexander Duyck 		if (err)
174518283cadSAlexander Duyck 			goto err_out;
174618283cadSAlexander Duyck 
174718283cadSAlexander Duyck 		/* update counts and index */
174818283cadSAlexander Duyck 		rxr_remaining -= rqpv;
174918283cadSAlexander Duyck 		txr_remaining -= tqpv;
175018283cadSAlexander Duyck 		rxr_idx++;
175118283cadSAlexander Duyck 		txr_idx++;
175218283cadSAlexander Duyck 	}
175318283cadSAlexander Duyck 
175418283cadSAlexander Duyck 	return 0;
175518283cadSAlexander Duyck 
175618283cadSAlexander Duyck err_out:
17574be37c42SJacob Keller 	fm10k_reset_num_queues(interface);
175818283cadSAlexander Duyck 
175918283cadSAlexander Duyck 	while (v_idx--)
176018283cadSAlexander Duyck 		fm10k_free_q_vector(interface, v_idx);
176118283cadSAlexander Duyck 
176218283cadSAlexander Duyck 	return -ENOMEM;
176318283cadSAlexander Duyck }
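
/* Worked example (added for clarity, not part of the original file):
 * with 16 Rx and 16 Tx queues over 8 q_vectors, each iteration sees
 * rqpv = DIV_ROUND_UP(16, 8) = 2 and tqpv = 2, so every vector
 * services two Rx and two Tx rings.  Because the divisor shrinks with
 * the remaining vector count, uneven totals (say 10 rings over 4
 * vectors) still split as evenly as possible: 3, 3, 2, 2.
 */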
176418283cadSAlexander Duyck 
176518283cadSAlexander Duyck /**
176618283cadSAlexander Duyck  * fm10k_free_q_vectors - Free memory allocated for interrupt vectors
176718283cadSAlexander Duyck  * @interface: board private structure to initialize
176818283cadSAlexander Duyck  *
176918283cadSAlexander Duyck  * This function frees the memory allocated to the q_vectors.  In addition if
177018283cadSAlexander Duyck  * NAPI is enabled it will delete any references to the NAPI struct prior
177118283cadSAlexander Duyck  * to freeing the q_vector.
177218283cadSAlexander Duyck  **/
177318283cadSAlexander Duyck static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
177418283cadSAlexander Duyck {
177518283cadSAlexander Duyck 	int v_idx = interface->num_q_vectors;
177618283cadSAlexander Duyck 
17774be37c42SJacob Keller 	fm10k_reset_num_queues(interface);
177818283cadSAlexander Duyck 
177918283cadSAlexander Duyck 	while (v_idx--)
178018283cadSAlexander Duyck 		fm10k_free_q_vector(interface, v_idx);
178118283cadSAlexander Duyck }
178218283cadSAlexander Duyck 
178318283cadSAlexander Duyck /**
1784262de08fSJesse Brandeburg  * fm10k_reset_msix_capability - reset MSI-X capability
178518283cadSAlexander Duyck  * @interface: board private structure to initialize
178618283cadSAlexander Duyck  *
178718283cadSAlexander Duyck  * Reset the MSI-X capability back to its starting state
178818283cadSAlexander Duyck  **/
178918283cadSAlexander Duyck static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
179018283cadSAlexander Duyck {
179118283cadSAlexander Duyck 	pci_disable_msix(interface->pdev);
179218283cadSAlexander Duyck 	kfree(interface->msix_entries);
179318283cadSAlexander Duyck 	interface->msix_entries = NULL;
179418283cadSAlexander Duyck }
179518283cadSAlexander Duyck 
179618283cadSAlexander Duyck /**
1797262de08fSJesse Brandeburg  * fm10k_init_msix_capability - configure MSI-X capability
179818283cadSAlexander Duyck  * @interface: board private structure to initialize
179918283cadSAlexander Duyck  *
180018283cadSAlexander Duyck  * Attempt to configure the interrupts using the best available
180118283cadSAlexander Duyck  * capabilities of the hardware and the kernel.
180218283cadSAlexander Duyck  **/
180318283cadSAlexander Duyck static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
180418283cadSAlexander Duyck {
180518283cadSAlexander Duyck 	struct fm10k_hw *hw = &interface->hw;
180618283cadSAlexander Duyck 	int v_budget, vector;
180718283cadSAlexander Duyck 
180818283cadSAlexander Duyck 	/* It's easy to be greedy for MSI-X vectors, but it really
180918283cadSAlexander Duyck 	 * doesn't do us much good if we have a lot more vectors
181018283cadSAlexander Duyck 	 * than CPUs.  So let's be conservative and only ask for
181118283cadSAlexander Duyck 	 * (roughly) the same number of vectors as there are CPUs.
181218283cadSAlexander Duyck 	 * The default is to use pairs of vectors.
181318283cadSAlexander Duyck 	 */
181418283cadSAlexander Duyck 	v_budget = max(interface->num_rx_queues, interface->num_tx_queues);
181518283cadSAlexander Duyck 	v_budget = min_t(u16, v_budget, num_online_cpus());
181618283cadSAlexander Duyck 
181718283cadSAlexander Duyck 	/* account for vectors not related to queues */
1818a3ffeaf7SJacob Keller 	v_budget += NON_Q_VECTORS;
181918283cadSAlexander Duyck 
182018283cadSAlexander Duyck 	/* At the same time, hardware can only support a maximum of
182118283cadSAlexander Duyck 	 * hw.mac->max_msix_vectors vectors.  With features
182218283cadSAlexander Duyck 	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
182318283cadSAlexander Duyck 	 * descriptor queues supported by our device.  Thus, we cap it off in
182418283cadSAlexander Duyck 	 * those rare cases where the CPU count also exceeds our vector limit.
182518283cadSAlexander Duyck 	 */
182618283cadSAlexander Duyck 	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
182718283cadSAlexander Duyck 
182818283cadSAlexander Duyck 	/* A failure in MSI-X entry allocation is fatal. */
182918283cadSAlexander Duyck 	interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
183018283cadSAlexander Duyck 					  GFP_KERNEL);
183118283cadSAlexander Duyck 	if (!interface->msix_entries)
183218283cadSAlexander Duyck 		return -ENOMEM;
183318283cadSAlexander Duyck 
183418283cadSAlexander Duyck 	/* populate entry values */
183518283cadSAlexander Duyck 	for (vector = 0; vector < v_budget; vector++)
183618283cadSAlexander Duyck 		interface->msix_entries[vector].entry = vector;
183718283cadSAlexander Duyck 
183818283cadSAlexander Duyck 	/* Attempt to enable MSI-X with requested value */
183918283cadSAlexander Duyck 	v_budget = pci_enable_msix_range(interface->pdev,
184018283cadSAlexander Duyck 					 interface->msix_entries,
184118283cadSAlexander Duyck 					 MIN_MSIX_COUNT(hw),
184218283cadSAlexander Duyck 					 v_budget);
184318283cadSAlexander Duyck 	if (v_budget < 0) {
184418283cadSAlexander Duyck 		kfree(interface->msix_entries);
184518283cadSAlexander Duyck 		interface->msix_entries = NULL;
184630e23b71SJacob Keller 		return v_budget;
184718283cadSAlexander Duyck 	}
184818283cadSAlexander Duyck 
184918283cadSAlexander Duyck 	/* record the number of queues available for q_vectors */
1850a3ffeaf7SJacob Keller 	interface->num_q_vectors = v_budget - NON_Q_VECTORS;
185118283cadSAlexander Duyck 
185218283cadSAlexander Duyck 	return 0;
185318283cadSAlexander Duyck }
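
/* Worked example (added for clarity, not part of the original file):
 * on a 12-CPU host with 16 Rx and 16 Tx queues,
 * v_budget = min(max(16, 16), 12) + NON_Q_VECTORS, and the result is
 * clamped once more to hw->mac.max_msix_vectors.
 * pci_enable_msix_range() may still grant fewer vectors, down to
 * MIN_MSIX_COUNT(hw).
 */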
185418283cadSAlexander Duyck 
1855aa3ac822SAlexander Duyck /**
1856aa3ac822SAlexander Duyck  * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS
1857aa3ac822SAlexander Duyck  * @interface: Interface structure containing rings and devices
1858aa3ac822SAlexander Duyck  *
1859aa3ac822SAlexander Duyck  * Cache the descriptor ring offsets for QoS
1860aa3ac822SAlexander Duyck  **/
1861aa3ac822SAlexander Duyck static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
1862aa3ac822SAlexander Duyck {
1863aa3ac822SAlexander Duyck 	struct net_device *dev = interface->netdev;
18647a432d57SJacob Keller 	int pc, offset, rss_i, i;
1865aa3ac822SAlexander Duyck 	u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
1866aa3ac822SAlexander Duyck 	u8 num_pcs = netdev_get_num_tc(dev);
1867aa3ac822SAlexander Duyck 
1868aa3ac822SAlexander Duyck 	if (num_pcs <= 1)
1869aa3ac822SAlexander Duyck 		return false;
1870aa3ac822SAlexander Duyck 
1871aa3ac822SAlexander Duyck 	rss_i = interface->ring_feature[RING_F_RSS].indices;
1872aa3ac822SAlexander Duyck 
1873aa3ac822SAlexander Duyck 	for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
18747a432d57SJacob Keller 		int q_idx = pc;
18757a432d57SJacob Keller 
1876aa3ac822SAlexander Duyck 		for (i = 0; i < rss_i; i++) {
1877aa3ac822SAlexander Duyck 			interface->tx_ring[offset + i]->reg_idx = q_idx;
1878aa3ac822SAlexander Duyck 			interface->tx_ring[offset + i]->qos_pc = pc;
1879aa3ac822SAlexander Duyck 			interface->rx_ring[offset + i]->reg_idx = q_idx;
1880aa3ac822SAlexander Duyck 			interface->rx_ring[offset + i]->qos_pc = pc;
1881aa3ac822SAlexander Duyck 			q_idx += pc_stride;
1882aa3ac822SAlexander Duyck 		}
1883aa3ac822SAlexander Duyck 	}
1884aa3ac822SAlexander Duyck 
1885aa3ac822SAlexander Duyck 	return true;
1886aa3ac822SAlexander Duyck }
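
/* Worked example (added for clarity, not part of the original file):
 * with two traffic classes the QoS mask is 1, so pc_stride = 2.
 * Given rss_i = 4, class 0 maps software rings 0-3 onto register
 * indices 0, 2, 4, 6 while class 1 maps rings 4-7 onto 1, 3, 5, 7,
 * interleaving the classes across the hardware queues.
 */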
1887aa3ac822SAlexander Duyck 
1888aa3ac822SAlexander Duyck /**
1889aa3ac822SAlexander Duyck  * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS
1890aa3ac822SAlexander Duyck  * @interface: Interface structure containing rings and devices
1891aa3ac822SAlexander Duyck  *
1892aa3ac822SAlexander Duyck  * Cache the descriptor ring offsets for RSS
1893aa3ac822SAlexander Duyck  **/
1894aa3ac822SAlexander Duyck static void fm10k_cache_ring_rss(struct fm10k_intfc *interface)
1895aa3ac822SAlexander Duyck {
1896aa3ac822SAlexander Duyck 	int i;
1897aa3ac822SAlexander Duyck 
1898aa3ac822SAlexander Duyck 	for (i = 0; i < interface->num_rx_queues; i++)
1899aa3ac822SAlexander Duyck 		interface->rx_ring[i]->reg_idx = i;
1900aa3ac822SAlexander Duyck 
1901aa3ac822SAlexander Duyck 	for (i = 0; i < interface->num_tx_queues; i++)
1902aa3ac822SAlexander Duyck 		interface->tx_ring[i]->reg_idx = i;
1903aa3ac822SAlexander Duyck }
1904aa3ac822SAlexander Duyck 
1905aa3ac822SAlexander Duyck /**
1906aa3ac822SAlexander Duyck  * fm10k_assign_rings - Map rings to network devices
1907aa3ac822SAlexander Duyck  * @interface: Interface structure containing rings and devices
1908aa3ac822SAlexander Duyck  *
1909aa3ac822SAlexander Duyck  * This function is meant to go through and configure both the network
1910aa3ac822SAlexander Duyck  * devices so that they contain rings, and configure the rings so that
1911aa3ac822SAlexander Duyck  * they function with their network devices.
1912aa3ac822SAlexander Duyck  **/
1913aa3ac822SAlexander Duyck static void fm10k_assign_rings(struct fm10k_intfc *interface)
1914aa3ac822SAlexander Duyck {
1915aa3ac822SAlexander Duyck 	if (fm10k_cache_ring_qos(interface))
1916aa3ac822SAlexander Duyck 		return;
1917aa3ac822SAlexander Duyck 
1918aa3ac822SAlexander Duyck 	fm10k_cache_ring_rss(interface);
1919aa3ac822SAlexander Duyck }
1920aa3ac822SAlexander Duyck 
192118283cadSAlexander Duyck static void fm10k_init_reta(struct fm10k_intfc *interface)
192218283cadSAlexander Duyck {
192318283cadSAlexander Duyck 	u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
1924540a5d85SJacob Keller 	u32 reta;
192518283cadSAlexander Duyck 
19261012014eSKeller, Jacob E 	/* If the Rx flow indirection table has been configured manually, we
19271012014eSKeller, Jacob E 	 * need to maintain it when possible.
19281012014eSKeller, Jacob E 	 */
19291012014eSKeller, Jacob E 	if (netif_is_rxfh_configured(interface->netdev)) {
193018283cadSAlexander Duyck 		for (i = FM10K_RETA_SIZE; i--;) {
193118283cadSAlexander Duyck 			reta = interface->reta[i];
193218283cadSAlexander Duyck 			if ((((reta << 24) >> 24) < rss_i) &&
193318283cadSAlexander Duyck 			    (((reta << 16) >> 24) < rss_i) &&
193418283cadSAlexander Duyck 			    (((reta <<  8) >> 24) < rss_i) &&
193518283cadSAlexander Duyck 			    (((reta)       >> 24) < rss_i))
193618283cadSAlexander Duyck 				continue;
19371012014eSKeller, Jacob E 
19381012014eSKeller, Jacob E 			/* this should never happen */
19391012014eSKeller, Jacob E 			dev_err(&interface->pdev->dev,
19401012014eSKeller, Jacob E 				"RSS indirection table assigned flows out of queue bounds. Reconfiguring.\n");
194118283cadSAlexander Duyck 			goto repopulate_reta;
194218283cadSAlexander Duyck 		}
194318283cadSAlexander Duyck 
194418283cadSAlexander Duyck 		/* do nothing if all of the elements are in bounds */
194518283cadSAlexander Duyck 		return;
194618283cadSAlexander Duyck 	}
194718283cadSAlexander Duyck 
194818283cadSAlexander Duyck repopulate_reta:
1949540a5d85SJacob Keller 	fm10k_write_reta(interface, NULL);
195018283cadSAlexander Duyck }
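
/* Illustrative sketch (added for clarity, not part of the original
 * file): each 32-bit RETA entry packs four 8-bit queue indices, and
 * the shift pairs above isolate one byte at a time.  An equivalent
 * mask-based bounds check:
 */
static inline bool example_reta_in_bounds(u32 reta, u16 rss_i)
{
	return ((reta >>  0) & 0xff) < rss_i &&
	       ((reta >>  8) & 0xff) < rss_i &&
	       ((reta >> 16) & 0xff) < rss_i &&
	       ((reta >> 24) & 0xff) < rss_i;
}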
195118283cadSAlexander Duyck 
195218283cadSAlexander Duyck /**
195318283cadSAlexander Duyck  * fm10k_init_queueing_scheme - Determine proper queueing scheme
195418283cadSAlexander Duyck  * @interface: board private structure to initialize
195518283cadSAlexander Duyck  *
195618283cadSAlexander Duyck  * We determine which queueing scheme to use based on...
195718283cadSAlexander Duyck  * - Hardware queue count (num_*_queues)
195818283cadSAlexander Duyck  *   - defined by miscellaneous hardware support/features (RSS, etc.)
195918283cadSAlexander Duyck  **/
196018283cadSAlexander Duyck int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
196118283cadSAlexander Duyck {
196218283cadSAlexander Duyck 	int err;
196318283cadSAlexander Duyck 
196418283cadSAlexander Duyck 	/* Number of supported queues */
196518283cadSAlexander Duyck 	fm10k_set_num_queues(interface);
196618283cadSAlexander Duyck 
196718283cadSAlexander Duyck 	/* Configure MSI-X capability */
196818283cadSAlexander Duyck 	err = fm10k_init_msix_capability(interface);
196918283cadSAlexander Duyck 	if (err) {
197018283cadSAlexander Duyck 		dev_err(&interface->pdev->dev,
197118283cadSAlexander Duyck 			"Unable to initialize MSI-X capability\n");
19724be37c42SJacob Keller 		goto err_init_msix;
197318283cadSAlexander Duyck 	}
197418283cadSAlexander Duyck 
197518283cadSAlexander Duyck 	/* Allocate memory for queues */
197618283cadSAlexander Duyck 	err = fm10k_alloc_q_vectors(interface);
1977587731e6SAlexander Duyck 	if (err) {
19784be37c42SJacob Keller 		dev_err(&interface->pdev->dev,
19794be37c42SJacob Keller 			"Unable to allocate queue vectors\n");
19804be37c42SJacob Keller 		goto err_alloc_q_vectors;
1981587731e6SAlexander Duyck 	}
198218283cadSAlexander Duyck 
1983aa3ac822SAlexander Duyck 	/* Map rings to devices, and map devices to physical queues */
1984aa3ac822SAlexander Duyck 	fm10k_assign_rings(interface);
1985aa3ac822SAlexander Duyck 
198618283cadSAlexander Duyck 	/* Initialize RSS redirection table */
198718283cadSAlexander Duyck 	fm10k_init_reta(interface);
198818283cadSAlexander Duyck 
198918283cadSAlexander Duyck 	return 0;
19904be37c42SJacob Keller 
19914be37c42SJacob Keller err_alloc_q_vectors:
19924be37c42SJacob Keller 	fm10k_reset_msix_capability(interface);
19934be37c42SJacob Keller err_init_msix:
19944be37c42SJacob Keller 	fm10k_reset_num_queues(interface);
19954be37c42SJacob Keller 	return err;
199618283cadSAlexander Duyck }
199718283cadSAlexander Duyck 
199818283cadSAlexander Duyck /**
199918283cadSAlexander Duyck  * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings
200018283cadSAlexander Duyck  * @interface: board private structure to clear queueing scheme on
200118283cadSAlexander Duyck  *
200218283cadSAlexander Duyck  * We go through and clear queueing specific resources and reset the structure
200318283cadSAlexander Duyck  * to pre-load conditions
200418283cadSAlexander Duyck  **/
200518283cadSAlexander Duyck void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface)
200618283cadSAlexander Duyck {
200718283cadSAlexander Duyck 	fm10k_free_q_vectors(interface);
200818283cadSAlexander Duyck 	fm10k_reset_msix_capability(interface);
200918283cadSAlexander Duyck }
2010