/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include <linux/types.h>
#include <linux/module.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/if_macvlan.h>
#include <linux/prefetch.h>

#include "fm10k.h"

#define DRV_VERSION	"0.12.2-k"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] =
	"Intel(R) Ethernet Switch Host Interface Driver";
static const char fm10k_copyright[] =
	"Copyright (c) 2013 Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * fm10k_init_module - Driver Registration Routine
 *
 * fm10k_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init fm10k_init_module(void)
{
	pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
	pr_info("%s\n", fm10k_copyright);

	return fm10k_register_pci_driver();
}
module_init(fm10k_init_module);

/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit fm10k_exit_module(void)
{
	fm10k_unregister_pci_driver();
}
module_exit(fm10k_exit_module);

static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
				    struct fm10k_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* Only page will be NULL if buffer was consumed */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = alloc_page(GFP_ATOMIC | __GFP_COLD);
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);
		bi->page = NULL;

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{
	union fm10k_rx_desc *rx_desc;
	struct fm10k_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = FM10K_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer[i];
	i -= rx_ring->count;

	do {
		if (!fm10k_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = FM10K_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->q.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();

		/* notify hardware of new descriptors */
		writel(i, rx_ring->tail);
	}
}
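
/* Note on the ring-index arithmetic used above: "i" is biased by
 * -rx_ring->count so the refill loop can detect a wrap with a simple
 * "if (!i)" instead of comparing against the ring size on every pass.
 * Because i is a u16 the "negative" values only exist as unsigned
 * wraparound, but the logic reads most easily as signed.  A minimal
 * sketch, assuming a 256-entry ring and next_to_use == 254:
 *
 *	i = 254 - 256 = -2	refill descriptor 254
 *	i = -1			refill descriptor 255
 *	i = 0			wrap: rewind rx_desc/bi, i becomes -256
 *	i = -256		refill descriptor 0, and so on
 *
 * The final "i += rx_ring->count" converts back to a real ring index
 * before it is stored in next_to_use and written to the tail register.
 */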

/**
 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the interface
 **/
static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
				struct fm10k_rx_buffer *old_buff)
{
	struct fm10k_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	memcpy(new_buff, old_buff, sizeof(struct fm10k_rx_buffer));

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
					 FM10K_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
				    struct page *page,
				    unsigned int truesize)
{
	/* avoid re-using remote pages */
	if (unlikely(page_to_nid(page) != numa_mem_id()))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;

	/* since we are the only owner of the page and we need to
	 * increment it, just set the value to 2 in order to avoid
	 * an unnecessary locked operation
	 */
	atomic_set(&page->_count, 2);
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
		return false;

	/* bump ref count on page before it is given to the stack */
	get_page(page);
#endif

	return true;
}
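
/* Illustration of the page-flip recycling above, assuming 4 KiB pages so
 * that FM10K_RX_BUFSZ covers half a page: each receive buffer is one half
 * of a page, and while the stack owns one half the hardware can DMA into
 * the other.
 *
 *	page_offset = 0      -> hardware half is bytes [0, 2047]
 *	page_offset ^= 2048  -> hardware half is bytes [2048, 4095]
 *
 * The page is only recycled when this CPU is the sole owner
 * (page_count == 1) and the page is node-local; otherwise a fresh page
 * is allocated by fm10k_alloc_mapped_page().
 */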

/**
 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, or by attaching the page as a frag to
 * the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the interface.
 **/
static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring,
			      struct fm10k_rx_buffer *rx_buffer,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned int size = le16_to_cpu(rx_desc->w.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = FM10K_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif

	if ((size <= FM10K_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buffer->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(page) == numa_mem_id()))
			return true;

		/* this page cannot be reused so discard it */
		put_page(page);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buffer->page_offset, size, truesize);

	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}
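
/* The copybreak-style decision above, in short: frames no larger than
 * FM10K_RX_HDR_LEN are memcpy'd straight into the linear area of the
 * freshly allocated skb (the page can then usually be reused as-is),
 * while larger frames are attached as a page fragment and the header is
 * pulled later by fm10k_pull_tail().  The memcpy length is rounded up to
 * a multiple of sizeof(long) purely as a copy optimization; the skb
 * length added by __skb_put() is still the exact "size".
 */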

static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	struct fm10k_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];

	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						FM10K_RX_HDR_LEN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      FM10K_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (fm10k_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		fm10k_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;

	return skb;
}

/**
 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int len = skb->len;

	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;

	skb_record_rx_queue(skb, rx_ring->queue_index);

	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;

	if (rx_desc->w.vlan) {
		u16 vid = le16_to_cpu(rx_desc->w.vlan);

		if (vid != rx_ring->vid)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	return len;
}

/**
 * fm10k_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(FM10K_RX_DESC(rx_ring, ntc));

	if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
		return false;

	return true;
}
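
/* Example of how the EOP handling ties the Rx loop together, assuming
 * 2 KiB half-page buffers: a 9000-byte jumbo frame spans five
 * descriptors.  fm10k_clean_rx_irq() calls fm10k_fetch_rx_buffer() once
 * per descriptor, each call appending one page fragment to the same skb,
 * and fm10k_is_non_eop() returns true for the first four so the loop
 * "continue"s without handing the skb to the stack.  Only the descriptor
 * with FM10K_RXD_STATUS_EOP set lets processing fall through to
 * fm10k_cleanup_headers() and fm10k_receive_skb().
 */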

/**
 * fm10k_pull_tail - fm10k specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being adjusted
 *
 * This function is an fm10k specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
static void fm10k_pull_tail(struct fm10k_ring *rx_ring,
			    union fm10k_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lowmem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}
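
/* Sketch of what the pull above does to a frag-only skb, assuming
 * eth_get_headlen() finds a 54-byte TCP/IPv4 header in the first
 * fragment: those 54 bytes are copied into the (previously empty) linear
 * area, the fragment's offset grows by 54 while its size shrinks by 54,
 * and skb->data_len drops accordingly while skb->len stays the same.
 * Unlike __pskb_pull_tail() no reallocation is needed, because the
 * linear area was sized for FM10K_RX_HDR_LEN when the skb was allocated.
 */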

/**
 * fm10k_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
				  union fm10k_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	if (unlikely((fm10k_test_staterr(rx_desc,
					 FM10K_RXD_STATUS_RXE)))) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_stats.errors++;
		return true;
	}

	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		fm10k_pull_tail(rx_ring, rx_desc, skb);

	/* if skb_pad returns an error the skb was freed */
	if (unlikely(skb->len < 60)) {
		int pad_len = 60 - skb->len;

		if (skb_pad(skb, pad_len))
			return true;
		__skb_put(skb, pad_len);
	}

	return false;
}
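
/* Worked example for the padding branch above: a 42-byte ARP request
 * arrives, which is below the 60-byte minimum frame length excluding the
 * CRC (ETH_ZLEN).  skb_pad() extends and zeroes the buffer by the
 * missing 18 bytes and __skb_put() grows skb->len to 60, so the frame
 * handed to the stack is always a valid minimum-size Ethernet frame.
 */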

/**
 * fm10k_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
			      struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}

static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
			       struct fm10k_ring *rx_ring,
			       int budget)
{
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = fm10k_desc_unused(rx_ring);

	do {
		union fm10k_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
			fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_DD))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STATUS_DD bit is set
		 */
		rmb();

		/* retrieve a buffer from the ring */
		skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (fm10k_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* populate checksum, timestamp, VLAN, and protocol */
		total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);

		fm10k_receive_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	} while (likely(total_packets < budget));

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_packets;
	rx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	return total_packets < budget;
}

static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
			       struct fm10k_tx_desc *tx_desc, u16 i,
			       dma_addr_t dma, unsigned int size, u8 desc_flags)
{
	/* set RS and INT for last frame in a cache line */
	if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
		desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;

	/* record values to descriptor */
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->flags = desc_flags;
	tx_desc->buflen = cpu_to_le16(size);

	/* return true if we just wrapped the ring */
	return i == tx_ring->count;
}
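
/* Note on the RS/INT marking above: rather than requesting a writeback
 * for every descriptor, the flags are only added when the incremented
 * index crosses a FM10K_TXD_WB_FIFO_SIZE boundary (the mask works
 * because that constant is a power of two).  As an illustration, with a
 * FIFO size of 16 the hardware would report completions and raise
 * interrupts at descriptors 15, 31, 47, ... which batches Tx cleanup
 * work; the actual constant lives in the hardware headers.
 */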

static void fm10k_tx_map(struct fm10k_ring *tx_ring,
			 struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	unsigned char *data;
	dma_addr_t dma;
	unsigned int data_len, size;
	u16 i = tx_ring->next_to_use;
	u8 flags = 0;

	tx_desc = FM10K_TX_DESC(tx_ring, i);

	/* add HW VLAN tag */
	if (vlan_tx_tag_present(skb))
		tx_desc->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
	else
		tx_desc->vlan = 0;

	size = skb_headlen(skb);
	data = skb->data;

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);

	data_len = skb->data_len;
	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
			if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
					       FM10K_MAX_DATA_PER_TXD, flags)) {
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += FM10K_MAX_DATA_PER_TXD;
			size -= FM10K_MAX_DATA_PER_TXD;
		}

		if (likely(!data_len))
			break;

		if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
				       dma, size, flags)) {
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer[i];
	}

	/* write last descriptor with LAST bit set */
	flags |= FM10K_TXD_FLAG_LAST;

	if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
		i = 0;

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	writel(i, tx_ring->tail);

	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems
	 */
	mmiowb();

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer[i];
		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available.
	 */
	if (likely(fm10k_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	if (likely(fm10k_desc_unused(tx_ring) >= size))
		return 0;
	return __fm10k_maybe_stop_tx(tx_ring, size);
}
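
/* The stop/wake dance above pairs with fm10k_clean_tx_irq(): the queue
 * is stopped first, then smp_mb() orders that store against the re-read
 * of fm10k_desc_unused(), so either this CPU sees the room the cleanup
 * path just freed (and immediately restarts the queue), or the cleanup
 * path sees the stopped queue and wakes it once descriptors are
 * reclaimed.  A rough sketch of the race being closed, assuming both
 * paths run concurrently:
 *
 *	xmit path			clean path
 *	---------			----------
 *	stop queue			reclaim descriptors
 *	smp_mb()			smp_mb()
 *	re-check unused descriptors	check __netif_subqueue_stopped()
 *	-> restart if room		-> wake if stopped
 */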

netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring)
{
	struct fm10k_tx_buffer *first;
	u32 tx_flags = 0;
#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u16 count = TXD_USE_COUNT(skb_headlen(skb));

	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head
	 * otherwise try next time
	 */
#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;

	fm10k_tx_map(tx_ring, first);

	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
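
/* Example of the descriptor budgeting above, assuming an architecture
 * where PAGE_SIZE does not exceed FM10K_MAX_DATA_PER_TXD: an skb with a
 * linear header and three page fragments needs count = 1 + 3 = 4 data
 * descriptors, and fm10k_maybe_stop_tx() is asked for count + 3 = 7 free
 * entries so the producer never fills the ring to the point where
 * next_to_use catches up with next_to_clean.
 */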

static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
{
	return ring->stats.packets;
}

static u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
{
	/* use SW head and tail until we have real hardware */
	u32 head = ring->next_to_clean;
	u32 tail = ring->next_to_use;

	return ((head <= tail) ? tail : tail + ring->count) - head;
}
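
/* Quick arithmetic check for the software head/tail math above, with a
 * 512-entry ring: next_to_clean = 500 and next_to_use = 10 means the
 * ring has wrapped, so pending = (10 + 512) - 500 = 22 descriptors are
 * still awaiting completion; without a wrap (head 100, tail 150) it is
 * simply 150 - 100 = 50.
 */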

bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
{
	u32 tx_done = fm10k_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = fm10k_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. By
	 * requiring this to fail twice we avoid races with
	 * clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if (!tx_pending || (tx_done_old != tx_done)) {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);

		return false;
	}

	/* make sure it is true for two checks in a row */
	return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
}
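
/* The hang detector above is deliberately a "two strikes" check: the
 * first invocation that sees pending work with no forward progress only
 * arms __FM10K_HANG_CHECK_ARMED and returns false (test_and_set_bit()
 * reports the old bit value); only a second consecutive invocation with
 * the bit still set reports a hang.  Any completed packet in between
 * resets the countdown by clearing the bit.
 */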

/**
 * fm10k_tx_timeout_reset - initiate reset due to Tx timeout
 * @interface: driver private struct
 **/
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__FM10K_DOWN, &interface->state)) {
		netdev_err(interface->netdev, "Reset interface\n");
		interface->tx_timeout_count++;
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;
		fm10k_service_event_schedule(interface);
	}
}

/**
 * fm10k_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
			       struct fm10k_ring *tx_ring)
{
	struct fm10k_intfc *interface = q_vector->interface;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__FM10K_DOWN, &interface->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer[i];
	tx_desc = FM10K_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_consume_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer;
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer;
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct fm10k_hw *hw = &interface->hw;

		netif_err(interface, drv, tx_ring->netdev,
			  "Detected Tx Unit Hang\n"
			  "  Tx Queue             <%d>\n"
			  "  TDH, TDT             <%x>, <%x>\n"
			  "  next_to_use          <%x>\n"
			  "  next_to_clean        <%x>\n",
			  tx_ring->queue_index,
			  fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
			  fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
			  tx_ring->next_to_use, i);

		netif_stop_subqueue(tx_ring->netdev,
				    tx_ring->queue_index);

		netif_info(interface, probe, tx_ring->netdev,
			   "tx hang %d detected on queue %d, resetting interface\n",
			   interface->tx_timeout_count + 1,
			   tx_ring->queue_index);

		fm10k_tx_timeout_reset(interface);

		/* the netdev is about to reset, no point in enabling stuff */
		return true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__FM10K_DOWN, &interface->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
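
/* Relationship between the Tx budget and NAPI above: fm10k_clean_tx_irq()
 * starts from q_vector->tx.work_limit and decrements once per completed
 * frame, so the "return !!budget" tells fm10k_poll() whether cleanup ran
 * out of budget (false, keep polling) or drained all completed work
 * (true).  The TX_WAKE_THRESHOLD wake-up only fires once enough
 * descriptors have been freed for roughly a couple of worst-case frames,
 * which avoids ping-ponging the queue between stopped and started.
 */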
985b101c962SAlexander Duyck 
98618283cadSAlexander Duyck /**
98718283cadSAlexander Duyck  * fm10k_update_itr - update the dynamic ITR value based on packet size
98818283cadSAlexander Duyck  *
98918283cadSAlexander Duyck  *      Stores a new ITR value based on strictly on packet size.  The
99018283cadSAlexander Duyck  *      divisors and thresholds used by this function were determined based
99118283cadSAlexander Duyck  *      on theoretical maximum wire speed and testing data, in order to
99218283cadSAlexander Duyck  *      minimize response time while increasing bulk throughput.
99318283cadSAlexander Duyck  *
99418283cadSAlexander Duyck  * @ring_container: Container for rings to have ITR updated
99518283cadSAlexander Duyck  **/
99618283cadSAlexander Duyck static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
99718283cadSAlexander Duyck {
99818283cadSAlexander Duyck 	unsigned int avg_wire_size, packets;
99918283cadSAlexander Duyck 
100018283cadSAlexander Duyck 	/* Only update ITR if we are using adaptive setting */
100118283cadSAlexander Duyck 	if (!(ring_container->itr & FM10K_ITR_ADAPTIVE))
100218283cadSAlexander Duyck 		goto clear_counts;
100318283cadSAlexander Duyck 
100418283cadSAlexander Duyck 	packets = ring_container->total_packets;
100518283cadSAlexander Duyck 	if (!packets)
100618283cadSAlexander Duyck 		goto clear_counts;
100718283cadSAlexander Duyck 
100818283cadSAlexander Duyck 	avg_wire_size = ring_container->total_bytes / packets;
100918283cadSAlexander Duyck 
101018283cadSAlexander Duyck 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
101118283cadSAlexander Duyck 	avg_wire_size += 24;
101218283cadSAlexander Duyck 
101318283cadSAlexander Duyck 	/* Don't starve jumbo frames */
101418283cadSAlexander Duyck 	if (avg_wire_size > 3000)
101518283cadSAlexander Duyck 		avg_wire_size = 3000;
101618283cadSAlexander Duyck 
101718283cadSAlexander Duyck 	/* Give a little boost to mid-size frames */
101818283cadSAlexander Duyck 	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
101918283cadSAlexander Duyck 		avg_wire_size /= 3;
102018283cadSAlexander Duyck 	else
102118283cadSAlexander Duyck 		avg_wire_size /= 2;
102218283cadSAlexander Duyck 
102318283cadSAlexander Duyck 	/* write back value and retain adaptive flag */
102418283cadSAlexander Duyck 	ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE;
102518283cadSAlexander Duyck 
102618283cadSAlexander Duyck clear_counts:
102718283cadSAlexander Duyck 	ring_container->total_bytes = 0;
102818283cadSAlexander Duyck 	ring_container->total_packets = 0;
102918283cadSAlexander Duyck }
103018283cadSAlexander Duyck 
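/* Worked example of the heuristic above, purely illustrative: cleaning
 * 100 packets totalling 150000 bytes gives avg_wire_size = 1500, plus
 * 24 bytes of overhead = 1524.  That is above the 1200-byte mid-size
 * window, so the stored interval becomes 1524 / 2 = 762 before the
 * adaptive flag is OR'd back in.  A 600-byte average instead takes the
 * divide-by-3 boost: (600 + 24) / 3 = 208.
 */
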
103118283cadSAlexander Duyck static void fm10k_qv_enable(struct fm10k_q_vector *q_vector)
103218283cadSAlexander Duyck {
103318283cadSAlexander Duyck 	/* Enable auto-mask and clear the current mask */
103418283cadSAlexander Duyck 	u32 itr = FM10K_ITR_ENABLE;
103518283cadSAlexander Duyck 
103618283cadSAlexander Duyck 	/* Update Tx ITR */
103718283cadSAlexander Duyck 	fm10k_update_itr(&q_vector->tx);
103818283cadSAlexander Duyck 
103918283cadSAlexander Duyck 	/* Update Rx ITR */
104018283cadSAlexander Duyck 	fm10k_update_itr(&q_vector->rx);
104118283cadSAlexander Duyck 
104218283cadSAlexander Duyck 	/* Store Tx itr in timer slot 0 */
104318283cadSAlexander Duyck 	itr |= (q_vector->tx.itr & FM10K_ITR_MAX);
104418283cadSAlexander Duyck 
104518283cadSAlexander Duyck 	/* Shift Rx itr to timer slot 1 */
104618283cadSAlexander Duyck 	itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT;
104718283cadSAlexander Duyck 
104818283cadSAlexander Duyck 	/* Write the final value to the ITR register */
104918283cadSAlexander Duyck 	writel(itr, q_vector->itr);
105018283cadSAlexander Duyck }
105118283cadSAlexander Duyck 
105218283cadSAlexander Duyck static int fm10k_poll(struct napi_struct *napi, int budget)
105318283cadSAlexander Duyck {
105418283cadSAlexander Duyck 	struct fm10k_q_vector *q_vector =
105518283cadSAlexander Duyck 			       container_of(napi, struct fm10k_q_vector, napi);
1056b101c962SAlexander Duyck 	struct fm10k_ring *ring;
1057b101c962SAlexander Duyck 	int per_ring_budget;
1058b101c962SAlexander Duyck 	bool clean_complete = true;
1059b101c962SAlexander Duyck 
1060b101c962SAlexander Duyck 	fm10k_for_each_ring(ring, q_vector->tx)
1061b101c962SAlexander Duyck 		clean_complete &= fm10k_clean_tx_irq(q_vector, ring);
1062b101c962SAlexander Duyck 
1063b101c962SAlexander Duyck 	/* attempt to distribute budget to each queue fairly, but don't
1064b101c962SAlexander Duyck 	 * allow the budget to go below 1 because we'll exit polling
1065b101c962SAlexander Duyck 	 */
1066b101c962SAlexander Duyck 	if (q_vector->rx.count > 1)
1067b101c962SAlexander Duyck 		per_ring_budget = max(budget/q_vector->rx.count, 1);
1068b101c962SAlexander Duyck 	else
1069b101c962SAlexander Duyck 		per_ring_budget = budget;
1070b101c962SAlexander Duyck 
1071b101c962SAlexander Duyck 	fm10k_for_each_ring(ring, q_vector->rx)
1072b101c962SAlexander Duyck 		clean_complete &= fm10k_clean_rx_irq(q_vector, ring,
1073b101c962SAlexander Duyck 						     per_ring_budget);
1074b101c962SAlexander Duyck 
1075b101c962SAlexander Duyck 	/* If all work not completed, return budget and keep polling */
1076b101c962SAlexander Duyck 	if (!clean_complete)
1077b101c962SAlexander Duyck 		return budget;
107818283cadSAlexander Duyck 
107918283cadSAlexander Duyck 	/* all work done, exit the polling mode */
108018283cadSAlexander Duyck 	napi_complete(napi);
108118283cadSAlexander Duyck 
108218283cadSAlexander Duyck 	/* re-enable the q_vector */
108318283cadSAlexander Duyck 	fm10k_qv_enable(q_vector);
108418283cadSAlexander Duyck 
108518283cadSAlexander Duyck 	return 0;
108618283cadSAlexander Duyck }
108718283cadSAlexander Duyck 
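/* Illustrative budget split for the poll routine above: with the usual
 * NAPI budget of 64 and three Rx rings on the vector, each ring is
 * cleaned with max(64 / 3, 1) = 21 descriptors of budget; a single ring
 * gets the full 64.  Returning the whole budget when clean_complete is
 * false keeps the vector in polling mode instead of re-arming the
 * interrupt.
 */
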
108818283cadSAlexander Duyck /**
108918283cadSAlexander Duyck  * fm10k_set_num_queues - Allocate queues for device, feature dependent
109018283cadSAlexander Duyck  * @interface: board private structure to initialize
109118283cadSAlexander Duyck  *
109218283cadSAlexander Duyck  * This is the top level queue allocation routine.  The order here is very
109318283cadSAlexander Duyck  * important, starting with the largest set of features turned on at once,
109418283cadSAlexander Duyck  * and ending with the smallest set of features.  This way large combinations
109518283cadSAlexander Duyck  * can be allocated if they're turned on, and smaller combinations are the
109618283cadSAlexander Duyck  * fallthrough conditions.
109718283cadSAlexander Duyck  *
109818283cadSAlexander Duyck  **/
109918283cadSAlexander Duyck static void fm10k_set_num_queues(struct fm10k_intfc *interface)
110018283cadSAlexander Duyck {
110118283cadSAlexander Duyck 	/* Start with base case */
110218283cadSAlexander Duyck 	interface->num_rx_queues = 1;
110318283cadSAlexander Duyck 	interface->num_tx_queues = 1;
110418283cadSAlexander Duyck }
110518283cadSAlexander Duyck 
110618283cadSAlexander Duyck /**
110718283cadSAlexander Duyck  * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
110818283cadSAlexander Duyck  * @interface: board private structure to initialize
110918283cadSAlexander Duyck  * @v_count: q_vectors allocated on interface, used for ring interleaving
111018283cadSAlexander Duyck  * @v_idx: index of vector in interface struct
111118283cadSAlexander Duyck  * @txr_count: total number of Tx rings to allocate
111218283cadSAlexander Duyck  * @txr_idx: index of first Tx ring to allocate
111318283cadSAlexander Duyck  * @rxr_count: total number of Rx rings to allocate
111418283cadSAlexander Duyck  * @rxr_idx: index of first Rx ring to allocate
111518283cadSAlexander Duyck  *
111618283cadSAlexander Duyck  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
111718283cadSAlexander Duyck  **/
111818283cadSAlexander Duyck static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
111918283cadSAlexander Duyck 				unsigned int v_count, unsigned int v_idx,
112018283cadSAlexander Duyck 				unsigned int txr_count, unsigned int txr_idx,
112118283cadSAlexander Duyck 				unsigned int rxr_count, unsigned int rxr_idx)
112218283cadSAlexander Duyck {
112318283cadSAlexander Duyck 	struct fm10k_q_vector *q_vector;
1124e27ef599SAlexander Duyck 	struct fm10k_ring *ring;
112518283cadSAlexander Duyck 	int ring_count, size;
112618283cadSAlexander Duyck 
112718283cadSAlexander Duyck 	ring_count = txr_count + rxr_count;
1128e27ef599SAlexander Duyck 	size = sizeof(struct fm10k_q_vector) +
1129e27ef599SAlexander Duyck 	       (sizeof(struct fm10k_ring) * ring_count);
113018283cadSAlexander Duyck 
113118283cadSAlexander Duyck 	/* allocate q_vector and rings */
113218283cadSAlexander Duyck 	q_vector = kzalloc(size, GFP_KERNEL);
113318283cadSAlexander Duyck 	if (!q_vector)
113418283cadSAlexander Duyck 		return -ENOMEM;
113518283cadSAlexander Duyck 
113618283cadSAlexander Duyck 	/* initialize NAPI */
113718283cadSAlexander Duyck 	netif_napi_add(interface->netdev, &q_vector->napi,
113818283cadSAlexander Duyck 		       fm10k_poll, NAPI_POLL_WEIGHT);
113918283cadSAlexander Duyck 
114018283cadSAlexander Duyck 	/* tie q_vector and interface together */
114118283cadSAlexander Duyck 	interface->q_vector[v_idx] = q_vector;
114218283cadSAlexander Duyck 	q_vector->interface = interface;
114318283cadSAlexander Duyck 	q_vector->v_idx = v_idx;
114418283cadSAlexander Duyck 
1145e27ef599SAlexander Duyck 	/* initialize pointer to rings */
1146e27ef599SAlexander Duyck 	ring = q_vector->ring;
1147e27ef599SAlexander Duyck 
114818283cadSAlexander Duyck 	/* save Tx ring container info */
1149e27ef599SAlexander Duyck 	q_vector->tx.ring = ring;
1150e27ef599SAlexander Duyck 	q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK;
115118283cadSAlexander Duyck 	q_vector->tx.itr = interface->tx_itr;
115218283cadSAlexander Duyck 	q_vector->tx.count = txr_count;
115318283cadSAlexander Duyck 
1154e27ef599SAlexander Duyck 	while (txr_count) {
1155e27ef599SAlexander Duyck 		/* assign generic ring traits */
1156e27ef599SAlexander Duyck 		ring->dev = &interface->pdev->dev;
1157e27ef599SAlexander Duyck 		ring->netdev = interface->netdev;
1158e27ef599SAlexander Duyck 
1159e27ef599SAlexander Duyck 		/* configure backlink on ring */
1160e27ef599SAlexander Duyck 		ring->q_vector = q_vector;
1161e27ef599SAlexander Duyck 
1162e27ef599SAlexander Duyck 		/* apply Tx specific ring traits */
1163e27ef599SAlexander Duyck 		ring->count = interface->tx_ring_count;
1164e27ef599SAlexander Duyck 		ring->queue_index = txr_idx;
1165e27ef599SAlexander Duyck 
1166e27ef599SAlexander Duyck 		/* assign ring to interface */
1167e27ef599SAlexander Duyck 		interface->tx_ring[txr_idx] = ring;
1168e27ef599SAlexander Duyck 
1169e27ef599SAlexander Duyck 		/* update count and index */
1170e27ef599SAlexander Duyck 		txr_count--;
1171e27ef599SAlexander Duyck 		txr_idx += v_count;
1172e27ef599SAlexander Duyck 
1173e27ef599SAlexander Duyck 		/* push pointer to next ring */
1174e27ef599SAlexander Duyck 		ring++;
1175e27ef599SAlexander Duyck 	}
1176e27ef599SAlexander Duyck 
117718283cadSAlexander Duyck 	/* save Rx ring container info */
1178e27ef599SAlexander Duyck 	q_vector->rx.ring = ring;
117918283cadSAlexander Duyck 	q_vector->rx.itr = interface->rx_itr;
118018283cadSAlexander Duyck 	q_vector->rx.count = rxr_count;
118118283cadSAlexander Duyck 
1182e27ef599SAlexander Duyck 	while (rxr_count) {
1183e27ef599SAlexander Duyck 		/* assign generic ring traits */
1184e27ef599SAlexander Duyck 		ring->dev = &interface->pdev->dev;
1185e27ef599SAlexander Duyck 		ring->netdev = interface->netdev;
1186e27ef599SAlexander Duyck 
1187e27ef599SAlexander Duyck 		/* configure backlink on ring */
1188e27ef599SAlexander Duyck 		ring->q_vector = q_vector;
1189e27ef599SAlexander Duyck 
1190e27ef599SAlexander Duyck 		/* apply Rx specific ring traits */
1191e27ef599SAlexander Duyck 		ring->count = interface->rx_ring_count;
1192e27ef599SAlexander Duyck 		ring->queue_index = rxr_idx;
1193e27ef599SAlexander Duyck 
1194e27ef599SAlexander Duyck 		/* assign ring to interface */
1195e27ef599SAlexander Duyck 		interface->rx_ring[rxr_idx] = ring;
1196e27ef599SAlexander Duyck 
1197e27ef599SAlexander Duyck 		/* update count and index */
1198e27ef599SAlexander Duyck 		rxr_count--;
1199e27ef599SAlexander Duyck 		rxr_idx += v_count;
1200e27ef599SAlexander Duyck 
1201e27ef599SAlexander Duyck 		/* push pointer to next ring */
1202e27ef599SAlexander Duyck 		ring++;
1203e27ef599SAlexander Duyck 	}
1204e27ef599SAlexander Duyck 
120518283cadSAlexander Duyck 	return 0;
120618283cadSAlexander Duyck }
120718283cadSAlexander Duyck 
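/* Illustrative example of the ring interleaving above: with v_count = 2
 * q_vectors and four Tx rings, vector 0 (starting txr_idx = 0) claims
 * tx_ring[0] and tx_ring[2] while vector 1 (starting txr_idx = 1)
 * claims tx_ring[1] and tx_ring[3], because the ring index advances by
 * v_count after each assignment.  The Rx rings interleave the same way.
 */
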
120818283cadSAlexander Duyck /**
120918283cadSAlexander Duyck  * fm10k_free_q_vector - Free memory allocated for specific interrupt vector
121018283cadSAlexander Duyck  * @interface: board private structure to initialize
121118283cadSAlexander Duyck  * @v_idx: Index of vector to be freed
121218283cadSAlexander Duyck  *
121318283cadSAlexander Duyck  * This function frees the memory allocated to the q_vector.  In addition if
121418283cadSAlexander Duyck  * NAPI is enabled it will delete any references to the NAPI struct prior
121518283cadSAlexander Duyck  * to freeing the q_vector.
121618283cadSAlexander Duyck  **/
121718283cadSAlexander Duyck static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx)
121818283cadSAlexander Duyck {
121918283cadSAlexander Duyck 	struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];
1220e27ef599SAlexander Duyck 	struct fm10k_ring *ring;
1221e27ef599SAlexander Duyck 
1222e27ef599SAlexander Duyck 	fm10k_for_each_ring(ring, q_vector->tx)
1223e27ef599SAlexander Duyck 		interface->tx_ring[ring->queue_index] = NULL;
1224e27ef599SAlexander Duyck 
1225e27ef599SAlexander Duyck 	fm10k_for_each_ring(ring, q_vector->rx)
1226e27ef599SAlexander Duyck 		interface->rx_ring[ring->queue_index] = NULL;
122718283cadSAlexander Duyck 
122818283cadSAlexander Duyck 	interface->q_vector[v_idx] = NULL;
122918283cadSAlexander Duyck 	netif_napi_del(&q_vector->napi);
123018283cadSAlexander Duyck 	kfree_rcu(q_vector, rcu);
123118283cadSAlexander Duyck }
123218283cadSAlexander Duyck 
123318283cadSAlexander Duyck /**
123418283cadSAlexander Duyck  * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors
123518283cadSAlexander Duyck  * @interface: board private structure to initialize
123618283cadSAlexander Duyck  *
123718283cadSAlexander Duyck  * We allocate one q_vector per queue interrupt.  If allocation fails we
123818283cadSAlexander Duyck  * return -ENOMEM.
123918283cadSAlexander Duyck  **/
124018283cadSAlexander Duyck static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
124118283cadSAlexander Duyck {
124218283cadSAlexander Duyck 	unsigned int q_vectors = interface->num_q_vectors;
124318283cadSAlexander Duyck 	unsigned int rxr_remaining = interface->num_rx_queues;
124418283cadSAlexander Duyck 	unsigned int txr_remaining = interface->num_tx_queues;
124518283cadSAlexander Duyck 	unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
124618283cadSAlexander Duyck 	int err;
124718283cadSAlexander Duyck 
124818283cadSAlexander Duyck 	if (q_vectors >= (rxr_remaining + txr_remaining)) {
124918283cadSAlexander Duyck 		for (; rxr_remaining; v_idx++) {
125018283cadSAlexander Duyck 			err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
125118283cadSAlexander Duyck 						   0, 0, 1, rxr_idx);
125218283cadSAlexander Duyck 			if (err)
125318283cadSAlexander Duyck 				goto err_out;
125418283cadSAlexander Duyck 
125518283cadSAlexander Duyck 			/* update counts and index */
125618283cadSAlexander Duyck 			rxr_remaining--;
125718283cadSAlexander Duyck 			rxr_idx++;
125818283cadSAlexander Duyck 		}
125918283cadSAlexander Duyck 	}
126018283cadSAlexander Duyck 
126118283cadSAlexander Duyck 	for (; v_idx < q_vectors; v_idx++) {
126218283cadSAlexander Duyck 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
126318283cadSAlexander Duyck 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
126418283cadSAlexander Duyck 
126518283cadSAlexander Duyck 		err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
126618283cadSAlexander Duyck 					   tqpv, txr_idx,
126718283cadSAlexander Duyck 					   rqpv, rxr_idx);
126818283cadSAlexander Duyck 
126918283cadSAlexander Duyck 		if (err)
127018283cadSAlexander Duyck 			goto err_out;
127118283cadSAlexander Duyck 
127218283cadSAlexander Duyck 		/* update counts and index */
127318283cadSAlexander Duyck 		rxr_remaining -= rqpv;
127418283cadSAlexander Duyck 		txr_remaining -= tqpv;
127518283cadSAlexander Duyck 		rxr_idx++;
127618283cadSAlexander Duyck 		txr_idx++;
127718283cadSAlexander Duyck 	}
127818283cadSAlexander Duyck 
127918283cadSAlexander Duyck 	return 0;
128018283cadSAlexander Duyck 
128118283cadSAlexander Duyck err_out:
128218283cadSAlexander Duyck 	interface->num_tx_queues = 0;
128318283cadSAlexander Duyck 	interface->num_rx_queues = 0;
128418283cadSAlexander Duyck 	interface->num_q_vectors = 0;
128518283cadSAlexander Duyck 
128618283cadSAlexander Duyck 	while (v_idx--)
128718283cadSAlexander Duyck 		fm10k_free_q_vector(interface, v_idx);
128818283cadSAlexander Duyck 
128918283cadSAlexander Duyck 	return -ENOMEM;
129018283cadSAlexander Duyck }
129118283cadSAlexander Duyck 
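/* Illustrative distribution for the allocator above: with 3 q_vectors
 * and 4 Rx / 4 Tx rings the vectors do not outnumber the rings, so the
 * Rx-only loop is skipped and each pass splits what remains with
 * DIV_ROUND_UP(): vector 0 gets 2 Rx / 2 Tx rings, vectors 1 and 2 get
 * 1 Rx / 1 Tx each.  With 8 q_vectors the first loop instead gives each
 * Rx ring its own vector and the Tx rings land one per remaining
 * vector.
 */
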
129218283cadSAlexander Duyck /**
129318283cadSAlexander Duyck  * fm10k_free_q_vectors - Free memory allocated for interrupt vectors
129418283cadSAlexander Duyck  * @interface: board private structure to initialize
129518283cadSAlexander Duyck  *
129618283cadSAlexander Duyck  * This function frees the memory allocated to the q_vectors.  In addition if
129718283cadSAlexander Duyck  * NAPI is enabled it will delete any references to the NAPI struct prior
129818283cadSAlexander Duyck  * to freeing the q_vector.
129918283cadSAlexander Duyck  **/
130018283cadSAlexander Duyck static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
130118283cadSAlexander Duyck {
130218283cadSAlexander Duyck 	int v_idx = interface->num_q_vectors;
130318283cadSAlexander Duyck 
130418283cadSAlexander Duyck 	interface->num_tx_queues = 0;
130518283cadSAlexander Duyck 	interface->num_rx_queues = 0;
130618283cadSAlexander Duyck 	interface->num_q_vectors = 0;
130718283cadSAlexander Duyck 
130818283cadSAlexander Duyck 	while (v_idx--)
130918283cadSAlexander Duyck 		fm10k_free_q_vector(interface, v_idx);
131018283cadSAlexander Duyck }
131118283cadSAlexander Duyck 
131218283cadSAlexander Duyck /**
131318283cadSAlexander Duyck  * fm10k_reset_msix_capability - reset MSI-X capability
131418283cadSAlexander Duyck  * @interface: board private structure to initialize
131518283cadSAlexander Duyck  *
131618283cadSAlexander Duyck  * Reset the MSI-X capability back to its starting state
131718283cadSAlexander Duyck  **/
131818283cadSAlexander Duyck static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
131918283cadSAlexander Duyck {
132018283cadSAlexander Duyck 	pci_disable_msix(interface->pdev);
132118283cadSAlexander Duyck 	kfree(interface->msix_entries);
132218283cadSAlexander Duyck 	interface->msix_entries = NULL;
132318283cadSAlexander Duyck }
132418283cadSAlexander Duyck 
132518283cadSAlexander Duyck /**
132618283cadSAlexander Duyck  * fm10k_init_msix_capability - configure MSI-X capability
132718283cadSAlexander Duyck  * @interface: board private structure to initialize
132818283cadSAlexander Duyck  *
132918283cadSAlexander Duyck  * Attempt to configure the interrupts using the best available
133018283cadSAlexander Duyck  * capabilities of the hardware and the kernel.
133118283cadSAlexander Duyck  **/
133218283cadSAlexander Duyck static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
133318283cadSAlexander Duyck {
133418283cadSAlexander Duyck 	struct fm10k_hw *hw = &interface->hw;
133518283cadSAlexander Duyck 	int v_budget, vector;
133618283cadSAlexander Duyck 
133718283cadSAlexander Duyck 	/* It's easy to be greedy for MSI-X vectors, but it really
133818283cadSAlexander Duyck 	 * doesn't do us much good if we have a lot more vectors
133918283cadSAlexander Duyck 	 * than CPUs.  So let's be conservative and only ask for
134018283cadSAlexander Duyck 	 * (roughly) the same number of vectors as there are CPUs.
134118283cadSAlexander Duyck 	 * The default is to use pairs of vectors.
134218283cadSAlexander Duyck 	 */
134318283cadSAlexander Duyck 	v_budget = max(interface->num_rx_queues, interface->num_tx_queues);
134418283cadSAlexander Duyck 	v_budget = min_t(u16, v_budget, num_online_cpus());
134518283cadSAlexander Duyck 
134618283cadSAlexander Duyck 	/* account for vectors not related to queues */
134718283cadSAlexander Duyck 	v_budget += NON_Q_VECTORS(hw);
134818283cadSAlexander Duyck 
134918283cadSAlexander Duyck 	/* At the same time, hardware can only support a maximum of
135018283cadSAlexander Duyck 	 * hw->mac.max_msix_vectors vectors.  With features
135118283cadSAlexander Duyck 	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
135218283cadSAlexander Duyck 	 * descriptor queues supported by our device.  Thus, we cap it off in
135318283cadSAlexander Duyck 	 * those rare cases where the cpu count also exceeds our vector limit.
135418283cadSAlexander Duyck 	 */
135518283cadSAlexander Duyck 	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
135618283cadSAlexander Duyck 
135718283cadSAlexander Duyck 	/* A failure in MSI-X entry allocation is fatal. */
135818283cadSAlexander Duyck 	interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
135918283cadSAlexander Duyck 					  GFP_KERNEL);
136018283cadSAlexander Duyck 	if (!interface->msix_entries)
136118283cadSAlexander Duyck 		return -ENOMEM;
136218283cadSAlexander Duyck 
136318283cadSAlexander Duyck 	/* populate entry values */
136418283cadSAlexander Duyck 	for (vector = 0; vector < v_budget; vector++)
136518283cadSAlexander Duyck 		interface->msix_entries[vector].entry = vector;
136618283cadSAlexander Duyck 
136718283cadSAlexander Duyck 	/* Attempt to enable MSI-X with requested value */
136818283cadSAlexander Duyck 	v_budget = pci_enable_msix_range(interface->pdev,
136918283cadSAlexander Duyck 					 interface->msix_entries,
137018283cadSAlexander Duyck 					 MIN_MSIX_COUNT(hw),
137118283cadSAlexander Duyck 					 v_budget);
137218283cadSAlexander Duyck 	if (v_budget < 0) {
137318283cadSAlexander Duyck 		kfree(interface->msix_entries);
137418283cadSAlexander Duyck 		interface->msix_entries = NULL;
137518283cadSAlexander Duyck 		return -ENOMEM;
137618283cadSAlexander Duyck 	}
137718283cadSAlexander Duyck 
137818283cadSAlexander Duyck 	/* record the number of queues available for q_vectors */
137918283cadSAlexander Duyck 	interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw);
138018283cadSAlexander Duyck 
138118283cadSAlexander Duyck 	return 0;
138218283cadSAlexander Duyck }
138318283cadSAlexander Duyck 
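/* Illustrative sizing for the vector request above (NON_Q_VECTORS() and
 * the per-device MSI-X limit vary by hardware): with 16 Rx and 16 Tx
 * queues on a 4-CPU system, v_budget starts at max(16, 16) = 16, is
 * clamped to the 4 online CPUs, gains the non-queue vectors, and is
 * then offered to pci_enable_msix_range() with MIN_MSIX_COUNT() as the
 * floor.
 */
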
138418283cadSAlexander Duyck static void fm10k_init_reta(struct fm10k_intfc *interface)
138518283cadSAlexander Duyck {
138618283cadSAlexander Duyck 	u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
138718283cadSAlexander Duyck 	u32 reta, base;
138818283cadSAlexander Duyck 
138918283cadSAlexander Duyck 	/* If the netdev is initialized we have to maintain the table if possible */
139018283cadSAlexander Duyck 	if (interface->netdev->reg_state) {
139118283cadSAlexander Duyck 		for (i = FM10K_RETA_SIZE; i--;) {
139218283cadSAlexander Duyck 			reta = interface->reta[i];
139318283cadSAlexander Duyck 			if ((((reta << 24) >> 24) < rss_i) &&
139418283cadSAlexander Duyck 			    (((reta << 16) >> 24) < rss_i) &&
139518283cadSAlexander Duyck 			    (((reta <<  8) >> 24) < rss_i) &&
139618283cadSAlexander Duyck 			    (((reta)       >> 24) < rss_i))
139718283cadSAlexander Duyck 				continue;
139818283cadSAlexander Duyck 			goto repopulate_reta;
139918283cadSAlexander Duyck 		}
140018283cadSAlexander Duyck 
140118283cadSAlexander Duyck 		/* do nothing if all of the elements are in bounds */
140218283cadSAlexander Duyck 		return;
140318283cadSAlexander Duyck 	}
140418283cadSAlexander Duyck 
140518283cadSAlexander Duyck repopulate_reta:
140618283cadSAlexander Duyck 	/* Populate the redirection table 4 entries at a time.  To do this
140718283cadSAlexander Duyck 	 * we are generating the results for n and n+2 and then interleaving
140818283cadSAlexander Duyck 	 * those with the results for n+1 and n+3.
140918283cadSAlexander Duyck 	 */
141018283cadSAlexander Duyck 	for (i = FM10K_RETA_SIZE; i--;) {
141118283cadSAlexander Duyck 		/* first pass generates n and n+2 */
141218283cadSAlexander Duyck 		base = ((i * 0x00040004) + 0x00020000) * rss_i;
141318283cadSAlexander Duyck 		reta = (base & 0x3F803F80) >> 7;
141418283cadSAlexander Duyck 
141518283cadSAlexander Duyck 		/* second pass generates n+1 and n+3 */
141618283cadSAlexander Duyck 		base += 0x00010001 * rss_i;
141718283cadSAlexander Duyck 		reta |= (base & 0x3F803F80) << 1;
141818283cadSAlexander Duyck 
141918283cadSAlexander Duyck 		interface->reta[i] = reta;
142018283cadSAlexander Duyck 	}
142118283cadSAlexander Duyck }
142218283cadSAlexander Duyck 
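/* Illustrative expansion of the table math above: each byte-wide entry
 * n works out to (n * rss_i) >> 7, so the entries are spread evenly
 * across the rss_i queues (assuming the table holds 128 entries, four
 * per 32-bit register).  The 0x00040004 multiplier keeps 4 * i in both
 * 16-bit halves, the 0x00020000 and 0x00010001 offsets select entries
 * n, n + 2 and then n + 1, n + 3, and the 0x3F803F80 mask plus shifts
 * drop each product's low 7 fractional bits into the right byte lane.
 * For rss_i = 4 that means entries 0-31 map to queue 0, 32-63 to
 * queue 1, and so on.
 */
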
142318283cadSAlexander Duyck /**
142418283cadSAlexander Duyck  * fm10k_init_queueing_scheme - Determine proper queueing scheme
142518283cadSAlexander Duyck  * @interface: board private structure to initialize
142618283cadSAlexander Duyck  *
142718283cadSAlexander Duyck  * We determine which queueing scheme to use based on...
142818283cadSAlexander Duyck  * - Hardware queue count (num_*_queues)
142918283cadSAlexander Duyck  *   - defined by miscellaneous hardware support/features (RSS, etc.)
143018283cadSAlexander Duyck  **/
143118283cadSAlexander Duyck int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
143218283cadSAlexander Duyck {
143318283cadSAlexander Duyck 	int err;
143418283cadSAlexander Duyck 
143518283cadSAlexander Duyck 	/* Number of supported queues */
143618283cadSAlexander Duyck 	fm10k_set_num_queues(interface);
143718283cadSAlexander Duyck 
143818283cadSAlexander Duyck 	/* Configure MSI-X capability */
143918283cadSAlexander Duyck 	err = fm10k_init_msix_capability(interface);
144018283cadSAlexander Duyck 	if (err) {
144118283cadSAlexander Duyck 		dev_err(&interface->pdev->dev,
144218283cadSAlexander Duyck 			"Unable to initialize MSI-X capability\n");
144318283cadSAlexander Duyck 		return err;
144418283cadSAlexander Duyck 	}
144518283cadSAlexander Duyck 
144618283cadSAlexander Duyck 	/* Allocate memory for queues */
144718283cadSAlexander Duyck 	err = fm10k_alloc_q_vectors(interface);
144818283cadSAlexander Duyck 	if (err)
144918283cadSAlexander Duyck 		return err;
145018283cadSAlexander Duyck 
145118283cadSAlexander Duyck 	/* Initialize RSS redirection table */
145218283cadSAlexander Duyck 	fm10k_init_reta(interface);
145318283cadSAlexander Duyck 
145418283cadSAlexander Duyck 	return 0;
145518283cadSAlexander Duyck }
145618283cadSAlexander Duyck 
145718283cadSAlexander Duyck /**
145818283cadSAlexander Duyck  * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings
145918283cadSAlexander Duyck  * @interface: board private structure to clear queueing scheme on
146018283cadSAlexander Duyck  *
146118283cadSAlexander Duyck  * We go through and clear queueing specific resources and reset the structure
146218283cadSAlexander Duyck  * to pre-load conditions
146318283cadSAlexander Duyck  **/
146418283cadSAlexander Duyck void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface)
146518283cadSAlexander Duyck {
146618283cadSAlexander Duyck 	fm10k_free_q_vectors(interface);
146718283cadSAlexander Duyck 	fm10k_reset_msix_capability(interface);
146818283cadSAlexander Duyck }
1469