/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
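/* e.g. with the values above, DRV_VERSION expands to "1.6.16-k" */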
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
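/* e.g. "modprobe i40e debug=16" requests the "all" level described above */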

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

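	/* round the request up to the alignment; e.g. ALIGN(1000, 4096)
	 * evaluates to 4096
	 */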
	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
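 *
 * Example (hypothetical numbers): asking for needed = 4 when the first two
 * entries are already tagged skips those, tags list[2..5] with
 * (id | I40E_PILE_VALID_BIT), and returns base index 2.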
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

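		/* read the 64-bit counters under the u64_stats seqcount so
		 * the values are consistent even on 32-bit kernels
		 */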
		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes   = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes   += bytes;
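		/* the Rx ring lives right after its Tx partner in the same
		 * ring allocation (see also i40e_update_vsi_stats)
		 */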
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes   = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;

	return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
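 *
 * Example: if the first read (the saved offset) was 0xFFFFFFFFFFF0 and the
 * counter has since wrapped around to 0x10, the wrap branch computes
 * (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 = 0x20, the true count.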
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

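	/* on the QEMU-emulated device, build the value from two 32-bit
	 * reads instead of a single 64-bit read
	 */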
	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's eth stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u64 tx_lost_interrupt;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	tx_lost_interrupt = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
		tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->tx_lost_interrupt = tx_lost_interrupt;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

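	/* EEE (Energy Efficient Ethernet) low power idle status and counters */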
	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan)    &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* The VSI is out of vlan mode only when every filter has vlan == -1,
	 * so we have to walk the whole list to be sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0 || vsi->info.pvid)
			return true;
	}

	return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (vsi->info.pvid)
			f->vlan = le16_to_cpu(vsi->info.pvid);
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Removes a given MAC address from a VSI, regardless of VLAN
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
			  bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f = NULL;
	int changed = 0;

	WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
	     "Missing mac_filter_list_lock\n");
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (is_vf == f->is_vf) &&
		    (is_netdev == f->is_netdev)) {
			f->counter--;
			changed = 1;
			if (f->counter == 0)
				f->state = I40E_FILTER_REMOVE;
		}
	}
	if (changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
		return 0;
	}
	return -ENOENT;
}

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;
	int changed = false;

	if (!vsi || !macaddr)
		return NULL;

	/* Do not allow broadcast filter to be added since broadcast filter
	 * is added as part of add VSI for any newly created VSI except
	 * FDIR VSI
	 */
	if (is_broadcast_ether_addr(macaddr))
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		/* If we're in overflow promisc mode, set the state directly
		 * to failed, so we don't bother to try sending the filter
		 * to the hardware.
		 */
		if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
			f->state = I40E_FILTER_FAILED;
		else
			f->state = I40E_FILTER_NEW;
		changed = true;
		INIT_LIST_HEAD(&f->list);
		list_add_tail(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	if (changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by VF or netdev */
		int min_f = 0;

		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		if ((f->state == I40E_FILTER_FAILED) ||
		    (f->state == I40E_FILTER_NEW)) {
			/* this one never got added by the FW. Just remove it,
			 * no need to sync anything.
			 */
			list_del(&f->list);
			kfree(f);
		} else {
			f->state = I40E_FILTER_REMOVE;
			vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
			vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
		}
	}
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	spin_lock_bh(&vsi->mac_filter_list_lock);
	i40e_del_mac_all_vlan(vsi, netdev->dev_addr, false, true);
	i40e_put_mac_in_vlan(vsi, addr->sa_data, false, true);
	spin_unlock_bh(&vsi->mac_filter_list_lock);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in the non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	qcount = vsi->alloc_queue_pairs;

	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
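			/* e.g. qcount = 10 yields pow = 4, since BIT_ULL(4) = 16
			 * is the first power of two >= 10
			 */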
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
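			/* pack this TC's queue offset and power-of-2 queue
			 * count into one 16-bit map entry
			 */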
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	spin_lock_bh(&vsi->mac_filter_list_lock);

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {

		if (!f->is_netdev)
			continue;

		netdev_for_each_mc_addr(mca, netdev)
			if (ether_addr_equal(mca->addr, f->macaddr))
				goto bottom_of_search_loop;

		netdev_for_each_uc_addr(uca, netdev)
			if (ether_addr_equal(uca->addr, f->macaddr))
				goto bottom_of_search_loop;

		for_each_dev_addr(netdev, ha)
			if (ether_addr_equal(ha->addr, f->macaddr))
				goto bottom_of_search_loop;

		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
		i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);

bottom_of_search_loop:
		continue;
	}
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}

/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: pointer to vsi struct
1751  * @from: Pointer to list which contains MAC filter entries - changes to
1752  *        those entries need to be undone.
1753  *
1754  * MAC filter entries from this list were slated to be removed from the device.
1755  **/
1756 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1757 					 struct list_head *from)
1758 {
1759 	struct i40e_mac_filter *f, *ftmp;
1760 
1761 	list_for_each_entry_safe(f, ftmp, from, list) {
1762 		/* Move the element back into MAC filter list*/
1763 		list_move_tail(&f->list, &vsi->mac_filter_list);
1764 	}
1765 }
1766 
1767 /**
1768  * i40e_update_filter_state - Update filter state based on return data
1769  * from firmware
1770  * @count: Number of filters added
1771  * @add_list: return data from fw
1772  * @add_head: pointer to first filter in current batch
1773  * @aq_err: status from fw
1774  *
1775  * MAC filter entries from this list were slated to be added to the device.
1776  * Returns the number of successfully added filters. Note that 0 does NOT mean success!
1777  **/
1778 static int
1779 i40e_update_filter_state(int count,
1780 			 struct i40e_aqc_add_macvlan_element_data *add_list,
1781 			 struct i40e_mac_filter *add_head, int aq_err)
1782 {
1783 	int retval = 0;
1784 	int i;
1785 
1786 
1787 	if (!aq_err) {
1788 		retval = count;
1789 		/* Everything's good, mark all filters active. */
1790 		for (i = 0; i < count ; i++) {
1791 			add_head->state = I40E_FILTER_ACTIVE;
1792 			add_head = list_next_entry(add_head, list);
1793 		}
1794 	} else if (aq_err == I40E_AQ_RC_ENOSPC) {
1795 		/* Device ran out of filter space. Check the return value
1796 		 * for each filter to see which ones are active.
1797 		 */
1798 		for (i = 0; i < count ; i++) {
1799 			if (add_list[i].match_method ==
1800 			    I40E_AQC_MM_ERR_NO_RES) {
1801 				add_head->state = I40E_FILTER_FAILED;
1802 			} else {
1803 				add_head->state = I40E_FILTER_ACTIVE;
1804 				retval++;
1805 			}
1806 			add_head = list_next_entry(add_head, list);
1807 		}
1808 	} else {
1809 		/* Some other horrible thing happened, fail all filters */
1810 		retval = 0;
1811 		for (i = 0; i < count ; i++) {
1812 			add_head->state = I40E_FILTER_FAILED;
1813 			add_head = list_next_entry(add_head, list);
1814 		}
1815 	}
1816 	return retval;
1817 }
1818 
1819 /**
1820  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1821  * @vsi: ptr to the VSI
1822  *
1823  * Push any outstanding VSI filter changes through the AdminQ.
1824  *
1825  * Returns 0 or error value
1826  **/
1827 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1828 {
1829 	struct i40e_mac_filter *f, *ftmp, *add_head = NULL;
1830 	struct list_head tmp_add_list, tmp_del_list;
1831 	struct i40e_hw *hw = &vsi->back->hw;
1832 	bool promisc_changed = false;
1833 	char vsi_name[16] = "PF";
1834 	int filter_list_len = 0;
1835 	u32 changed_flags = 0;
1836 	i40e_status aq_ret = 0;
1837 	int retval = 0;
1838 	struct i40e_pf *pf;
1839 	int num_add = 0;
1840 	int num_del = 0;
1841 	int aq_err = 0;
1842 	u16 cmd_flags;
1843 	int list_size;
1844 	int fcnt;
1845 
1846 	/* array-typed pointers, sized and allocated with kzalloc below */
1847 	struct i40e_aqc_add_macvlan_element_data *add_list;
1848 	struct i40e_aqc_remove_macvlan_element_data *del_list;
1849 
1850 	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1851 		usleep_range(1000, 2000);
1852 	pf = vsi->back;
1853 
1854 	if (vsi->netdev) {
1855 		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1856 		vsi->current_netdev_flags = vsi->netdev->flags;
1857 	}
1858 
1859 	INIT_LIST_HEAD(&tmp_add_list);
1860 	INIT_LIST_HEAD(&tmp_del_list);
1861 
1862 	if (vsi->type == I40E_VSI_SRIOV)
1863 		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
1864 	else if (vsi->type != I40E_VSI_MAIN)
1865 		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
1866 
1867 	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1868 		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1869 
1870 		spin_lock_bh(&vsi->mac_filter_list_lock);
1871 		/* Create a list of filters to delete. */
1872 		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1873 			if (f->state == I40E_FILTER_REMOVE) {
1874 				WARN_ON(f->counter != 0);
1875 				/* Move the element into temporary del_list */
1876 				list_move_tail(&f->list, &tmp_del_list);
1877 				vsi->active_filters--;
1878 			}
1879 			if (f->state == I40E_FILTER_NEW) {
1880 				WARN_ON(f->counter == 0);
1881 				/* Move the element into temporary add_list */
1882 				list_move_tail(&f->list, &tmp_add_list);
1883 			}
1884 		}
1885 		spin_unlock_bh(&vsi->mac_filter_list_lock);
1886 	}
1887 
1888 	/* Now process 'del_list' outside the lock */
1889 	if (!list_empty(&tmp_del_list)) {
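		/* size the delete buffer to hold as many elements as fit
		 * in one AdminQ request; full buffers are flushed below
		 */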
1890 		filter_list_len = hw->aq.asq_buf_size /
1891 			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
1892 		list_size = filter_list_len *
1893 			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
1894 		del_list = kzalloc(list_size, GFP_ATOMIC);
1895 		if (!del_list) {
1896 			/* Undo VSI's MAC filter entry element updates */
1897 			spin_lock_bh(&vsi->mac_filter_list_lock);
1898 			i40e_undo_del_filter_entries(vsi, &tmp_del_list);
1899 			spin_unlock_bh(&vsi->mac_filter_list_lock);
1900 			retval = -ENOMEM;
1901 			goto out;
1902 		}
1903 
1904 		list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
1905 			cmd_flags = 0;
1906 
1907 			/* add to delete list */
1908 			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1909 			if (f->vlan == I40E_VLAN_ANY) {
1910 				del_list[num_del].vlan_tag = 0;
1911 				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1912 			} else {
1913 				del_list[num_del].vlan_tag =
1914 					cpu_to_le16((u16)(f->vlan));
1915 			}
1916 
1917 			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1918 			del_list[num_del].flags = cmd_flags;
1919 			num_del++;
1920 
1921 			/* flush a full buffer */
1922 			if (num_del == filter_list_len) {
1923 				aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid,
1924 								del_list,
1925 								num_del, NULL);
1926 				aq_err = hw->aq.asq_last_status;
1927 				num_del = 0;
1928 				memset(del_list, 0, list_size);
1929 
1930 				/* Explicitly ignore and do not report when
1931 				 * firmware returns ENOENT.
1932 				 */
1933 				if (aq_ret && (aq_err != I40E_AQ_RC_ENOENT)) {
1934 					retval = -EIO;
1935 					dev_info(&pf->pdev->dev,
1936 						 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
1937 						 vsi_name,
1938 						 i40e_stat_str(hw, aq_ret),
1939 						 i40e_aq_str(hw, aq_err));
1940 				}
1941 			}
1942 			/* Release memory for MAC filter entries which were
1943 			 * synced up with HW.
1944 			 */
1945 			list_del(&f->list);
1946 			kfree(f);
1947 		}
1948 
1949 		if (num_del) {
1950 			aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list,
1951 							num_del, NULL);
1952 			aq_err = hw->aq.asq_last_status;
1953 			num_del = 0;
1954 
1955 			/* Explicitly ignore and do not report when firmware
1956 			 * returns ENOENT.
1957 			 */
1958 			if (aq_ret && (aq_err != I40E_AQ_RC_ENOENT)) {
1959 				retval = -EIO;
1960 				dev_info(&pf->pdev->dev,
1961 					 "ignoring delete macvlan error on %s, err %s aq_err %s\n",
1962 					 vsi_name,
1963 					 i40e_stat_str(hw, aq_ret),
1964 					 i40e_aq_str(hw, aq_err));
1965 			}
1966 		}
1967 
1968 		kfree(del_list);
1969 		del_list = NULL;
1970 	}
1971 
1972 	if (!list_empty(&tmp_add_list)) {
1973 		/* Do all the adds now. */
1974 		filter_list_len = hw->aq.asq_buf_size /
1975 			       sizeof(struct i40e_aqc_add_macvlan_element_data);
1976 		list_size = filter_list_len *
1977 			       sizeof(struct i40e_aqc_add_macvlan_element_data);
1978 		add_list = kzalloc(list_size, GFP_ATOMIC);
1979 		if (!add_list) {
1980 			retval = -ENOMEM;
1981 			goto out;
1982 		}
1983 		num_add = 0;
1984 		list_for_each_entry(f, &tmp_add_list, list) {
1985 			if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1986 				     &vsi->state)) {
1987 				f->state = I40E_FILTER_FAILED;
1988 				continue;
1989 			}
1990 			/* add to add array */
1991 			if (num_add == 0)
1992 				add_head = f;
1993 			cmd_flags = 0;
1994 			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1995 			if (f->vlan == I40E_VLAN_ANY) {
1996 				add_list[num_add].vlan_tag = 0;
1997 				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1998 			} else {
1999 				add_list[num_add].vlan_tag =
2000 					cpu_to_le16((u16)(f->vlan));
2001 			}
2002 			add_list[num_add].queue_number = 0;
2003 			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2004 			add_list[num_add].flags = cpu_to_le16(cmd_flags);
2005 			num_add++;
2006 
2007 			/* flush a full buffer */
2008 			if (num_add == filter_list_len) {
2009 				aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
2010 							     add_list, num_add,
2011 							     NULL);
2012 				aq_err = hw->aq.asq_last_status;
2013 				fcnt = i40e_update_filter_state(num_add,
2014 								add_list,
2015 								add_head,
2016 								aq_ret);
2017 				vsi->active_filters += fcnt;
2018 
2019 				if (fcnt != num_add) {
2020 					promisc_changed = true;
2021 					set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2022 						&vsi->state);
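					/* arm the exit watermark at 3/4 of
					 * the active count; the overflow
					 * recovery check below relies on it
					 */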
2023 					vsi->promisc_threshold =
2024 						(vsi->active_filters * 3) / 4;
2025 					dev_warn(&pf->pdev->dev,
2026 						 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2027 						 i40e_aq_str(hw, aq_err),
2028 						 vsi_name);
2029 				}
2030 				memset(add_list, 0, list_size);
2031 				num_add = 0;
2032 			}
2033 		}
2034 		if (num_add) {
2035 			aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
2036 						     add_list, num_add, NULL);
2037 			aq_err = hw->aq.asq_last_status;
2038 			fcnt = i40e_update_filter_state(num_add, add_list,
2039 							add_head, aq_ret);
2040 			vsi->active_filters += fcnt;
2041 			if (fcnt != num_add) {
2042 				promisc_changed = true;
2043 				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2044 					&vsi->state);
2045 				vsi->promisc_threshold =
2046 						(vsi->active_filters * 3) / 4;
2047 				dev_warn(&pf->pdev->dev,
2048 					 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2049 					 i40e_aq_str(hw, aq_err), vsi_name);
2050 			}
2051 		}
2052 		/* Now move all of the filters from the temp add list back to
2053 		 * the VSI's list.
2054 		 */
2055 		spin_lock_bh(&vsi->mac_filter_list_lock);
2056 		list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
2057 			list_move_tail(&f->list, &vsi->mac_filter_list);
2058 		}
2059 		spin_unlock_bh(&vsi->mac_filter_list_lock);
2060 		kfree(add_list);
2061 		add_list = NULL;
2062 	}
2063 
2064 	/* Check to see if we can drop out of overflow promiscuous mode. */
2065 	if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
2066 	    (vsi->active_filters < vsi->promisc_threshold)) {
2067 		int failed_count = 0;
2068 		/* See if we have any failed filters. We can't drop out of
2069 		 * promiscuous until these have all been deleted.
2070 		 */
2071 		spin_lock_bh(&vsi->mac_filter_list_lock);
2072 		list_for_each_entry(f, &vsi->mac_filter_list, list) {
2073 			if (f->state == I40E_FILTER_FAILED)
2074 				failed_count++;
2075 		}
2076 		spin_unlock_bh(&vsi->mac_filter_list_lock);
2077 		if (!failed_count) {
2078 			dev_info(&pf->pdev->dev,
2079 				 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2080 				 vsi_name);
2081 			clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
2082 			promisc_changed = true;
2083 			vsi->promisc_threshold = 0;
2084 		}
2085 	}
2086 
2087 	/* if the VF is not trusted do not do promisc */
2088 	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2089 		clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
2090 		goto out;
2091 	}
2092 
2093 	/* check for changes in promiscuous modes */
2094 	if (changed_flags & IFF_ALLMULTI) {
2095 		bool cur_multipromisc;
2096 
2097 		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2098 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2099 							       vsi->seid,
2100 							       cur_multipromisc,
2101 							       NULL);
2102 		if (aq_ret) {
2103 			retval = i40e_aq_rc_to_posix(aq_ret,
2104 						     hw->aq.asq_last_status);
2105 			dev_info(&pf->pdev->dev,
2106 				 "set multi promisc failed on %s, err %s aq_err %s\n",
2107 				 vsi_name,
2108 				 i40e_stat_str(hw, aq_ret),
2109 				 i40e_aq_str(hw, hw->aq.asq_last_status));
2110 		}
2111 	}
2112 	if ((changed_flags & IFF_PROMISC) ||
2113 	    (promisc_changed &&
2114 	     test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {
2115 		bool cur_promisc;
2116 
2117 		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2118 			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2119 					&vsi->state));
2120 		if ((vsi->type == I40E_VSI_MAIN) &&
2121 		    (pf->lan_veb != I40E_NO_VEB) &&
2122 		    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2123 			/* Set defport ON for the Main VSI instead of true
2124 			 * promisc; this way we get all unicast/multicast and
2125 			 * VLAN promisc behavior but do not get VF or VMDq
2126 			 * traffic replicated on the Main VSI.
2127 			 */
2128 			if (pf->cur_promisc != cur_promisc) {
2129 				pf->cur_promisc = cur_promisc;
2130 				if (cur_promisc)
2131 					aq_ret =
2132 					      i40e_aq_set_default_vsi(hw,
2133 								      vsi->seid,
2134 								      NULL);
2135 				else
2136 					aq_ret =
2137 					    i40e_aq_clear_default_vsi(hw,
2138 								      vsi->seid,
2139 								      NULL);
2140 				if (aq_ret) {
2141 					retval = i40e_aq_rc_to_posix(aq_ret,
2142 							hw->aq.asq_last_status);
2143 					dev_info(&pf->pdev->dev,
2144 						 "Set default VSI failed on %s, err %s, aq_err %s\n",
2145 						 vsi_name,
2146 						 i40e_stat_str(hw, aq_ret),
2147 						 i40e_aq_str(hw,
2148 						     hw->aq.asq_last_status));
2149 				}
2150 			}
2151 		} else {
2152 			aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2153 							  hw,
2154 							  vsi->seid,
2155 							  cur_promisc, NULL,
2156 							  true);
2157 			if (aq_ret) {
2158 				retval =
2159 				i40e_aq_rc_to_posix(aq_ret,
2160 						    hw->aq.asq_last_status);
2161 				dev_info(&pf->pdev->dev,
2162 					 "set unicast promisc failed on %s, err %s, aq_err %s\n",
2163 					 vsi_name,
2164 					 i40e_stat_str(hw, aq_ret),
2165 					 i40e_aq_str(hw,
2166 						     hw->aq.asq_last_status));
2167 			}
2168 			aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2169 							  hw,
2170 							  vsi->seid,
2171 							  cur_promisc, NULL);
2172 			if (aq_ret) {
2173 				retval =
2174 				i40e_aq_rc_to_posix(aq_ret,
2175 						    hw->aq.asq_last_status);
2176 				dev_info(&pf->pdev->dev,
2177 					 "set multicast promisc failed on %s, err %s, aq_err %s\n",
2178 					 vsi_name,
2179 					 i40e_stat_str(hw, aq_ret),
2180 					 i40e_aq_str(hw,
2181 						     hw->aq.asq_last_status));
2182 			}
2183 		}
2184 		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
2185 						   vsi->seid,
2186 						   cur_promisc, NULL);
2187 		if (aq_ret) {
2188 			retval = i40e_aq_rc_to_posix(aq_ret,
2189 						     pf->hw.aq.asq_last_status);
2190 			dev_info(&pf->pdev->dev,
2191 				 "set brdcast promisc failed, err %s, aq_err %s\n",
2192 					 i40e_stat_str(hw, aq_ret),
2193 					 i40e_aq_str(hw,
2194 						     hw->aq.asq_last_status));
2195 		}
2196 	}
2197 out:
2198 	/* if something went wrong then set the changed flag so we try again */
2199 	if (retval)
2200 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2201 
2202 	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
2203 	return retval;
2204 }
2205 
2206 /**
2207  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2208  * @pf: board private structure
2209  **/
2210 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2211 {
2212 	int v;
2213 
2214 	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2215 		return;
2216 	pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2217 
2218 	for (v = 0; v < pf->num_alloc_vsi; v++) {
2219 		if (pf->vsi[v] &&
2220 		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2221 			int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2222 
2223 			if (ret) {
2224 				/* come back and try again later */
2225 				pf->flags |= I40E_FLAG_FILTER_SYNC;
2226 				break;
2227 			}
2228 		}
2229 	}
2230 }
2231 
2232 /**
2233  * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2234  * @netdev: network interface device structure
2235  * @new_mtu: new value for maximum frame size
2236  *
2237  * Returns 0 on success, negative on failure
2238  **/
2239 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2240 {
2241 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2242 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2243 	struct i40e_vsi *vsi = np->vsi;
2244 
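	/* e.g. the default 1500 byte MTU yields a 1522 byte max frame:
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN)
	 */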
2245 	/* MTU < 68 is an error and causes problems on some kernels */
2246 	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
2247 		return -EINVAL;
2248 
2249 	netdev_info(netdev, "changing MTU from %d to %d\n",
2250 		    netdev->mtu, new_mtu);
2251 	netdev->mtu = new_mtu;
2252 	if (netif_running(netdev))
2253 		i40e_vsi_reinit_locked(vsi);
2254 	i40e_notify_client_of_l2_param_changes(vsi);
2255 	return 0;
2256 }
2257 
2258 /**
2259  * i40e_ioctl - Access the hwtstamp interface
2260  * @netdev: network interface device structure
2261  * @ifr: interface request data
2262  * @cmd: ioctl command
2263  **/
2264 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2265 {
2266 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2267 	struct i40e_pf *pf = np->vsi->back;
2268 
2269 	switch (cmd) {
2270 	case SIOCGHWTSTAMP:
2271 		return i40e_ptp_get_ts_config(pf, ifr);
2272 	case SIOCSHWTSTAMP:
2273 		return i40e_ptp_set_ts_config(pf, ifr);
2274 	default:
2275 		return -EOPNOTSUPP;
2276 	}
2277 }
2278 
2279 /**
2280  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2281  * @vsi: the vsi being adjusted
2282  **/
2283 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2284 {
2285 	struct i40e_vsi_context ctxt;
2286 	i40e_status ret;
2287 
2288 	if ((vsi->info.valid_sections &
2289 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2290 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2291 		return;  /* already enabled */
2292 
2293 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2294 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2295 				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2296 
2297 	ctxt.seid = vsi->seid;
2298 	ctxt.info = vsi->info;
2299 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2300 	if (ret) {
2301 		dev_info(&vsi->back->pdev->dev,
2302 			 "update vlan stripping failed, err %s aq_err %s\n",
2303 			 i40e_stat_str(&vsi->back->hw, ret),
2304 			 i40e_aq_str(&vsi->back->hw,
2305 				     vsi->back->hw.aq.asq_last_status));
2306 	}
2307 }
2308 
2309 /**
2310  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2311  * @vsi: the vsi being adjusted
2312  **/
2313 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2314 {
2315 	struct i40e_vsi_context ctxt;
2316 	i40e_status ret;
2317 
2318 	if ((vsi->info.valid_sections &
2319 	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2320 	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2321 	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
2322 		return;  /* already disabled */
2323 
2324 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2325 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2326 				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2327 
2328 	ctxt.seid = vsi->seid;
2329 	ctxt.info = vsi->info;
2330 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2331 	if (ret) {
2332 		dev_info(&vsi->back->pdev->dev,
2333 			 "update vlan stripping failed, err %s aq_err %s\n",
2334 			 i40e_stat_str(&vsi->back->hw, ret),
2335 			 i40e_aq_str(&vsi->back->hw,
2336 				     vsi->back->hw.aq.asq_last_status));
2337 	}
2338 }
2339 
2340 /**
2341  * i40e_vlan_rx_register - Setup or shutdown vlan offload
2342  * @netdev: network interface to be adjusted
2343  * @features: netdev features to test if VLAN offload is enabled or not
2344  **/
2345 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2346 {
2347 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2348 	struct i40e_vsi *vsi = np->vsi;
2349 
2350 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2351 		i40e_vlan_stripping_enable(vsi);
2352 	else
2353 		i40e_vlan_stripping_disable(vsi);
2354 }
2355 
2356 /**
2357  * i40e_vsi_add_vlan - Add vsi membership for given vlan
2358  * @vsi: the vsi being configured
2359  * @vid: vlan id to be added (0 = untagged only, -1 = any)
2360  **/
2361 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2362 {
2363 	struct i40e_mac_filter *f, *ftmp, *add_f;
2364 	bool is_netdev, is_vf;
2365 
2366 	is_vf = (vsi->type == I40E_VSI_SRIOV);
2367 	is_netdev = !!(vsi->netdev);
2368 
2369 	/* Lock once because all the functions invoked below iterate the list */
2370 	spin_lock_bh(&vsi->mac_filter_list_lock);
2371 
2372 	if (is_netdev) {
2373 		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2374 					is_vf, is_netdev);
2375 		if (!add_f) {
2376 			dev_info(&vsi->back->pdev->dev,
2377 				 "Could not add vlan filter %d for %pM\n",
2378 				 vid, vsi->netdev->dev_addr);
2379 			spin_unlock_bh(&vsi->mac_filter_list_lock);
2380 			return -ENOMEM;
2381 		}
2382 	}
2383 
2384 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
2385 		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2386 		if (!add_f) {
2387 			dev_info(&vsi->back->pdev->dev,
2388 				 "Could not add vlan filter %d for %pM\n",
2389 				 vid, f->macaddr);
2390 			spin_unlock_bh(&vsi->mac_filter_list_lock);
2391 			return -ENOMEM;
2392 		}
2393 	}
2394 
2395 	/* When adding the first real VLAN tag, check whether an "any"
2396 	 * (-1) filter exists and if so replace it with a VLAN 0 filter,
2397 	 * so we then accept untagged plus the specified tagged traffic
2398 	 * (and not all tags along with untagged)
2399 	 */
2400 	if (vid > 0) {
2401 		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2402 						  I40E_VLAN_ANY,
2403 						  is_vf, is_netdev)) {
2404 			i40e_del_filter(vsi, vsi->netdev->dev_addr,
2405 					I40E_VLAN_ANY, is_vf, is_netdev);
2406 			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
2407 						is_vf, is_netdev);
2408 			if (!add_f) {
2409 				dev_info(&vsi->back->pdev->dev,
2410 					 "Could not add filter 0 for %pM\n",
2411 					 vsi->netdev->dev_addr);
2412 				spin_unlock_bh(&vsi->mac_filter_list_lock);
2413 				return -ENOMEM;
2414 			}
2415 		}
2416 	}
2417 
2418 	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2419 	if (vid > 0 && !vsi->info.pvid) {
2420 		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
2421 			if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2422 					      is_vf, is_netdev))
2423 				continue;
2424 			i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2425 					is_vf, is_netdev);
2426 			add_f = i40e_add_filter(vsi, f->macaddr,
2427 						0, is_vf, is_netdev);
2428 			if (!add_f) {
2429 				dev_info(&vsi->back->pdev->dev,
2430 					 "Could not add filter 0 for %pM\n",
2431 					f->macaddr);
2432 				spin_unlock_bh(&vsi->mac_filter_list_lock);
2433 				return -ENOMEM;
2434 			}
2435 		}
2436 	}
2437 
2438 	spin_unlock_bh(&vsi->mac_filter_list_lock);
2439 
2440 	/* schedule our worker thread which will take care of
2441 	 * applying the new filter changes
2442 	 */
2443 	i40e_service_event_schedule(vsi->back);
2444 	return 0;
2445 }
2446 
2447 /**
2448  * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2449  * @vsi: the vsi being configured
2450  * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2451  *
2452  * Return: 0 on success or negative otherwise
2453  **/
2454 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2455 {
2456 	struct net_device *netdev = vsi->netdev;
2457 	struct i40e_mac_filter *f, *ftmp, *add_f;
2458 	bool is_vf, is_netdev;
2459 	int filter_count = 0;
2460 
2461 	is_vf = (vsi->type == I40E_VSI_SRIOV);
2462 	is_netdev = !!(netdev);
2463 
2464 	/* Lock once because all the functions invoked below iterate the list */
2465 	spin_lock_bh(&vsi->mac_filter_list_lock);
2466 
2467 	if (is_netdev)
2468 		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
2469 
2470 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
2471 		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2472 
2473 	/* go through all the filters for this VSI and if only vid == 0
2474 	 * filters remain, there are no other tagged filters, so vid 0
2475 	 * must be replaced with -1. From then on we accept any traffic,
2476 	 * whether tagged or untagged
2477 	 */
2478 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
2479 		if (is_netdev) {
2480 			if (f->vlan &&
2481 			    ether_addr_equal(netdev->dev_addr, f->macaddr))
2482 				filter_count++;
2483 		}
2484 
2485 		if (f->vlan)
2486 			filter_count++;
2487 	}
2488 
2489 	if (!filter_count && is_netdev) {
2490 		i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
2491 		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
2492 				    is_vf, is_netdev);
2493 		if (!f) {
2494 			dev_info(&vsi->back->pdev->dev,
2495 				 "Could not add filter %d for %pM\n",
2496 				 I40E_VLAN_ANY, netdev->dev_addr);
2497 			spin_unlock_bh(&vsi->mac_filter_list_lock);
2498 			return -ENOMEM;
2499 		}
2500 	}
2501 
2502 	if (!filter_count) {
2503 		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
2504 			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
2505 			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2506 						is_vf, is_netdev);
2507 			if (!add_f) {
2508 				dev_info(&vsi->back->pdev->dev,
2509 					 "Could not add filter %d for %pM\n",
2510 					 I40E_VLAN_ANY, f->macaddr);
2511 				spin_unlock_bh(&vsi->mac_filter_list_lock);
2512 				return -ENOMEM;
2513 			}
2514 		}
2515 	}
2516 
2517 	spin_unlock_bh(&vsi->mac_filter_list_lock);
2518 
2519 	/* schedule our worker thread which will take care of
2520 	 * applying the new filter changes
2521 	 */
2522 	i40e_service_event_schedule(vsi->back);
2523 	return 0;
2524 }
2525 
2526 /**
2527  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2528  * @netdev: network interface to be adjusted
2529  * @vid: vlan id to be added
2530  *
2531  * net_device_ops implementation for adding vlan ids
2532  **/
2533 #ifdef I40E_FCOE
2534 int i40e_vlan_rx_add_vid(struct net_device *netdev,
2535 			 __always_unused __be16 proto, u16 vid)
2536 #else
2537 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2538 				__always_unused __be16 proto, u16 vid)
2539 #endif
2540 {
2541 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2542 	struct i40e_vsi *vsi = np->vsi;
2543 	int ret = 0;
2544 
2545 	if (vid > 4095)
2546 		return -EINVAL;
2547 
2548 	/* If the network stack called us with vid = 0 then
2549 	 * it is asking to receive priority tagged packets with
2550 	 * vlan id 0.  Our HW receives them by default when configured
2551 	 * to receive untagged packets so there is no need to add an
2552 	 * extra filter for vlan 0 tagged packets.
2553 	 */
2554 	if (vid)
2555 		ret = i40e_vsi_add_vlan(vsi, vid);
2556 
2557 	if (!ret && (vid < VLAN_N_VID))
2558 		set_bit(vid, vsi->active_vlans);
2559 
2560 	return ret;
2561 }
2562 
2563 /**
2564  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2565  * @netdev: network interface to be adjusted
2566  * @vid: vlan id to be removed
2567  *
2568  * net_device_ops implementation for removing vlan ids
2569  **/
2570 #ifdef I40E_FCOE
2571 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2572 			  __always_unused __be16 proto, u16 vid)
2573 #else
2574 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2575 				 __always_unused __be16 proto, u16 vid)
2576 #endif
2577 {
2578 	struct i40e_netdev_priv *np = netdev_priv(netdev);
2579 	struct i40e_vsi *vsi = np->vsi;
2580 
2581 	/* the return code is ignored as there is nothing a user
2582 	 * can do about a failure to remove, and a log message was
2583 	 * already printed by i40e_vsi_kill_vlan()
2584 	 */
2585 	i40e_vsi_kill_vlan(vsi, vid);
2586 
2587 	clear_bit(vid, vsi->active_vlans);
2588 
2589 	return 0;
2590 }
2591 
2592 /**
2593  * i40e_macaddr_init - explicitly write the mac address filters
2594  *
2595  * @vsi: pointer to the vsi
2596  * @macaddr: the MAC address
2597  *
2598  * This is needed when the macaddr has been obtained by other
2599  * means than the default, e.g., from Open Firmware or IDPROM.
2600  * Returns 0 on success, negative on failure
2601  **/
2602 static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
2603 {
2604 	int ret;
2605 	struct i40e_aqc_add_macvlan_element_data element;
2606 
2607 	ret = i40e_aq_mac_address_write(&vsi->back->hw,
2608 					I40E_AQC_WRITE_TYPE_LAA_WOL,
2609 					macaddr, NULL);
2610 	if (ret) {
2611 		dev_info(&vsi->back->pdev->dev,
2612 			 "Addr change for VSI failed: %d\n", ret);
2613 		return -EADDRNOTAVAIL;
2614 	}
2615 
2616 	memset(&element, 0, sizeof(element));
2617 	ether_addr_copy(element.mac_addr, macaddr);
2618 	element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
2619 	ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
2620 	if (ret) {
2621 		dev_info(&vsi->back->pdev->dev,
2622 			 "add filter failed err %s aq_err %s\n",
2623 			 i40e_stat_str(&vsi->back->hw, ret),
2624 			 i40e_aq_str(&vsi->back->hw,
2625 				     vsi->back->hw.aq.asq_last_status));
2626 	}
2627 	return ret;
2628 }
2629 
2630 /**
2631  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2632  * @vsi: the vsi being brought back up
2633  **/
2634 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2635 {
2636 	u16 vid;
2637 
2638 	if (!vsi->netdev)
2639 		return;
2640 
2641 	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2642 
2643 	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2644 		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2645 				     vid);
2646 }
2647 
2648 /**
2649  * i40e_vsi_add_pvid - Add pvid for the VSI
2650  * @vsi: the vsi being adjusted
2651  * @vid: the vlan id to set as a PVID
2652  **/
2653 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2654 {
2655 	struct i40e_vsi_context ctxt;
2656 	i40e_status ret;
2657 
2658 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2659 	vsi->info.pvid = cpu_to_le16(vid);
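	/* allow tagged frames only, insert the PVID on transmit, and
	 * strip the VLAN tag on receive
	 */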
2660 	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2661 				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
2662 				    I40E_AQ_VSI_PVLAN_EMOD_STR;
2663 
2664 	ctxt.seid = vsi->seid;
2665 	ctxt.info = vsi->info;
2666 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2667 	if (ret) {
2668 		dev_info(&vsi->back->pdev->dev,
2669 			 "add pvid failed, err %s aq_err %s\n",
2670 			 i40e_stat_str(&vsi->back->hw, ret),
2671 			 i40e_aq_str(&vsi->back->hw,
2672 				     vsi->back->hw.aq.asq_last_status));
2673 		return -ENOENT;
2674 	}
2675 
2676 	return 0;
2677 }
2678 
2679 /**
2680  * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2681  * @vsi: the vsi being adjusted
2682  *
2683  * Just disable VLAN stripping to put the VSI back to its normal state
2684  **/
2685 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2686 {
2687 	i40e_vlan_stripping_disable(vsi);
2688 
2689 	vsi->info.pvid = 0;
2690 }
2691 
2692 /**
2693  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2694  * @vsi: ptr to the VSI
2695  *
2696  * If this function returns with an error, then it's possible one or
2697  * more of the rings is populated (while the rest are not).  It is the
2698  * caller's duty to clean those orphaned rings.
2699  *
2700  * Return 0 on success, negative on failure
2701  **/
2702 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2703 {
2704 	int i, err = 0;
2705 
2706 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2707 		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2708 
2709 	return err;
2710 }
2711 
2712 /**
2713  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2714  * @vsi: ptr to the VSI
2715  *
2716  * Free VSI's transmit software resources
2717  **/
2718 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2719 {
2720 	int i;
2721 
2722 	if (!vsi->tx_rings)
2723 		return;
2724 
2725 	for (i = 0; i < vsi->num_queue_pairs; i++)
2726 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2727 			i40e_free_tx_resources(vsi->tx_rings[i]);
2728 }
2729 
2730 /**
2731  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2732  * @vsi: ptr to the VSI
2733  *
2734  * If this function returns with an error, then it's possible one or
2735  * more of the rings is populated (while the rest are not).  It is the
2736  * caller's duty to clean those orphaned rings.
2737  *
2738  * Return 0 on success, negative on failure
2739  **/
2740 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2741 {
2742 	int i, err = 0;
2743 
2744 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2745 		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2746 #ifdef I40E_FCOE
2747 	i40e_fcoe_setup_ddp_resources(vsi);
2748 #endif
2749 	return err;
2750 }
2751 
2752 /**
2753  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2754  * @vsi: ptr to the VSI
2755  *
2756  * Free all receive software resources
2757  **/
2758 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2759 {
2760 	int i;
2761 
2762 	if (!vsi->rx_rings)
2763 		return;
2764 
2765 	for (i = 0; i < vsi->num_queue_pairs; i++)
2766 		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2767 			i40e_free_rx_resources(vsi->rx_rings[i]);
2768 #ifdef I40E_FCOE
2769 	i40e_fcoe_free_ddp_resources(vsi);
2770 #endif
2771 }
2772 
2773 /**
2774  * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2775  * @ring: The Tx ring to configure
2776  *
2777  * This enables/disables XPS for a given Tx descriptor ring
2778  * based on the TCs enabled for the VSI that ring belongs to.
2779  **/
2780 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2781 {
2782 	struct i40e_vsi *vsi = ring->vsi;
2783 	cpumask_var_t mask;
2784 
2785 	if (!ring->q_vector || !ring->netdev)
2786 		return;
2787 
2788 	/* Single TC mode enable XPS */
2789 	if (vsi->tc_config.numtc <= 1) {
2790 		if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2791 			netif_set_xps_queue(ring->netdev,
2792 					    &ring->q_vector->affinity_mask,
2793 					    ring->queue_index);
2794 	} else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2795 		/* Disable XPS to allow selection based on TC */
2796 		bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2797 		netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2798 		free_cpumask_var(mask);
2799 	}
2805 }
2806 
2807 /**
2808  * i40e_configure_tx_ring - Configure a transmit ring context
2809  * @ring: The Tx ring to configure
2810  *
2811  * Configure the Tx descriptor ring in the HMC context.
2812  **/
2813 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2814 {
2815 	struct i40e_vsi *vsi = ring->vsi;
2816 	u16 pf_q = vsi->base_queue + ring->queue_index;
2817 	struct i40e_hw *hw = &vsi->back->hw;
2818 	struct i40e_hmc_obj_txq tx_ctx;
2819 	i40e_status err = 0;
2820 	u32 qtx_ctl = 0;
2821 
2822 	/* some ATR related tx ring init */
2823 	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2824 		ring->atr_sample_rate = vsi->back->atr_sample_rate;
2825 		ring->atr_count = 0;
2826 	} else {
2827 		ring->atr_sample_rate = 0;
2828 	}
2829 
2830 	/* configure XPS */
2831 	i40e_config_xps_tx_ring(ring);
2832 
2833 	/* clear the context structure first */
2834 	memset(&tx_ctx, 0, sizeof(tx_ctx));
2835 
2836 	tx_ctx.new_context = 1;
2837 	tx_ctx.base = (ring->dma / 128);
2838 	tx_ctx.qlen = ring->count;
2839 	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2840 					       I40E_FLAG_FD_ATR_ENABLED));
2841 #ifdef I40E_FCOE
2842 	tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2843 #endif
2844 	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2845 	/* FDIR VSI tx ring can still use RS bit and writebacks */
2846 	if (vsi->type != I40E_VSI_FDIR)
2847 		tx_ctx.head_wb_ena = 1;
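	/* the head write-back area lands immediately after the last
	 * descriptor in the ring's DMA region
	 */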
2848 	tx_ctx.head_wb_addr = ring->dma +
2849 			      (ring->count * sizeof(struct i40e_tx_desc));
2850 
2851 	/* As part of VSI creation/update, FW allocates certain
2852 	 * Tx arbitration queue sets for each TC enabled for
2853 	 * the VSI. The FW returns the handles to these queue
2854 	 * sets as part of the response buffer to Add VSI,
2855 	 * Update VSI, etc. AQ commands. It is expected that
2856 	 * these queue set handles be associated with the Tx
2857 	 * queues by the driver as part of the TX queue context
2858 	 * initialization. This has to be done regardless of
2859 	 * DCB as by default everything is mapped to TC0.
2860 	 */
2861 	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2862 	tx_ctx.rdylist_act = 0;
2863 
2864 	/* clear the context in the HMC */
2865 	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2866 	if (err) {
2867 		dev_info(&vsi->back->pdev->dev,
2868 			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2869 			 ring->queue_index, pf_q, err);
2870 		return -ENOMEM;
2871 	}
2872 
2873 	/* set the context in the HMC */
2874 	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2875 	if (err) {
2876 		dev_info(&vsi->back->pdev->dev,
2877 			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2878 			 ring->queue_index, pf_q, err);
2879 		return -ENOMEM;
2880 	}
2881 
2882 	/* Now associate this queue with this PCI function */
2883 	if (vsi->type == I40E_VSI_VMDQ2) {
2884 		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2885 		qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2886 			   I40E_QTX_CTL_VFVM_INDX_MASK;
2887 	} else {
2888 		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2889 	}
2890 
2891 	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2892 		    I40E_QTX_CTL_PF_INDX_MASK);
2893 	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2894 	i40e_flush(hw);
2895 
2896 	/* cache the tail register address for easier writes later */
2897 	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2898 
2899 	return 0;
2900 }
2901 
2902 /**
2903  * i40e_configure_rx_ring - Configure a receive ring context
2904  * @ring: The Rx ring to configure
2905  *
2906  * Configure the Rx descriptor ring in the HMC context.
2907  **/
2908 static int i40e_configure_rx_ring(struct i40e_ring *ring)
2909 {
2910 	struct i40e_vsi *vsi = ring->vsi;
2911 	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2912 	u16 pf_q = vsi->base_queue + ring->queue_index;
2913 	struct i40e_hw *hw = &vsi->back->hw;
2914 	struct i40e_hmc_obj_rxq rx_ctx;
2915 	i40e_status err = 0;
2916 
2917 	ring->state = 0;
2918 
2919 	/* clear the context structure first */
2920 	memset(&rx_ctx, 0, sizeof(rx_ctx));
2921 
2922 	ring->rx_buf_len = vsi->rx_buf_len;
2923 
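	/* dbuff expresses the buffer length in units of
	 * (1 << I40E_RXQ_CTX_DBUFF_SHIFT) bytes
	 */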
2924 	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2925 
2926 	rx_ctx.base = (ring->dma / 128);
2927 	rx_ctx.qlen = ring->count;
2928 
2929 	/* use 32 byte descriptors */
2930 	rx_ctx.dsize = 1;
2931 
2932 	/* descriptor type is always zero
2933 	 * rx_ctx.dtype = 0;
2934 	 */
2935 	rx_ctx.hsplit_0 = 0;
2936 
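	/* cap the max receive frame at what the buffer chain can hold */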
2937 	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
2938 	if (hw->revision_id == 0)
2939 		rx_ctx.lrxqthresh = 0;
2940 	else
2941 		rx_ctx.lrxqthresh = 2;
2942 	rx_ctx.crcstrip = 1;
2943 	rx_ctx.l2tsel = 1;
2944 	/* this controls whether VLAN is stripped from inner headers */
2945 	rx_ctx.showiv = 0;
2946 #ifdef I40E_FCOE
2947 	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2948 #endif
2949 	/* set the prefena field to 1 because the manual says to */
2950 	rx_ctx.prefena = 1;
2951 
2952 	/* clear the context in the HMC */
2953 	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2954 	if (err) {
2955 		dev_info(&vsi->back->pdev->dev,
2956 			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2957 			 ring->queue_index, pf_q, err);
2958 		return -ENOMEM;
2959 	}
2960 
2961 	/* set the context in the HMC */
2962 	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2963 	if (err) {
2964 		dev_info(&vsi->back->pdev->dev,
2965 			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2966 			 ring->queue_index, pf_q, err);
2967 		return -ENOMEM;
2968 	}
2969 
2970 	/* cache tail for quicker writes, and clear the reg before use */
2971 	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2972 	writel(0, ring->tail);
2973 
2974 	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2975 
2976 	return 0;
2977 }
2978 
2979 /**
2980  * i40e_vsi_configure_tx - Configure the VSI for Tx
2981  * @vsi: VSI structure describing this set of rings and resources
2982  *
2983  * Configure the Tx VSI for operation.
2984  **/
2985 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2986 {
2987 	int err = 0;
2988 	u16 i;
2989 
2990 	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2991 		err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2992 
2993 	return err;
2994 }
2995 
2996 /**
2997  * i40e_vsi_configure_rx - Configure the VSI for Rx
2998  * @vsi: the VSI being configured
2999  *
3000  * Configure the Rx VSI for operation.
3001  **/
3002 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3003 {
3004 	int err = 0;
3005 	u16 i;
3006 
3007 	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
3008 		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
3009 			       + ETH_FCS_LEN + VLAN_HLEN;
3010 	else
3011 		vsi->max_frame = I40E_RXBUFFER_2048;
3012 
3013 	vsi->rx_buf_len = I40E_RXBUFFER_2048;
3014 
3015 #ifdef I40E_FCOE
3016 	/* setup rx buffer for FCoE */
3017 	if ((vsi->type == I40E_VSI_FCOE) &&
3018 	    (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
3019 		vsi->rx_buf_len = I40E_RXBUFFER_3072;
3020 		vsi->max_frame = I40E_RXBUFFER_3072;
3021 	}
3022 
3023 #endif /* I40E_FCOE */
3024 	/* round up for the chip's needs */
3025 	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
3026 				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3027 
3028 	/* set up individual rings */
3029 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3030 		err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3031 
3032 	return err;
3033 }
3034 
3035 /**
3036  * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3037  * @vsi: ptr to the VSI
3038  **/
3039 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3040 {
3041 	struct i40e_ring *tx_ring, *rx_ring;
3042 	u16 qoffset, qcount;
3043 	int i, n;
3044 
3045 	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3046 		/* Reset the TC information */
3047 		for (i = 0; i < vsi->num_queue_pairs; i++) {
3048 			rx_ring = vsi->rx_rings[i];
3049 			tx_ring = vsi->tx_rings[i];
3050 			rx_ring->dcb_tc = 0;
3051 			tx_ring->dcb_tc = 0;
3052 		}
3053 	}
3054 
3055 	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3056 		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3057 			continue;
3058 
3059 		qoffset = vsi->tc_config.tc_info[n].qoffset;
3060 		qcount = vsi->tc_config.tc_info[n].qcount;
3061 		for (i = qoffset; i < (qoffset + qcount); i++) {
3062 			rx_ring = vsi->rx_rings[i];
3063 			tx_ring = vsi->tx_rings[i];
3064 			rx_ring->dcb_tc = n;
3065 			tx_ring->dcb_tc = n;
3066 		}
3067 	}
3068 }
3069 
3070 /**
3071  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3072  * @vsi: ptr to the VSI
3073  **/
3074 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3075 {
3076 	struct i40e_pf *pf = vsi->back;
3077 	int err;
3078 
3079 	if (vsi->netdev)
3080 		i40e_set_rx_mode(vsi->netdev);
3081 
3082 	if (pf->flags & I40E_FLAG_PF_MAC) {
3083 		err = i40e_macaddr_init(vsi, pf->hw.mac.addr);
3084 		if (err) {
3085 			dev_warn(&pf->pdev->dev,
3086 				 "could not set up macaddr; err %d\n", err);
3087 		}
3088 	}
3089 }
3090 
3091 /**
3092  * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3093  * @vsi: Pointer to the targeted VSI
3094  *
3095  * This function replays the hlist on the hw where all the SB Flow Director
3096  * filters were saved.
3097  **/
3098 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3099 {
3100 	struct i40e_fdir_filter *filter;
3101 	struct i40e_pf *pf = vsi->back;
3102 	struct hlist_node *node;
3103 
3104 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3105 		return;
3106 
3107 	hlist_for_each_entry_safe(filter, node,
3108 				  &pf->fdir_filter_list, fdir_node) {
3109 		i40e_add_del_fdir(vsi, filter, true);
3110 	}
3111 }
3112 
3113 /**
3114  * i40e_vsi_configure - Set up the VSI for action
3115  * @vsi: the VSI being configured
3116  **/
3117 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3118 {
3119 	int err;
3120 
3121 	i40e_set_vsi_rx_mode(vsi);
3122 	i40e_restore_vlan(vsi);
3123 	i40e_vsi_config_dcb_rings(vsi);
3124 	err = i40e_vsi_configure_tx(vsi);
3125 	if (!err)
3126 		err = i40e_vsi_configure_rx(vsi);
3127 
3128 	return err;
3129 }
3130 
3131 /**
3132  * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3133  * @vsi: the VSI being configured
3134  **/
3135 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3136 {
3137 	struct i40e_pf *pf = vsi->back;
3138 	struct i40e_hw *hw = &pf->hw;
3139 	u16 vector;
3140 	int i, q;
3141 	u32 qp;
3142 
3143 	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
3144 	 * and PFINT_LNKLSTn registers, e.g.:
3145 	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
3146 	 */
3147 	qp = vsi->base_queue;
3148 	vector = vsi->base_vector;
3149 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3150 		struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3151 
3152 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
3153 		q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
3154 		q_vector->rx.latency_range = I40E_LOW_LATENCY;
3155 		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3156 		     q_vector->rx.itr);
3157 		q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
3158 		q_vector->tx.latency_range = I40E_LOW_LATENCY;
3159 		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3160 		     q_vector->tx.itr);
3161 		wr32(hw, I40E_PFINT_RATEN(vector - 1),
3162 		     INTRL_USEC_TO_REG(vsi->int_rate_limit));
3163 
3164 		/* Linked list for the queuepairs assigned to this vector */
3165 		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
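		/* each Rx cause chains to its paired Tx queue and each Tx
		 * cause to the next qpair's Rx queue; the last Tx entry
		 * terminates the list
		 */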
3166 		for (q = 0; q < q_vector->num_ringpairs; q++) {
3167 			u32 val;
3168 
3169 			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3170 			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
3171 			      (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3172 			      (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3173 			      (I40E_QUEUE_TYPE_TX
3174 				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3175 
3176 			wr32(hw, I40E_QINT_RQCTL(qp), val);
3177 
3178 			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3179 			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
3180 			      (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3181 			      ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
3182 			      (I40E_QUEUE_TYPE_RX
3183 				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3184 
3185 			/* Terminate the linked list */
3186 			if (q == (q_vector->num_ringpairs - 1))
3187 				val |= (I40E_QUEUE_END_OF_LIST
3188 					   << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3189 
3190 			wr32(hw, I40E_QINT_TQCTL(qp), val);
3191 			qp++;
3192 		}
3193 	}
3194 
3195 	i40e_flush(hw);
3196 }
3197 
3198 /**
3199  * i40e_enable_misc_int_causes - enable the non-queue interrupts
3200  * @pf: board private structure
3201  **/
3202 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3203 {
3204 	struct i40e_hw *hw = &pf->hw;
3205 	u32 val;
3206 
3207 	/* clear things first */
3208 	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
3209 	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
3210 
3211 	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
3212 	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
3213 	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
3214 	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3215 	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
3216 	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
3217 	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
3218 	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3219 
3220 	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3221 		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3222 
3223 	if (pf->flags & I40E_FLAG_PTP)
3224 		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3225 
3226 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
3227 
3228 	/* SW_ITR_IDX = 0, but don't change INTENA */
3229 	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3230 					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3231 
3232 	/* OTHER_ITR_IDX = 0 */
3233 	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3234 }
3235 
3236 /**
3237  * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3238  * @vsi: the VSI being configured
3239  **/
3240 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3241 {
3242 	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3243 	struct i40e_pf *pf = vsi->back;
3244 	struct i40e_hw *hw = &pf->hw;
3245 	u32 val;
3246 
3247 	/* set the ITR configuration */
3248 	q_vector->itr_countdown = ITR_COUNTDOWN_START;
3249 	q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
3250 	q_vector->rx.latency_range = I40E_LOW_LATENCY;
3251 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
3252 	q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
3253 	q_vector->tx.latency_range = I40E_LOW_LATENCY;
3254 	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
3255 
3256 	i40e_enable_misc_int_causes(pf);
3257 
3258 	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3259 	wr32(hw, I40E_PFINT_LNKLST0, 0);
3260 
3261 	/* Associate the queue pair to the vector and enable the queue int */
3262 	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK		      |
3263 	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3264 	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3265 
3266 	wr32(hw, I40E_QINT_RQCTL(0), val);
3267 
3268 	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		      |
3269 	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3270 	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3271 
3272 	wr32(hw, I40E_QINT_TQCTL(0), val);
3273 	i40e_flush(hw);
3274 }
3275 
3276 /**
3277  * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3278  * @pf: board private structure
3279  **/
3280 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3281 {
3282 	struct i40e_hw *hw = &pf->hw;
3283 
3284 	wr32(hw, I40E_PFINT_DYN_CTL0,
3285 	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3286 	i40e_flush(hw);
3287 }
3288 
3289 /**
3290  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3291  * @pf: board private structure
3292  * @clearpba: true when all pending interrupt events should be cleared
3293  **/
3294 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
3295 {
3296 	struct i40e_hw *hw = &pf->hw;
3297 	u32 val;
3298 
3299 	val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
3300 	      (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
3301 	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3302 
3303 	wr32(hw, I40E_PFINT_DYN_CTL0, val);
3304 	i40e_flush(hw);
3305 }
3306 
3307 /**
3308  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3309  * @irq: interrupt number
3310  * @data: pointer to a q_vector
3311  **/
3312 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3313 {
3314 	struct i40e_q_vector *q_vector = data;
3315 
3316 	if (!q_vector->tx.ring && !q_vector->rx.ring)
3317 		return IRQ_HANDLED;
3318 
3319 	napi_schedule_irqoff(&q_vector->napi);
3320 
3321 	return IRQ_HANDLED;
3322 }
3323 
3324 /**
3325  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3326  * @vsi: the VSI being configured
3327  * @basename: name for the vector
3328  *
3329  * Allocates MSI-X vectors and requests interrupts from the kernel.
3330  **/
3331 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3332 {
3333 	int q_vectors = vsi->num_q_vectors;
3334 	struct i40e_pf *pf = vsi->back;
3335 	int base = vsi->base_vector;
3336 	int rx_int_idx = 0;
3337 	int tx_int_idx = 0;
3338 	int vector, err;
3339 
3340 	for (vector = 0; vector < q_vectors; vector++) {
3341 		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3342 
3343 		if (q_vector->tx.ring && q_vector->rx.ring) {
3344 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3345 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3346 			tx_int_idx++;
3347 		} else if (q_vector->rx.ring) {
3348 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3349 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
3350 		} else if (q_vector->tx.ring) {
3351 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3352 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
3353 		} else {
3354 			/* skip this unused q_vector */
3355 			continue;
3356 		}
3357 		err = request_irq(pf->msix_entries[base + vector].vector,
3358 				  vsi->irq_handler,
3359 				  0,
3360 				  q_vector->name,
3361 				  q_vector);
3362 		if (err) {
3363 			dev_info(&pf->pdev->dev,
3364 				 "MSIX request_irq failed, error: %d\n", err);
3365 			goto free_queue_irqs;
3366 		}
3367 		/* assign the mask for this irq */
3368 		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3369 				      &q_vector->affinity_mask);
3370 	}
3371 
3372 	vsi->irqs_ready = true;
3373 	return 0;
3374 
3375 free_queue_irqs:
3376 	while (vector) {
3377 		vector--;
3378 		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3379 				      NULL);
3380 		free_irq(pf->msix_entries[base + vector].vector,
3381 			 vsi->q_vectors[vector]);
3382 	}
3383 	return err;
3384 }
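
/* Example (illustrative, hypothetical names): with a basename such as
 * "i40e-eth0", the naming above yields entries like these in
 * /proc/interrupts once the vectors are requested:
 *
 *	i40e-eth0-TxRx-0
 *	i40e-eth0-TxRx-1
 *	i40e-eth0-rx-2		Rx-only vector
 *	i40e-eth0-tx-2		Tx-only vector
 */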
3385 
3386 /**
3387  * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3388  * @vsi: the VSI being un-configured
3389  **/
3390 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3391 {
3392 	struct i40e_pf *pf = vsi->back;
3393 	struct i40e_hw *hw = &pf->hw;
3394 	int base = vsi->base_vector;
3395 	int i;
3396 
3397 	for (i = 0; i < vsi->num_queue_pairs; i++) {
3398 		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3399 		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3400 	}
3401 
3402 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3403 		for (i = vsi->base_vector;
3404 		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
3405 			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3406 
3407 		i40e_flush(hw);
3408 		for (i = 0; i < vsi->num_q_vectors; i++)
3409 			synchronize_irq(pf->msix_entries[i + base].vector);
3410 	} else {
3411 		/* Legacy and MSI mode - this stops all interrupt handling */
3412 		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3413 		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3414 		i40e_flush(hw);
3415 		synchronize_irq(pf->pdev->irq);
3416 	}
3417 }
3418 
3419 /**
3420  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3421  * @vsi: the VSI being configured
3422  **/
3423 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3424 {
3425 	struct i40e_pf *pf = vsi->back;
3426 	int i;
3427 
3428 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3429 		for (i = 0; i < vsi->num_q_vectors; i++)
3430 			i40e_irq_dynamic_enable(vsi, i);
3431 	} else {
3432 		i40e_irq_dynamic_enable_icr0(pf, true);
3433 	}
3434 
3435 	i40e_flush(&pf->hw);
3436 	return 0;
3437 }
3438 
3439 /**
3440  * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3441  * @pf: board private structure
3442  **/
3443 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3444 {
3445 	/* Disable ICR 0 */
3446 	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3447 	i40e_flush(&pf->hw);
3448 }
3449 
3450 /**
3451  * i40e_intr - MSI/Legacy and non-queue interrupt handler
3452  * @irq: interrupt number
3453  * @data: pointer to the PF structure
3454  *
3455  * This is the handler used for all MSI/Legacy interrupts, and deals
3456  * with both queue and non-queue interrupts.  This is also used in
3457  * MSIX mode to handle the non-queue interrupts.
3458  **/
3459 static irqreturn_t i40e_intr(int irq, void *data)
3460 {
3461 	struct i40e_pf *pf = (struct i40e_pf *)data;
3462 	struct i40e_hw *hw = &pf->hw;
3463 	irqreturn_t ret = IRQ_NONE;
3464 	u32 icr0, icr0_remaining;
3465 	u32 val, ena_mask;
3466 
3467 	icr0 = rd32(hw, I40E_PFINT_ICR0);
3468 	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3469 
3470 	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
3471 	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3472 		goto enable_intr;
3473 
3474 	/* if interrupt but no bits showing, must be SWINT */
3475 	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3476 	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3477 		pf->sw_int_count++;
3478 
3479 	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3480 	    (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3481 		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3482 		icr0 &= ~I40E_PFINT_ICR0_PE_CRITERR_MASK;
3483 		dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
3484 	}
3485 
3486 	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3487 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3488 		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3489 		struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3490 
3491 		/* We do not have a way to disarm Queue causes while leaving
3492 		 * interrupt enabled for all other causes, ideally
3493 		 * interrupt should be disabled while we are in NAPI but
3494 		 * this is not a performance path and napi_schedule()
3495 		 * can deal with rescheduling.
3496 		 */
3497 		if (!test_bit(__I40E_DOWN, &pf->state))
3498 			napi_schedule_irqoff(&q_vector->napi);
3499 	}
3500 
3501 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3502 		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3503 		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3504 		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3505 	}
3506 
3507 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3508 		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3509 		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3510 	}
3511 
3512 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3513 		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3514 		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3515 	}
3516 
3517 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3518 		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3519 			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3520 		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3521 		val = rd32(hw, I40E_GLGEN_RSTAT);
3522 		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3523 		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3524 		if (val == I40E_RESET_CORER) {
3525 			pf->corer_count++;
3526 		} else if (val == I40E_RESET_GLOBR) {
3527 			pf->globr_count++;
3528 		} else if (val == I40E_RESET_EMPR) {
3529 			pf->empr_count++;
3530 			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
3531 		}
3532 	}
3533 
3534 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3535 		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3536 		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3537 		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3538 			 rd32(hw, I40E_PFHMC_ERRORINFO),
3539 			 rd32(hw, I40E_PFHMC_ERRORDATA));
3540 	}
3541 
3542 	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3543 		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3544 
3545 		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3546 			icr0 &= ~I40E_PFINT_ICR0_TIMESYNC_MASK;
3547 			i40e_ptp_tx_hwtstamp(pf);
3548 		}
3549 	}
3550 
3551 	/* If a critical error is pending we have no choice but to reset the
3552 	 * device.
3553 	 * Report and mask out any remaining unexpected interrupts.
3554 	 */
3555 	icr0_remaining = icr0 & ena_mask;
3556 	if (icr0_remaining) {
3557 		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3558 			 icr0_remaining);
3559 		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3560 		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3561 		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3562 			dev_info(&pf->pdev->dev, "device will be reset\n");
3563 			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3564 			i40e_service_event_schedule(pf);
3565 		}
3566 		ena_mask &= ~icr0_remaining;
3567 	}
3568 	ret = IRQ_HANDLED;
3569 
3570 enable_intr:
3571 	/* re-enable interrupt causes */
3572 	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3573 	if (!test_bit(__I40E_DOWN, &pf->state)) {
3574 		i40e_service_event_schedule(pf);
3575 		i40e_irq_dynamic_enable_icr0(pf, false);
3576 	}
3577 
3578 	return ret;
3579 }
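
/* Illustrative distillation, not driver code: the handler above reads
 * ICR0 once, and for each asserted cause clears the matching bit in
 * ena_mask so that cause stays masked until the service task handles it,
 * then writes the surviving mask back on exit:
 *
 *	icr0 = rd32(hw, I40E_PFINT_ICR0);
 *	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
 *	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
 *		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
 *		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
 *	}
 *	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
 */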
3580 
3581 /**
3582  * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3583  * @tx_ring:  tx ring to clean
3584  * @budget:   how many cleans we're allowed
3585  *
3586  * Returns true if there's any budget left (i.e. the clean is finished)
3587  **/
3588 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3589 {
3590 	struct i40e_vsi *vsi = tx_ring->vsi;
3591 	u16 i = tx_ring->next_to_clean;
3592 	struct i40e_tx_buffer *tx_buf;
3593 	struct i40e_tx_desc *tx_desc;
3594 
3595 	tx_buf = &tx_ring->tx_bi[i];
3596 	tx_desc = I40E_TX_DESC(tx_ring, i);
3597 	i -= tx_ring->count;
3598 
3599 	do {
3600 		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3601 
3602 		/* if next_to_watch is not set then there is no work pending */
3603 		if (!eop_desc)
3604 			break;
3605 
3606 		/* prevent any other reads prior to eop_desc */
3607 		read_barrier_depends();
3608 
3609 		/* if the descriptor isn't done, no work yet to do */
3610 		if (!(eop_desc->cmd_type_offset_bsz &
3611 		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3612 			break;
3613 
3614 		/* clear next_to_watch to prevent false hangs */
3615 		tx_buf->next_to_watch = NULL;
3616 
3617 		tx_desc->buffer_addr = 0;
3618 		tx_desc->cmd_type_offset_bsz = 0;
3619 		/* move past filter desc */
3620 		tx_buf++;
3621 		tx_desc++;
3622 		i++;
3623 		if (unlikely(!i)) {
3624 			i -= tx_ring->count;
3625 			tx_buf = tx_ring->tx_bi;
3626 			tx_desc = I40E_TX_DESC(tx_ring, 0);
3627 		}
3628 		/* unmap skb header data */
3629 		dma_unmap_single(tx_ring->dev,
3630 				 dma_unmap_addr(tx_buf, dma),
3631 				 dma_unmap_len(tx_buf, len),
3632 				 DMA_TO_DEVICE);
3633 		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3634 			kfree(tx_buf->raw_buf);
3635 
3636 		tx_buf->raw_buf = NULL;
3637 		tx_buf->tx_flags = 0;
3638 		tx_buf->next_to_watch = NULL;
3639 		dma_unmap_len_set(tx_buf, len, 0);
3640 		tx_desc->buffer_addr = 0;
3641 		tx_desc->cmd_type_offset_bsz = 0;
3642 
3643 		/* move us past the eop_desc for start of next FD desc */
3644 		tx_buf++;
3645 		tx_desc++;
3646 		i++;
3647 		if (unlikely(!i)) {
3648 			i -= tx_ring->count;
3649 			tx_buf = tx_ring->tx_bi;
3650 			tx_desc = I40E_TX_DESC(tx_ring, 0);
3651 		}
3652 
3653 		/* update budget accounting */
3654 		budget--;
3655 	} while (likely(budget));
3656 
3657 	i += tx_ring->count;
3658 	tx_ring->next_to_clean = i;
3659 
3660 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
3661 		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
3662 
3663 	return budget > 0;
3664 }
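
/* Worked example (illustrative) of the negative-index bookkeeping in the
 * loop above: with tx_ring->count == 512 and next_to_clean == 510, i
 * starts at 510 - 512 == -2.  Two increments reach 0, which triggers the
 * "i -= tx_ring->count" wrap back to -512 while tx_buf/tx_desc are reset
 * to the ring base, so the hot loop never pays for a modulo.  The final
 * "i += tx_ring->count" converts back to a real index before it is
 * stored in next_to_clean.
 */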
3665 
3666 /**
3667  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3668  * @irq: interrupt number
3669  * @data: pointer to a q_vector
3670  **/
3671 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3672 {
3673 	struct i40e_q_vector *q_vector = data;
3674 	struct i40e_vsi *vsi;
3675 
3676 	if (!q_vector->tx.ring)
3677 		return IRQ_HANDLED;
3678 
3679 	vsi = q_vector->tx.ring->vsi;
3680 	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3681 
3682 	return IRQ_HANDLED;
3683 }
3684 
3685 /**
3686  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3687  * @vsi: the VSI being configured
3688  * @v_idx: vector index
3689  * @qp_idx: queue pair index
3690  **/
3691 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3692 {
3693 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3694 	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3695 	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3696 
3697 	tx_ring->q_vector = q_vector;
3698 	tx_ring->next = q_vector->tx.ring;
3699 	q_vector->tx.ring = tx_ring;
3700 	q_vector->tx.count++;
3701 
3702 	rx_ring->q_vector = q_vector;
3703 	rx_ring->next = q_vector->rx.ring;
3704 	q_vector->rx.ring = rx_ring;
3705 	q_vector->rx.count++;
3706 }
3707 
3708 /**
3709  * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3710  * @vsi: the VSI being configured
3711  *
3712  * This function maps descriptor rings to the queue-specific vectors
3713  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
3714  * one vector per queue pair, but on a constrained vector budget, we
3715  * group the queue pairs as "efficiently" as possible.
3716  **/
3717 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3718 {
3719 	int qp_remaining = vsi->num_queue_pairs;
3720 	int q_vectors = vsi->num_q_vectors;
3721 	int num_ringpairs;
3722 	int v_start = 0;
3723 	int qp_idx = 0;
3724 
3725 	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3726 	 * group them so there are multiple queues per vector.
3727 	 * It is also important to go through all the vectors available to be
3728 	 * sure that if we don't use all the vectors, that the remaining vectors
3729 	 * are cleared. This is especially important when decreasing the
3730 	 * number of queues in use.
3731 	 */
3732 	for (; v_start < q_vectors; v_start++) {
3733 		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3734 
3735 		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3736 
3737 		q_vector->num_ringpairs = num_ringpairs;
3738 
3739 		q_vector->rx.count = 0;
3740 		q_vector->tx.count = 0;
3741 		q_vector->rx.ring = NULL;
3742 		q_vector->tx.ring = NULL;
3743 
3744 		while (num_ringpairs--) {
3745 			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
3746 			qp_idx++;
3747 			qp_remaining--;
3748 		}
3749 	}
3750 }
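
/* Worked example (illustrative): 10 queue pairs over 4 vectors.  The
 * DIV_ROUND_UP() above spreads them as evenly as possible:
 *
 *	v_start 0: DIV_ROUND_UP(10, 4) = 3 ringpairs
 *	v_start 1: DIV_ROUND_UP(7, 3)  = 3 ringpairs
 *	v_start 2: DIV_ROUND_UP(4, 2)  = 2 ringpairs
 *	v_start 3: DIV_ROUND_UP(2, 1)  = 2 ringpairs
 *
 * giving a 3/3/2/2 split; every vector is still visited, so any vector
 * that ends up unused is left with empty ring lists.
 */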
3751 
3752 /**
3753  * i40e_vsi_request_irq - Request IRQ from the OS
3754  * @vsi: the VSI being configured
3755  * @basename: name for the vector
3756  **/
3757 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3758 {
3759 	struct i40e_pf *pf = vsi->back;
3760 	int err;
3761 
3762 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3763 		err = i40e_vsi_request_irq_msix(vsi, basename);
3764 	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3765 		err = request_irq(pf->pdev->irq, i40e_intr, 0,
3766 				  pf->int_name, pf);
3767 	else
3768 		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3769 				  pf->int_name, pf);
3770 
3771 	if (err)
3772 		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3773 
3774 	return err;
3775 }
3776 
3777 #ifdef CONFIG_NET_POLL_CONTROLLER
3778 /**
3779  * i40e_netpoll - A Polling 'interrupt' handler
3780  * @netdev: network interface device structure
3781  *
3782  * This is used by netconsole to send skbs without having to re-enable
3783  * interrupts.  It's not called while the normal interrupt routine is executing.
3784  **/
3785 #ifdef I40E_FCOE
3786 void i40e_netpoll(struct net_device *netdev)
3787 #else
3788 static void i40e_netpoll(struct net_device *netdev)
3789 #endif
3790 {
3791 	struct i40e_netdev_priv *np = netdev_priv(netdev);
3792 	struct i40e_vsi *vsi = np->vsi;
3793 	struct i40e_pf *pf = vsi->back;
3794 	int i;
3795 
3796 	/* if interface is down do nothing */
3797 	if (test_bit(__I40E_DOWN, &vsi->state))
3798 		return;
3799 
3800 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3801 		for (i = 0; i < vsi->num_q_vectors; i++)
3802 			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3803 	} else {
3804 		i40e_intr(pf->pdev->irq, pf);
3805 	}
3806 }
3807 #endif
3808 
3809 /**
3810  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3811  * @pf: the PF being configured
3812  * @pf_q: the PF queue
3813  * @enable: enable or disable state of the queue
3814  *
3815  * This routine will wait for the given Tx queue of the PF to reach the
3816  * enabled or disabled state.
3817  * Returns -ETIMEDOUT if the queue does not reach the requested state
3818  * after multiple retries; otherwise returns 0.
3819  **/
3820 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3821 {
3822 	int i;
3823 	u32 tx_reg;
3824 
3825 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3826 		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3827 		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3828 			break;
3829 
3830 		usleep_range(10, 20);
3831 	}
3832 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3833 		return -ETIMEDOUT;
3834 
3835 	return 0;
3836 }
3837 
3838 /**
3839  * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
3840  * @vsi: the VSI being configured
3841  * @enable: start or stop the rings
3842  **/
3843 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3844 {
3845 	struct i40e_pf *pf = vsi->back;
3846 	struct i40e_hw *hw = &pf->hw;
3847 	int i, j, pf_q, ret = 0;
3848 	u32 tx_reg;
3849 
3850 	pf_q = vsi->base_queue;
3851 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3852 
3853 		/* warn the TX unit of coming changes */
3854 		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3855 		if (!enable)
3856 			usleep_range(10, 20);
3857 
3858 		for (j = 0; j < 50; j++) {
3859 			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3860 			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3861 			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3862 				break;
3863 			usleep_range(1000, 2000);
3864 		}
3865 		/* Skip if the queue is already in the requested state */
3866 		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3867 			continue;
3868 
3869 		/* turn on/off the queue */
3870 		if (enable) {
3871 			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3872 			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3873 		} else {
3874 			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3875 		}
3876 
3877 		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3878 		/* No waiting for the Tx queue to disable */
3879 		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3880 			continue;
3881 
3882 		/* wait for the change to finish */
3883 		ret = i40e_pf_txq_wait(pf, pf_q, enable);
3884 		if (ret) {
3885 			dev_info(&pf->pdev->dev,
3886 				 "VSI seid %d Tx ring %d %sable timeout\n",
3887 				 vsi->seid, pf_q, (enable ? "en" : "dis"));
3888 			break;
3889 		}
3890 	}
3891 
3892 	if (hw->revision_id == 0)
3893 		mdelay(50);
3894 	return ret;
3895 }
3896 
3897 /**
3898  * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3899  * @pf: the PF being configured
3900  * @pf_q: the PF queue
3901  * @enable: enable or disable state of the queue
3902  *
3903  * This routine will wait for the given Rx queue of the PF to reach the
3904  * enabled or disabled state.
3905  * Returns -ETIMEDOUT if the queue does not reach the requested state
3906  * after multiple retries; otherwise returns 0.
3907  **/
3908 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3909 {
3910 	int i;
3911 	u32 rx_reg;
3912 
3913 	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3914 		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3915 		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3916 			break;
3917 
3918 		usleep_range(10, 20);
3919 	}
3920 	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3921 		return -ETIMEDOUT;
3922 
3923 	return 0;
3924 }
3925 
3926 /**
3927  * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
3928  * @vsi: the VSI being configured
3929  * @enable: start or stop the rings
3930  **/
3931 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3932 {
3933 	struct i40e_pf *pf = vsi->back;
3934 	struct i40e_hw *hw = &pf->hw;
3935 	int i, j, pf_q, ret = 0;
3936 	u32 rx_reg;
3937 
3938 	pf_q = vsi->base_queue;
3939 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3940 		for (j = 0; j < 50; j++) {
3941 			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3942 			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3943 			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3944 				break;
3945 			usleep_range(1000, 2000);
3946 		}
3947 
3948 		/* Skip if the queue is already in the requested state */
3949 		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3950 			continue;
3951 
3952 		/* turn on/off the queue */
3953 		if (enable)
3954 			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3955 		else
3956 			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3957 		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3958 		/* No waiting for the Rx queue to disable */
3959 		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3960 			continue;
3961 
3962 		/* wait for the change to finish */
3963 		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3964 		if (ret) {
3965 			dev_info(&pf->pdev->dev,
3966 				 "VSI seid %d Rx ring %d %sable timeout\n",
3967 				 vsi->seid, pf_q, (enable ? "en" : "dis"));
3968 			break;
3969 		}
3970 	}
3971 
3972 	return ret;
3973 }
3974 
3975 /**
3976  * i40e_vsi_control_rings - Start or stop a VSI's rings
3977  * @vsi: the VSI being configured
3978  * @request: true to start the rings, false to stop them
3979  **/
3980 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3981 {
3982 	int ret = 0;
3983 
3984 	/* do rx first for enable and last for disable */
3985 	if (request) {
3986 		ret = i40e_vsi_control_rx(vsi, request);
3987 		if (ret)
3988 			return ret;
3989 		ret = i40e_vsi_control_tx(vsi, request);
3990 	} else {
3991 		/* Ignore return value, we need to shutdown whatever we can */
3992 		i40e_vsi_control_tx(vsi, request);
3993 		i40e_vsi_control_rx(vsi, request);
3994 	}
3995 
3996 	return ret;
3997 }
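
/* Usage sketch (illustrative): bring-up enables Rx before Tx and
 * teardown reverses the order, matching the comment above.  A typical
 * caller is i40e_up_complete():
 *
 *	err = i40e_vsi_control_rings(vsi, true);
 *	if (err)
 *		return err;
 */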
3998 
3999 /**
4000  * i40e_vsi_free_irq - Free the irq association with the OS
4001  * @vsi: the VSI being configured
4002  **/
4003 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4004 {
4005 	struct i40e_pf *pf = vsi->back;
4006 	struct i40e_hw *hw = &pf->hw;
4007 	int base = vsi->base_vector;
4008 	u32 val, qp;
4009 	int i;
4010 
4011 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4012 		if (!vsi->q_vectors)
4013 			return;
4014 
4015 		if (!vsi->irqs_ready)
4016 			return;
4017 
4018 		vsi->irqs_ready = false;
4019 		for (i = 0; i < vsi->num_q_vectors; i++) {
4020 			u16 vector = i + base;
4021 
4022 			/* free only the irqs that were actually requested */
4023 			if (!vsi->q_vectors[i] ||
4024 			    !vsi->q_vectors[i]->num_ringpairs)
4025 				continue;
4026 
4027 			/* clear the affinity_mask in the IRQ descriptor */
4028 			irq_set_affinity_hint(pf->msix_entries[vector].vector,
4029 					      NULL);
4030 			synchronize_irq(pf->msix_entries[vector].vector);
4031 			free_irq(pf->msix_entries[vector].vector,
4032 				 vsi->q_vectors[i]);
4033 
4034 			/* Tear down the interrupt queue link list
4035 			 *
4036 			 * We know that they come in pairs and always
4037 			 * the Rx first, then the Tx.  To clear the
4038 			 * link list, stick the EOL value into the
4039 			 * next_q field of the registers.
4040 			 */
4041 			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4042 			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4043 				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4044 			val |= I40E_QUEUE_END_OF_LIST
4045 				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4046 			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4047 
4048 			while (qp != I40E_QUEUE_END_OF_LIST) {
4049 				u32 next;
4050 
4051 				val = rd32(hw, I40E_QINT_RQCTL(qp));
4052 
4053 				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
4054 					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4055 					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
4056 					 I40E_QINT_RQCTL_INTEVENT_MASK);
4057 
4058 				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4059 					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4060 
4061 				wr32(hw, I40E_QINT_RQCTL(qp), val);
4062 
4063 				val = rd32(hw, I40E_QINT_TQCTL(qp));
4064 
4065 				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4066 					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4067 
4068 				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
4069 					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4070 					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
4071 					 I40E_QINT_TQCTL_INTEVENT_MASK);
4072 
4073 				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4074 					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4075 
4076 				wr32(hw, I40E_QINT_TQCTL(qp), val);
4077 				qp = next;
4078 			}
4079 		}
4080 	} else {
4081 		free_irq(pf->pdev->irq, pf);
4082 
4083 		val = rd32(hw, I40E_PFINT_LNKLST0);
4084 		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4085 			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4086 		val |= I40E_QUEUE_END_OF_LIST
4087 			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4088 		wr32(hw, I40E_PFINT_LNKLST0, val);
4089 
4090 		val = rd32(hw, I40E_QINT_RQCTL(qp));
4091 		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
4092 			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4093 			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
4094 			 I40E_QINT_RQCTL_INTEVENT_MASK);
4095 
4096 		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4097 			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4098 
4099 		wr32(hw, I40E_QINT_RQCTL(qp), val);
4100 
4101 		val = rd32(hw, I40E_QINT_TQCTL(qp));
4102 
4103 		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
4104 			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4105 			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
4106 			 I40E_QINT_TQCTL_INTEVENT_MASK);
4107 
4108 		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4109 			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4110 
4111 		wr32(hw, I40E_QINT_TQCTL(qp), val);
4112 	}
4113 }
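
/* Illustrative sketch, not driver code, of the teardown walk above: the
 * vector's queue list is headed by PFINT_LNKLSTN (PFINT_LNKLST0 in
 * legacy mode) and each queue's QINT_TQCTL NEXTQ field links to the next
 * queue, so unwinding is an ordinary list traversal:
 *
 *	qp = FIRSTQ from LNKLSTN;
 *	while (qp != I40E_QUEUE_END_OF_LIST) {
 *		clear cause bits in QINT_RQCTL(qp);
 *		next = NEXTQ from QINT_TQCTL(qp);
 *		clear cause bits in QINT_TQCTL(qp);
 *		qp = next;
 *	}
 */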
4114 
4115 /**
4116  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4117  * @vsi: the VSI being configured
4118  * @v_idx: Index of vector to be freed
4119  *
4120  * This function frees the memory allocated to the q_vector.  In addition if
4121  * NAPI is enabled it will delete any references to the NAPI struct prior
4122  * to freeing the q_vector.
4123  **/
4124 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4125 {
4126 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4127 	struct i40e_ring *ring;
4128 
4129 	if (!q_vector)
4130 		return;
4131 
4132 	/* disassociate q_vector from rings */
4133 	i40e_for_each_ring(ring, q_vector->tx)
4134 		ring->q_vector = NULL;
4135 
4136 	i40e_for_each_ring(ring, q_vector->rx)
4137 		ring->q_vector = NULL;
4138 
4139 	/* only VSI w/ an associated netdev is set up w/ NAPI */
4140 	if (vsi->netdev)
4141 		netif_napi_del(&q_vector->napi);
4142 
4143 	vsi->q_vectors[v_idx] = NULL;
4144 
4145 	kfree_rcu(q_vector, rcu);
4146 }
4147 
4148 /**
4149  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4150  * @vsi: the VSI being un-configured
4151  *
4152  * This frees the memory allocated to the q_vectors and
4153  * deletes references to the NAPI struct.
4154  **/
4155 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4156 {
4157 	int v_idx;
4158 
4159 	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4160 		i40e_free_q_vector(vsi, v_idx);
4161 }
4162 
4163 /**
4164  * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4165  * @pf: board private structure
4166  **/
4167 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4168 {
4169 	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4170 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4171 		pci_disable_msix(pf->pdev);
4172 		kfree(pf->msix_entries);
4173 		pf->msix_entries = NULL;
4174 		kfree(pf->irq_pile);
4175 		pf->irq_pile = NULL;
4176 	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4177 		pci_disable_msi(pf->pdev);
4178 	}
4179 	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4180 }
4181 
4182 /**
4183  * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4184  * @pf: board private structure
4185  *
4186  * We go through and clear interrupt specific resources and reset the structure
4187  * to pre-load conditions
4188  **/
4189 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4190 {
4191 	int i;
4192 
4193 	i40e_stop_misc_vector(pf);
4194 	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
4195 		synchronize_irq(pf->msix_entries[0].vector);
4196 		free_irq(pf->msix_entries[0].vector, pf);
4197 	}
4198 
4199 	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4200 		      I40E_IWARP_IRQ_PILE_ID);
4201 
4202 	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4203 	for (i = 0; i < pf->num_alloc_vsi; i++)
4204 		if (pf->vsi[i])
4205 			i40e_vsi_free_q_vectors(pf->vsi[i]);
4206 	i40e_reset_interrupt_capability(pf);
4207 }
4208 
4209 /**
4210  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4211  * @vsi: the VSI being configured
4212  **/
4213 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4214 {
4215 	int q_idx;
4216 
4217 	if (!vsi->netdev)
4218 		return;
4219 
4220 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4221 		napi_enable(&vsi->q_vectors[q_idx]->napi);
4222 }
4223 
4224 /**
4225  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4226  * @vsi: the VSI being configured
4227  **/
4228 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4229 {
4230 	int q_idx;
4231 
4232 	if (!vsi->netdev)
4233 		return;
4234 
4235 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4236 		napi_disable(&vsi->q_vectors[q_idx]->napi);
4237 }
4238 
4239 /**
4240  * i40e_vsi_close - Shut down a VSI
4241  * @vsi: the vsi to be quelled
4242  **/
4243 static void i40e_vsi_close(struct i40e_vsi *vsi)
4244 {
4245 	bool reset = false;
4246 
4247 	if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
4248 		i40e_down(vsi);
4249 	i40e_vsi_free_irq(vsi);
4250 	i40e_vsi_free_tx_resources(vsi);
4251 	i40e_vsi_free_rx_resources(vsi);
4252 	vsi->current_netdev_flags = 0;
4253 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4254 		reset = true;
4255 	i40e_notify_client_of_netdev_close(vsi, reset);
4256 }
4257 
4258 /**
4259  * i40e_quiesce_vsi - Pause a given VSI
4260  * @vsi: the VSI being paused
4261  **/
4262 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4263 {
4264 	if (test_bit(__I40E_DOWN, &vsi->state))
4265 		return;
4266 
4267 	/* No need to disable FCoE VSI when Tx suspended */
4268 	if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
4269 	    vsi->type == I40E_VSI_FCOE) {
4270 		dev_dbg(&vsi->back->pdev->dev,
4271 			 "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
4272 		return;
4273 	}
4274 
4275 	set_bit(__I40E_NEEDS_RESTART, &vsi->state);
4276 	if (vsi->netdev && netif_running(vsi->netdev))
4277 		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4278 	else
4279 		i40e_vsi_close(vsi);
4280 }
4281 
4282 /**
4283  * i40e_unquiesce_vsi - Resume a given VSI
4284  * @vsi: the VSI being resumed
4285  **/
4286 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4287 {
4288 	if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4289 		return;
4290 
4291 	clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4292 	if (vsi->netdev && netif_running(vsi->netdev))
4293 		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4294 	else
4295 		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
4296 }
4297 
4298 /**
4299  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4300  * @pf: the PF
4301  **/
4302 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4303 {
4304 	int v;
4305 
4306 	for (v = 0; v < pf->num_alloc_vsi; v++) {
4307 		if (pf->vsi[v])
4308 			i40e_quiesce_vsi(pf->vsi[v]);
4309 	}
4310 }
4311 
4312 /**
4313  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4314  * @pf: the PF
4315  **/
4316 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4317 {
4318 	int v;
4319 
4320 	for (v = 0; v < pf->num_alloc_vsi; v++) {
4321 		if (pf->vsi[v])
4322 			i40e_unquiesce_vsi(pf->vsi[v]);
4323 	}
4324 }
4325 
4326 #ifdef CONFIG_I40E_DCB
4327 /**
4328  * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4329  * @vsi: the VSI being configured
4330  *
4331  * This function waits for the given VSI's queues to be disabled.
4332  **/
4333 static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4334 {
4335 	struct i40e_pf *pf = vsi->back;
4336 	int i, pf_q, ret;
4337 
4338 	pf_q = vsi->base_queue;
4339 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4340 		/* Check and wait for the disable status of the queue */
4341 		ret = i40e_pf_txq_wait(pf, pf_q, false);
4342 		if (ret) {
4343 			dev_info(&pf->pdev->dev,
4344 				 "VSI seid %d Tx ring %d disable timeout\n",
4345 				 vsi->seid, pf_q);
4346 			return ret;
4347 		}
4348 	}
4349 
4350 	pf_q = vsi->base_queue;
4351 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4352 		/* Check and wait for the disable status of the queue */
4353 		ret = i40e_pf_rxq_wait(pf, pf_q, false);
4354 		if (ret) {
4355 			dev_info(&pf->pdev->dev,
4356 				 "VSI seid %d Rx ring %d disable timeout\n",
4357 				 vsi->seid, pf_q);
4358 			return ret;
4359 		}
4360 	}
4361 
4362 	return 0;
4363 }
4364 
4365 /**
4366  * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4367  * @pf: the PF
4368  *
4369  * This function waits for the queues to be in disabled state for all the
4370  * VSIs that are managed by this PF.
4371  **/
4372 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4373 {
4374 	int v, ret = 0;
4375 
4376 	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4377 		/* No need to wait for FCoE VSI queues */
4378 		if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4379 			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4380 			if (ret)
4381 				break;
4382 		}
4383 	}
4384 
4385 	return ret;
4386 }
4387 
4388 #endif
4389 
4390 /**
4391  * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
4392  * @q_idx: TX queue number
4393  * @vsi: Pointer to VSI struct
4394  *
4395  * This function checks the specified Tx queue of the given VSI for a hung
4396  * condition.  Detection is a two-step process: on the first pass the 'hung'
4397  * bit is set for the queue's q_vector.  If napi_poll runs before the next
4398  * service task iteration, it clears that bit; otherwise the condition is
4399  * confirmed and this function issues a SW interrupt to recover the queue.
4400  **/
4401 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4402 {
4403 	struct i40e_ring *tx_ring = NULL;
4404 	struct i40e_pf	*pf;
4405 	u32 head, val, tx_pending_hw;
4406 	int i;
4407 
4408 	pf = vsi->back;
4409 
4410 	/* now that we have an index, find the tx_ring struct */
4411 	for (i = 0; i < vsi->num_queue_pairs; i++) {
4412 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4413 			if (q_idx == vsi->tx_rings[i]->queue_index) {
4414 				tx_ring = vsi->tx_rings[i];
4415 				break;
4416 			}
4417 		}
4418 	}
4419 
4420 	if (!tx_ring)
4421 		return;
4422 
4423 	/* Read interrupt register */
4424 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4425 		val = rd32(&pf->hw,
4426 			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4427 					       tx_ring->vsi->base_vector - 1));
4428 	else
4429 		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4430 
4431 	head = i40e_get_head(tx_ring);
4432 
4433 	tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
4434 
4435 	/* HW is done executing descriptors, updated HEAD write back,
4436 	 * but SW hasn't processed those descriptors. If interrupt is
4437 	 * not generated from this point ON, it could result into
4438 	 * dev_watchdog detecting timeout on those netdev_queue,
4439 	 * hence proactively trigger SW interrupt.
4440 	 */
4441 	if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
4442 		/* NAPI Poll didn't run and clear since it was set */
4443 		if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
4444 				       &tx_ring->q_vector->hung_detected)) {
4445 			netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
4446 				    vsi->seid, q_idx, tx_pending_hw,
4447 				    tx_ring->next_to_clean, head,
4448 				    tx_ring->next_to_use,
4449 				    readl(tx_ring->tail));
4450 			netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
4451 				    vsi->seid, q_idx, val);
4452 			i40e_force_wb(vsi, tx_ring->q_vector);
4453 		} else {
4454 			/* First Chance - detected possible hung */
4455 			set_bit(I40E_Q_VECTOR_HUNG_DETECT,
4456 				&tx_ring->q_vector->hung_detected);
4457 		}
4458 	}
4459 
4460 	/* This is the case where we have interrupts missing,
4461 	 * so the tx_pending in HW will most likely be 0, but we
4462 	 * will have tx_pending in SW since the WB happened but the
4463 	 * interrupt got lost.
4464 	 */
4465 	if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
4466 	    (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
4467 		if (napi_reschedule(&tx_ring->q_vector->napi))
4468 			tx_ring->tx_stats.tx_lost_interrupt++;
4469 	}
4470 }
4471 
4472 /**
4473  * i40e_detect_recover_hung - Function to detect and recover hung_queues
4474  * @pf:  pointer to PF struct
4475  *
4476  * The LAN VSI's netdev owns the Tx queues.  This function checks each of
4477  * those Tx queues for a hung condition and triggers recovery by issuing
4478  * a SW interrupt.
4479  **/
4480 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4481 {
4482 	struct net_device *netdev;
4483 	struct i40e_vsi *vsi;
4484 	int i;
4485 
4486 	/* Only for LAN VSI */
4487 	vsi = pf->vsi[pf->lan_vsi];
4488 
4489 	if (!vsi)
4490 		return;
4491 
4492 	/* Make sure VSI state is not DOWN/RECOVERY_PENDING */
4493 	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4494 	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4495 		return;
4496 
4497 	/* Make sure type is MAIN VSI */
4498 	if (vsi->type != I40E_VSI_MAIN)
4499 		return;
4500 
4501 	netdev = vsi->netdev;
4502 	if (!netdev)
4503 		return;
4504 
4505 	/* Bail out if netif_carrier is not OK */
4506 	if (!netif_carrier_ok(netdev))
4507 		return;
4508 
4509 	/* Go through the netdev's Tx queues */
4510 	for (i = 0; i < netdev->num_tx_queues; i++) {
4511 		struct netdev_queue *q;
4512 
4513 		q = netdev_get_tx_queue(netdev, i);
4514 		if (q)
4515 			i40e_detect_recover_hung_queue(i, vsi);
4516 	}
4517 }
4518 
4519 /**
4520  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4521  * @pf: pointer to PF
4522  *
4523  * Get TC map for ISCSI PF type that will include iSCSI TC
4524  * and LAN TC.
4525  **/
4526 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4527 {
4528 	struct i40e_dcb_app_priority_table app;
4529 	struct i40e_hw *hw = &pf->hw;
4530 	u8 enabled_tc = 1; /* TC0 is always enabled */
4531 	u8 tc, i;
4532 	/* Get the iSCSI APP TLV */
4533 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4534 
4535 	for (i = 0; i < dcbcfg->numapps; i++) {
4536 		app = dcbcfg->app[i];
4537 		if (app.selector == I40E_APP_SEL_TCPIP &&
4538 		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
4539 			tc = dcbcfg->etscfg.prioritytable[app.priority];
4540 			enabled_tc |= BIT(tc);
4541 			break;
4542 		}
4543 	}
4544 
4545 	return enabled_tc;
4546 }
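
/* Worked example (illustrative): if the iSCSI APP TLV carries priority 4
 * and dcbcfg->etscfg.prioritytable[4] == 1, the function returns
 * 0x1 | BIT(1) == 0x3, i.e. TC0 (always enabled) plus the iSCSI TC.
 */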
4547 
4548 /**
4549  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
4550  * @dcbcfg: the corresponding DCBx configuration structure
4551  *
4552  * Return the number of TCs from given DCBx configuration
4553  **/
4554 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4555 {
4556 	int i, tc_unused = 0;
4557 	u8 num_tc = 0;
4558 	u8 ret = 0;
4559 
4560 	/* Scan the ETS Config Priority Table to find
4561 	 * traffic class enabled for a given priority
4562 	 * and create a bitmask of enabled TCs
4563 	 */
4564 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4565 		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
4566 
4567 	/* Now scan the bitmask to check for
4568 	 * contiguous TCs starting with TC0
4569 	 */
4570 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4571 		if (num_tc & BIT(i)) {
4572 			if (!tc_unused) {
4573 				ret++;
4574 			} else {
4575 				pr_err("Non-contiguous TC - Disabling DCB\n");
4576 				return 1;
4577 			}
4578 		} else {
4579 			tc_unused = 1;
4580 		}
4581 	}
4582 
4583 	/* There is always at least TC0 */
4584 	if (!ret)
4585 		ret = 1;
4586 
4587 	return ret;
4588 }
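
/* Worked examples (illustrative): a priority table of {0,0,1,1,2,2,0,0}
 * builds the bitmask 0x7, contiguous from TC0, so 3 is returned.  A
 * table that maps priorities only to TC0 and TC2 builds 0x5; the hole at
 * TC1 trips the non-contiguous check and DCB falls back to a single TC.
 */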
4589 
4590 /**
4591  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4592  * @dcbcfg: the corresponding DCBx configuration structure
4593  *
4594  * Query the current DCB configuration and return the number of
4595  * traffic classes enabled from the given DCBX config
4596  **/
4597 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4598 {
4599 	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4600 	u8 enabled_tc = 1;
4601 	u8 i;
4602 
4603 	for (i = 0; i < num_tc; i++)
4604 		enabled_tc |= BIT(i);
4605 
4606 	return enabled_tc;
4607 }
4608 
4609 /**
4610  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4611  * @pf: PF being queried
4612  *
4613  * Return number of traffic classes enabled for the given PF
4614  **/
4615 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4616 {
4617 	struct i40e_hw *hw = &pf->hw;
4618 	u8 i, enabled_tc = 1;
4619 	u8 num_tc = 0;
4620 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4621 
4622 	/* If DCB is not enabled then always in single TC */
4623 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4624 		return 1;
4625 
4626 	/* SFP mode will be enabled for all TCs on port */
4627 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4628 		return i40e_dcb_get_num_tc(dcbcfg);
4629 
4630 	/* In MFP mode, return the count of enabled TCs for this PF */
4631 	if (pf->hw.func_caps.iscsi)
4632 		enabled_tc = i40e_get_iscsi_tc_map(pf);
4633 	else
4634 		return 1; /* Only TC0 */
4635 
4636 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4637 		if (enabled_tc & BIT(i))
4638 			num_tc++;
4639 	}
4640 	return num_tc;
4641 }
4642 
4643 /**
4644  * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4645  * @pf: PF being queried
4646  *
4647  * Return a bitmap for first enabled traffic class for this PF.
4648  **/
4649 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4650 {
4651 	u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4652 	u8 i = 0;
4653 
4654 	if (!enabled_tc)
4655 		return 0x1; /* TC0 */
4656 
4657 	/* Find the first enabled TC */
4658 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4659 		if (enabled_tc & BIT(i))
4660 			break;
4661 	}
4662 
4663 	return BIT(i);
4664 }
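
/* Example (illustrative): with func_caps.enabled_tcmap == 0x6 the first
 * set bit is TC1, so BIT(1) == 0x2 is returned; an all-zero map falls
 * back to 0x1 for TC0.
 */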
4665 
4666 /**
4667  * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4668  * @pf: PF being queried
4669  *
4670  * Return a bitmap for enabled traffic classes for this PF.
4671  **/
4672 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4673 {
4674 	/* If DCB is not enabled for this PF then just return default TC */
4675 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4676 		return i40e_pf_get_default_tc(pf);
4677 
4678 	/* SFP mode we want PF to be enabled for all TCs */
4679 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4680 		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4681 
4682 	/* MFP enabled and iSCSI PF type */
4683 	if (pf->hw.func_caps.iscsi)
4684 		return i40e_get_iscsi_tc_map(pf);
4685 	else
4686 		return i40e_pf_get_default_tc(pf);
4687 }
4688 
4689 /**
4690  * i40e_vsi_get_bw_info - Query VSI BW Information
4691  * @vsi: the VSI being queried
4692  *
4693  * Returns 0 on success, negative value on failure
4694  **/
4695 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4696 {
4697 	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4698 	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4699 	struct i40e_pf *pf = vsi->back;
4700 	struct i40e_hw *hw = &pf->hw;
4701 	i40e_status ret;
4702 	u32 tc_bw_max;
4703 	int i;
4704 
4705 	/* Get the VSI level BW configuration */
4706 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4707 	if (ret) {
4708 		dev_info(&pf->pdev->dev,
4709 			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
4710 			 i40e_stat_str(&pf->hw, ret),
4711 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4712 		return -EINVAL;
4713 	}
4714 
4715 	/* Get the VSI level BW configuration per TC */
4716 	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4717 					       NULL);
4718 	if (ret) {
4719 		dev_info(&pf->pdev->dev,
4720 			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4721 			 i40e_stat_str(&pf->hw, ret),
4722 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4723 		return -EINVAL;
4724 	}
4725 
4726 	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4727 		dev_info(&pf->pdev->dev,
4728 			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4729 			 bw_config.tc_valid_bits,
4730 			 bw_ets_config.tc_valid_bits);
4731 		/* Still continuing */
4732 	}
4733 
4734 	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4735 	vsi->bw_max_quanta = bw_config.max_bw;
4736 	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4737 		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4738 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4739 		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4740 		vsi->bw_ets_limit_credits[i] =
4741 					le16_to_cpu(bw_ets_config.credits[i]);
4742 		/* 3 bits out of 4 for each TC */
4743 		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4744 	}
4745 
4746 	return 0;
4747 }
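
/* Worked example (illustrative): the two little-endian tc_bw_max words
 * pack one 4-bit field per TC, of which only the low 3 bits are quanta.
 * With the combined tc_bw_max == 0x5321, TC0..TC3 decode as:
 *
 *	TC0: (0x5321 >> 0)  & 0x7 = 1
 *	TC1: (0x5321 >> 4)  & 0x7 = 2
 *	TC2: (0x5321 >> 8)  & 0x7 = 3
 *	TC3: (0x5321 >> 12) & 0x7 = 5
 */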
4748 
4749 /**
4750  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4751  * @vsi: the VSI being configured
4752  * @enabled_tc: TC bitmap
4753  * @bw_share: BW shared credits per TC
4754  *
4755  * Returns 0 on success, negative value on failure
4756  **/
4757 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4758 				       u8 *bw_share)
4759 {
4760 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4761 	i40e_status ret;
4762 	int i;
4763 
4764 	bw_data.tc_valid_bits = enabled_tc;
4765 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4766 		bw_data.tc_bw_credits[i] = bw_share[i];
4767 
4768 	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4769 				       NULL);
4770 	if (ret) {
4771 		dev_info(&vsi->back->pdev->dev,
4772 			 "AQ command Config VSI BW allocation per TC failed = %d\n",
4773 			 vsi->back->hw.aq.asq_last_status);
4774 		return -EINVAL;
4775 	}
4776 
4777 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4778 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4779 
4780 	return 0;
4781 }
4782 
4783 /**
4784  * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4785  * @vsi: the VSI being configured
4786  * @enabled_tc: TC map to be enabled
4787  *
4788  **/
4789 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4790 {
4791 	struct net_device *netdev = vsi->netdev;
4792 	struct i40e_pf *pf = vsi->back;
4793 	struct i40e_hw *hw = &pf->hw;
4794 	u8 netdev_tc = 0;
4795 	int i;
4796 	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4797 
4798 	if (!netdev)
4799 		return;
4800 
4801 	if (!enabled_tc) {
4802 		netdev_reset_tc(netdev);
4803 		return;
4804 	}
4805 
4806 	/* Set up actual enabled TCs on the VSI */
4807 	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4808 		return;
4809 
4810 	/* set per TC queues for the VSI */
4811 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4812 		/* Only set TC queues for enabled tcs
4813 		 *
4814 		 * e.g. For a VSI that has TC0 and TC3 enabled the
4815 		 * enabled_tc bitmap would be 0x00001001; the driver
4816 		 * will set the numtc for netdev as 2 that will be
4817 		 * referenced by the netdev layer as TC 0 and 1.
4818 		 */
4819 		if (vsi->tc_config.enabled_tc & BIT(i))
4820 			netdev_set_tc_queue(netdev,
4821 					vsi->tc_config.tc_info[i].netdev_tc,
4822 					vsi->tc_config.tc_info[i].qcount,
4823 					vsi->tc_config.tc_info[i].qoffset);
4824 	}
4825 
4826 	/* Assign UP2TC map for the VSI */
4827 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4828 		/* Get the actual TC# for the UP */
4829 		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4830 		/* Get the mapped netdev TC# for the UP */
4831 		netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
4832 		netdev_set_prio_tc_map(netdev, i, netdev_tc);
4833 	}
4834 }
4835 
4836 /**
4837  * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
4838  * @vsi: the VSI being configured
4839  * @ctxt: the ctxt buffer returned from AQ VSI update param command
4840  **/
4841 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4842 				      struct i40e_vsi_context *ctxt)
4843 {
4844 	/* copy just the sections touched not the entire info
4845 	 * since not all sections are valid as returned by
4846 	 * update vsi params
4847 	 */
4848 	vsi->info.mapping_flags = ctxt->info.mapping_flags;
4849 	memcpy(&vsi->info.queue_mapping,
4850 	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4851 	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4852 	       sizeof(vsi->info.tc_mapping));
4853 }
4854 
4855 /**
4856  * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4857  * @vsi: VSI to be configured
4858  * @enabled_tc: TC bitmap
4859  *
4860  * This configures a particular VSI for TCs that are mapped to the
4861  * given TC bitmap. It uses default bandwidth share for TCs across
4862  * VSIs to configure TC for a particular VSI.
4863  *
4864  * NOTE:
4865  * It is expected that the VSI queues have been quiesced before calling
4866  * this function.
4867  **/
4868 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4869 {
4870 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4871 	struct i40e_vsi_context ctxt;
4872 	int ret = 0;
4873 	int i;
4874 
4875 	/* Check if enabled_tc is same as existing or new TCs */
4876 	if (vsi->tc_config.enabled_tc == enabled_tc)
4877 		return ret;
4878 
4879 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
4880 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4881 		if (enabled_tc & BIT(i))
4882 			bw_share[i] = 1;
4883 	}
4884 
4885 	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4886 	if (ret) {
4887 		dev_info(&vsi->back->pdev->dev,
4888 			 "Failed configuring TC map %d for VSI %d\n",
4889 			 enabled_tc, vsi->seid);
4890 		goto out;
4891 	}
4892 
4893 	/* Update Queue Pairs Mapping for currently enabled UPs */
4894 	ctxt.seid = vsi->seid;
4895 	ctxt.pf_num = vsi->back->hw.pf_id;
4896 	ctxt.vf_num = 0;
4897 	ctxt.uplink_seid = vsi->uplink_seid;
4898 	ctxt.info = vsi->info;
4899 	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4900 
4901 	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
4902 		ctxt.info.valid_sections |=
4903 				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
4904 		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
4905 	}
4906 
4907 	/* Update the VSI after updating the VSI queue-mapping information */
4908 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4909 	if (ret) {
4910 		dev_info(&vsi->back->pdev->dev,
4911 			 "Update vsi tc config failed, err %s aq_err %s\n",
4912 			 i40e_stat_str(&vsi->back->hw, ret),
4913 			 i40e_aq_str(&vsi->back->hw,
4914 				     vsi->back->hw.aq.asq_last_status));
4915 		goto out;
4916 	}
4917 	/* update the local VSI info with updated queue map */
4918 	i40e_vsi_update_queue_map(vsi, &ctxt);
4919 	vsi->info.valid_sections = 0;
4920 
4921 	/* Update current VSI BW information */
4922 	ret = i40e_vsi_get_bw_info(vsi);
4923 	if (ret) {
4924 		dev_info(&vsi->back->pdev->dev,
4925 			 "Failed updating vsi bw info, err %s aq_err %s\n",
4926 			 i40e_stat_str(&vsi->back->hw, ret),
4927 			 i40e_aq_str(&vsi->back->hw,
4928 				     vsi->back->hw.aq.asq_last_status));
4929 		goto out;
4930 	}
4931 
4932 	/* Update the netdev TC setup */
4933 	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4934 out:
4935 	return ret;
4936 }
4937 
4938 /**
4939  * i40e_veb_config_tc - Configure TCs for given VEB
4940  * @veb: given VEB
4941  * @enabled_tc: TC bitmap
4942  *
4943  * Configures given TC bitmap for VEB (switching) element
4944  **/
4945 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4946 {
4947 	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4948 	struct i40e_pf *pf = veb->pf;
4949 	int ret = 0;
4950 	int i;
4951 
4952 	/* No TCs or already enabled TCs just return */
4953 	if (!enabled_tc || veb->enabled_tc == enabled_tc)
4954 		return ret;
4955 
4956 	bw_data.tc_valid_bits = enabled_tc;
4957 	/* bw_data.absolute_credits is not set (relative) */
4958 
4959 	/* Enable ETS TCs with equal BW Share for now */
4960 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4961 		if (enabled_tc & BIT(i))
4962 			bw_data.tc_bw_share_credits[i] = 1;
4963 	}
4964 
4965 	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4966 						   &bw_data, NULL);
4967 	if (ret) {
4968 		dev_info(&pf->pdev->dev,
4969 			 "VEB bw config failed, err %s aq_err %s\n",
4970 			 i40e_stat_str(&pf->hw, ret),
4971 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4972 		goto out;
4973 	}
4974 
4975 	/* Update the BW information */
4976 	ret = i40e_veb_get_bw_info(veb);
4977 	if (ret) {
4978 		dev_info(&pf->pdev->dev,
4979 			 "Failed getting veb bw config, err %s aq_err %s\n",
4980 			 i40e_stat_str(&pf->hw, ret),
4981 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4982 	}
4983 
4984 out:
4985 	return ret;
4986 }
4987 
4988 #ifdef CONFIG_I40E_DCB
4989 /**
4990  * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4991  * @pf: PF struct
4992  *
4993  * Reconfigure VEB/VSIs on a given PF; it is assumed that
4994  * Reconfigure VEB/VSIs on a given PF; it is assumed that
4995  * the caller has quiesced all the VSIs before calling
4996  **/
4997 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4998 {
4999 	u8 tc_map = 0;
5000 	int ret;
5001 	u8 v;
5002 
5003 	/* Enable the TCs available on PF to all VEBs */
5004 	tc_map = i40e_pf_get_tc_map(pf);
5005 	for (v = 0; v < I40E_MAX_VEB; v++) {
5006 		if (!pf->veb[v])
5007 			continue;
5008 		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
5009 		if (ret) {
5010 			dev_info(&pf->pdev->dev,
5011 				 "Failed configuring TC for VEB seid=%d\n",
5012 				 pf->veb[v]->seid);
5013 			/* Will try to configure as many components as possible */
5014 		}
5015 	}
5016 
5017 	/* Update each VSI */
5018 	for (v = 0; v < pf->num_alloc_vsi; v++) {
5019 		if (!pf->vsi[v])
5020 			continue;
5021 
5022 		/* - Enable all TCs for the LAN VSI
5023 #ifdef I40E_FCOE
5024 		 * - For FCoE VSI only enable the TC configured
5025 		 *   as per the APP TLV
5026 #endif
5027 		 * - For all others keep them at TC0 for now
5028 		 */
5029 		if (v == pf->lan_vsi)
5030 			tc_map = i40e_pf_get_tc_map(pf);
5031 		else
5032 			tc_map = i40e_pf_get_default_tc(pf);
5033 #ifdef I40E_FCOE
5034 		if (pf->vsi[v]->type == I40E_VSI_FCOE)
5035 			tc_map = i40e_get_fcoe_tc_map(pf);
5036 #endif /* #ifdef I40E_FCOE */
5037 
5038 		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
5039 		if (ret) {
5040 			dev_info(&pf->pdev->dev,
5041 				 "Failed configuring TC for VSI seid=%d\n",
5042 				 pf->vsi[v]->seid);
5043 			/* Will try to configure as many components as possible */
5044 		} else {
5045 			/* Re-configure VSI vectors based on updated TC map */
5046 			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
5047 			if (pf->vsi[v]->netdev)
5048 				i40e_dcbnl_set_all(pf->vsi[v]);
5049 		}
5050 	}
5051 }
5052 
5053 /**
5054  * i40e_resume_port_tx - Resume port Tx
5055  * @pf: PF struct
5056  *
5057  * Resume a port's Tx and issue a PF reset in case of failure to
5058  * resume.
5059  **/
5060 static int i40e_resume_port_tx(struct i40e_pf *pf)
5061 {
5062 	struct i40e_hw *hw = &pf->hw;
5063 	int ret;
5064 
5065 	ret = i40e_aq_resume_port_tx(hw, NULL);
5066 	if (ret) {
5067 		dev_info(&pf->pdev->dev,
5068 			 "Resume Port Tx failed, err %s aq_err %s\n",
5069 			  i40e_stat_str(&pf->hw, ret),
5070 			  i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5071 		/* Schedule PF reset to recover */
5072 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5073 		i40e_service_event_schedule(pf);
5074 	}
5075 
5076 	return ret;
5077 }
5078 
5079 /**
5080  * i40e_init_pf_dcb - Initialize DCB configuration
5081  * @pf: PF being configured
5082  *
5083  * Query the current DCB configuration and cache it
5084  * in the hardware structure
5085  **/
5086 static int i40e_init_pf_dcb(struct i40e_pf *pf)
5087 {
5088 	struct i40e_hw *hw = &pf->hw;
5089 	int err = 0;
5090 
5091 	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
5092 	if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
5093 		goto out;
5094 
5095 	/* Get the initial DCB configuration */
5096 	err = i40e_init_dcb(hw);
5097 	if (!err) {
5098 		/* Device/Function is not DCBX capable */
5099 		if ((!hw->func_caps.dcb) ||
5100 		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
5101 			dev_info(&pf->pdev->dev,
5102 				 "DCBX offload is not supported or is disabled for this PF.\n");
5103 
5104 			if (pf->flags & I40E_FLAG_MFP_ENABLED)
5105 				goto out;
5106 
5107 		} else {
5108 			/* When status is not DISABLED then DCBX in FW */
5109 			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
5110 				       DCB_CAP_DCBX_VER_IEEE;
5111 
5112 			pf->flags |= I40E_FLAG_DCB_CAPABLE;
5113 			/* Enable DCB tagging only when more than one TC
5114 			 * or explicitly disable if only one TC
5115 			 */
5116 			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5117 				pf->flags |= I40E_FLAG_DCB_ENABLED;
5118 			else
5119 				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5120 			dev_dbg(&pf->pdev->dev,
5121 				"DCBX offload is supported for this PF.\n");
5122 		}
5123 	} else {
5124 		dev_info(&pf->pdev->dev,
5125 			 "Query for DCB configuration failed, err %s aq_err %s\n",
5126 			 i40e_stat_str(&pf->hw, err),
5127 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5128 	}
5129 
5130 out:
5131 	return err;
5132 }
5133 #endif /* CONFIG_I40E_DCB */
5134 #define SPEED_SIZE 14
5135 #define FC_SIZE 8
5136 /**
5137  * i40e_print_link_message - print link up or down
5138  * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false if down
5139  **/
5140 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
5141 {
5142 	char *speed = "Unknown";
5143 	char *fc = "Unknown";
5144 
5145 	if (vsi->current_isup == isup)
5146 		return;
5147 	vsi->current_isup = isup;
5148 	if (!isup) {
5149 		netdev_info(vsi->netdev, "NIC Link is Down\n");
5150 		return;
5151 	}
5152 
5153 	/* Warn user if link speed on NPAR enabled partition is not at
5154 	 * least 10Gbps
5155 	 */
5156 	if (vsi->back->hw.func_caps.npar_enable &&
5157 	    (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5158 	     vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5159 		netdev_warn(vsi->netdev,
5160 			    "The partition detected link speed that is less than 10Gbps\n");
5161 
5162 	switch (vsi->back->hw.phy.link_info.link_speed) {
5163 	case I40E_LINK_SPEED_40GB:
5164 		speed = "40 G";
5165 		break;
5166 	case I40E_LINK_SPEED_20GB:
5167 		speed = "20 G";
5168 		break;
5169 	case I40E_LINK_SPEED_10GB:
5170 		speed = "10 G";
5171 		break;
5172 	case I40E_LINK_SPEED_1GB:
5173 		speed = "1000 M";
5174 		break;
5175 	case I40E_LINK_SPEED_100MB:
5176 		speed = "100 M";
5177 		break;
5178 	default:
5179 		break;
5180 	}
5181 
5182 	switch (vsi->back->hw.fc.current_mode) {
5183 	case I40E_FC_FULL:
5184 		fc = "RX/TX";
5185 		break;
5186 	case I40E_FC_TX_PAUSE:
5187 		fc = "TX";
5188 		break;
5189 	case I40E_FC_RX_PAUSE:
5190 		fc = "RX";
5191 		break;
5192 	default:
5193 		fc = "None";
5194 		break;
5195 	}
5196 
5197 	netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
5198 		    speed, fc);
5199 }
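
/* Illustrative output of the message above (netdev and bus info vary
 * by system): a 40GB link with full flow control logs
 * "NIC Link is Up 40 Gbps Full Duplex, Flow Control: RX/TX".
 */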
5200 
5201 /**
5202  * i40e_up_complete - Finish the last steps of bringing up a connection
5203  * @vsi: the VSI being configured
5204  **/
5205 static int i40e_up_complete(struct i40e_vsi *vsi)
5206 {
5207 	struct i40e_pf *pf = vsi->back;
5208 	int err;
5209 
5210 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5211 		i40e_vsi_configure_msix(vsi);
5212 	else
5213 		i40e_configure_msi_and_legacy(vsi);
5214 
5215 	/* start rings */
5216 	err = i40e_vsi_control_rings(vsi, true);
5217 	if (err)
5218 		return err;
5219 
5220 	clear_bit(__I40E_DOWN, &vsi->state);
5221 	i40e_napi_enable_all(vsi);
5222 	i40e_vsi_enable_irq(vsi);
5223 
5224 	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
5225 	    (vsi->netdev)) {
5226 		i40e_print_link_message(vsi, true);
5227 		netif_tx_start_all_queues(vsi->netdev);
5228 		netif_carrier_on(vsi->netdev);
5229 	} else if (vsi->netdev) {
5230 		i40e_print_link_message(vsi, false);
5231 		/* need to check for qualified module here */
5232 		if ((pf->hw.phy.link_info.link_info &
5233 			I40E_AQ_MEDIA_AVAILABLE) &&
5234 		    (!(pf->hw.phy.link_info.an_info &
5235 			I40E_AQ_QUALIFIED_MODULE)))
5236 			netdev_err(vsi->netdev,
5237 				   "the driver failed to link because an unqualified module was detected.");
5238 	}
5239 
5240 	/* replay FDIR SB filters */
5241 	if (vsi->type == I40E_VSI_FDIR) {
5242 		/* reset fd counters */
5243 		pf->fd_add_err = pf->fd_atr_cnt = 0;
5244 		if (pf->fd_tcp_rule > 0) {
5245 			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
5246 			if (I40E_DEBUG_FD & pf->hw.debug_mask)
5247 				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
5248 			pf->fd_tcp_rule = 0;
5249 		}
5250 		i40e_fdir_filter_restore(vsi);
5251 	}
5252 
5253 	/* On the next run of the service_task, notify any clients of the
5254 	 * newly opened netdev
5255 	 */
5256 	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
5257 	i40e_service_event_schedule(pf);
5258 
5259 	return 0;
5260 }
5261 
5262 /**
5263  * i40e_vsi_reinit_locked - Reset the VSI
5264  * @vsi: the VSI being configured
5265  *
5266  * Rebuild the ring structs after some configuration
5267  * has changed, e.g. MTU size.
5268  **/
5269 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5270 {
5271 	struct i40e_pf *pf = vsi->back;
5272 
5273 	WARN_ON(in_interrupt());
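	/* take the config "lock": sleep-poll until CONFIG_BUSY is ours */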
5274 	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
5275 		usleep_range(1000, 2000);
5276 	i40e_down(vsi);
5277 
5278 	i40e_up(vsi);
5279 	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
5280 }
5281 
5282 /**
5283  * i40e_up - Bring the connection back up after being down
5284  * @vsi: the VSI being configured
5285  **/
5286 int i40e_up(struct i40e_vsi *vsi)
5287 {
5288 	int err;
5289 
5290 	err = i40e_vsi_configure(vsi);
5291 	if (!err)
5292 		err = i40e_up_complete(vsi);
5293 
5294 	return err;
5295 }
5296 
5297 /**
5298  * i40e_down - Shutdown the connection processing
5299  * @vsi: the VSI being stopped
5300  **/
5301 void i40e_down(struct i40e_vsi *vsi)
5302 {
5303 	int i;
5304 
5305 	/* It is assumed that the caller of this function
5306 	 * sets the vsi->state __I40E_DOWN bit.
5307 	 */
5308 	if (vsi->netdev) {
5309 		netif_carrier_off(vsi->netdev);
5310 		netif_tx_disable(vsi->netdev);
5311 	}
5312 	i40e_vsi_disable_irq(vsi);
5313 	i40e_vsi_control_rings(vsi, false);
5314 	i40e_napi_disable_all(vsi);
5315 
5316 	for (i = 0; i < vsi->num_queue_pairs; i++) {
5317 		i40e_clean_tx_ring(vsi->tx_rings[i]);
5318 		i40e_clean_rx_ring(vsi->rx_rings[i]);
5319 	}
5320 
5321 	i40e_notify_client_of_netdev_close(vsi, false);
5323 }
5324 
5325 /**
5326  * i40e_setup_tc - configure multiple traffic classes
5327  * @netdev: net device to configure
5328  * @tc: number of traffic classes to enable
5329  **/
5330 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
5331 {
5332 	struct i40e_netdev_priv *np = netdev_priv(netdev);
5333 	struct i40e_vsi *vsi = np->vsi;
5334 	struct i40e_pf *pf = vsi->back;
5335 	u8 enabled_tc = 0;
5336 	int ret = -EINVAL;
5337 	int i;
5338 
5339 	/* Check if DCB is enabled to continue */
5340 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5341 		netdev_info(netdev, "DCB is not enabled for adapter\n");
5342 		goto exit;
5343 	}
5344 
5345 	/* Check if MFP enabled */
5346 	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5347 		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
5348 		goto exit;
5349 	}
5350 
5351 	/* Check whether the requested TC count is within the enabled limit */
5352 	if (tc > i40e_pf_get_num_tc(pf)) {
5353 		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
5354 		goto exit;
5355 	}
5356 
5357 	/* Generate TC map for the number of TCs requested */
5358 	for (i = 0; i < tc; i++)
5359 		enabled_tc |= BIT(i);
5360 
5361 	/* Requesting same TC configuration as already enabled */
5362 	if (enabled_tc == vsi->tc_config.enabled_tc)
5363 		return 0;
5364 
5365 	/* Quiesce VSI queues */
5366 	i40e_quiesce_vsi(vsi);
5367 
5368 	/* Configure VSI for enabled TCs */
5369 	ret = i40e_vsi_config_tc(vsi, enabled_tc);
5370 	if (ret) {
5371 		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
5372 			    vsi->seid);
5373 		goto exit;
5374 	}
5375 
5376 	/* Unquiesce VSI */
5377 	i40e_unquiesce_vsi(vsi);
5378 
5379 exit:
5380 	return ret;
5381 }
5382 
5383 #ifdef I40E_FCOE
5384 int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
5385 		    struct tc_to_netdev *tc)
5386 #else
5387 static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
5388 			   struct tc_to_netdev *tc)
5389 #endif
5390 {
5391 	if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
5392 		return -EINVAL;
5393 	return i40e_setup_tc(netdev, tc->tc);
5394 }
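
/* For reference: userspace reaches this entry point through the mqprio
 * qdisc. A hypothetical 4-TC setup on an i40e port could look like
 * (interface name and queue layout are examples only):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 4 \
 *      map 0 1 2 3 0 1 2 3 queues 2@0 2@2 2@4 2@6 hw 1
 */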
5395 
5396 /**
5397  * i40e_open - Called when a network interface is made active
5398  * @netdev: network interface device structure
5399  *
5400  * The open entry point is called when a network interface is made
5401  * active by the system (IFF_UP).  At this point all resources needed
5402  * for transmit and receive operations are allocated, the interrupt
5403  * handler is registered with the OS, the netdev watchdog subtask is
5404  * enabled, and the stack is notified that the interface is ready.
5405  *
5406  * Returns 0 on success, negative value on failure
5407  **/
5408 int i40e_open(struct net_device *netdev)
5409 {
5410 	struct i40e_netdev_priv *np = netdev_priv(netdev);
5411 	struct i40e_vsi *vsi = np->vsi;
5412 	struct i40e_pf *pf = vsi->back;
5413 	int err;
5414 
5415 	/* disallow open during test or if eeprom is broken */
5416 	if (test_bit(__I40E_TESTING, &pf->state) ||
5417 	    test_bit(__I40E_BAD_EEPROM, &pf->state))
5418 		return -EBUSY;
5419 
5420 	netif_carrier_off(netdev);
5421 
5422 	err = i40e_vsi_open(vsi);
5423 	if (err)
5424 		return err;
5425 
5426 	/* configure global TSO hardware offload settings */
5427 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
5428 						       TCP_FLAG_FIN) >> 16);
5429 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
5430 						       TCP_FLAG_FIN |
5431 						       TCP_FLAG_CWR) >> 16);
5432 	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5433 
5434 	udp_tunnel_get_rx_info(netdev);
5435 
5436 	return 0;
5437 }
5438 
5439 /**
5440  * i40e_vsi_open - open and bring up a VSI
5441  * @vsi: the VSI to open
5442  *
5443  * Finish initialization of the VSI.
5444  *
5445  * Returns 0 on success, negative value on failure
5446  **/
5447 int i40e_vsi_open(struct i40e_vsi *vsi)
5448 {
5449 	struct i40e_pf *pf = vsi->back;
5450 	char int_name[I40E_INT_NAME_STR_LEN];
5451 	int err;
5452 
5453 	/* allocate descriptors */
5454 	err = i40e_vsi_setup_tx_resources(vsi);
5455 	if (err)
5456 		goto err_setup_tx;
5457 	err = i40e_vsi_setup_rx_resources(vsi);
5458 	if (err)
5459 		goto err_setup_rx;
5460 
5461 	err = i40e_vsi_configure(vsi);
5462 	if (err)
5463 		goto err_setup_rx;
5464 
5465 	if (vsi->netdev) {
5466 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5467 			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
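		/* int_name becomes "<driver>-<netdev>", e.g. "i40e-eth0" */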
5468 		err = i40e_vsi_request_irq(vsi, int_name);
5469 		if (err)
5470 			goto err_setup_rx;
5471 
5472 		/* Notify the stack of the actual queue counts. */
5473 		err = netif_set_real_num_tx_queues(vsi->netdev,
5474 						   vsi->num_queue_pairs);
5475 		if (err)
5476 			goto err_set_queues;
5477 
5478 		err = netif_set_real_num_rx_queues(vsi->netdev,
5479 						   vsi->num_queue_pairs);
5480 		if (err)
5481 			goto err_set_queues;
5482 
5483 	} else if (vsi->type == I40E_VSI_FDIR) {
5484 		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
5485 			 dev_driver_string(&pf->pdev->dev),
5486 			 dev_name(&pf->pdev->dev));
5487 		err = i40e_vsi_request_irq(vsi, int_name);
5488 
5489 	} else {
5490 		err = -EINVAL;
5491 		goto err_setup_rx;
5492 	}
5493 
5494 	err = i40e_up_complete(vsi);
5495 	if (err)
5496 		goto err_up_complete;
5497 
5498 	return 0;
5499 
5500 err_up_complete:
5501 	i40e_down(vsi);
5502 err_set_queues:
5503 	i40e_vsi_free_irq(vsi);
5504 err_setup_rx:
5505 	i40e_vsi_free_rx_resources(vsi);
5506 err_setup_tx:
5507 	i40e_vsi_free_tx_resources(vsi);
5508 	if (vsi == pf->vsi[pf->lan_vsi])
5509 		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
5510 
5511 	return err;
5512 }
5513 
5514 /**
5515  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5516  * @pf: Pointer to PF
5517  *
5518  * This function destroys the hlist where all the Flow Director
5519  * filters were saved.
5520  **/
5521 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5522 {
5523 	struct i40e_fdir_filter *filter;
5524 	struct hlist_node *node2;
5525 
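	/* _safe walker: entries are freed while traversing the list */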
5526 	hlist_for_each_entry_safe(filter, node2,
5527 				  &pf->fdir_filter_list, fdir_node) {
5528 		hlist_del(&filter->fdir_node);
5529 		kfree(filter);
5530 	}
5531 	pf->fdir_pf_active_filters = 0;
5532 }
5533 
5534 /**
5535  * i40e_close - Disables a network interface
5536  * @netdev: network interface device structure
5537  *
5538  * The close entry point is called when an interface is de-activated
5539  * by the OS.  The hardware is still under the driver's control, but
5540  * this netdev interface is disabled.
5541  *
5542  * Returns 0, this is not allowed to fail
5543  **/
5544 int i40e_close(struct net_device *netdev)
5545 {
5546 	struct i40e_netdev_priv *np = netdev_priv(netdev);
5547 	struct i40e_vsi *vsi = np->vsi;
5548 
5549 	i40e_vsi_close(vsi);
5550 
5551 	return 0;
5552 }
5553 
5554 /**
5555  * i40e_do_reset - Start a PF or Core Reset sequence
5556  * @pf: board private structure
5557  * @reset_flags: which reset is requested
5558  *
5559  * The essential difference in resets is that the PF Reset
5560  * doesn't clear the packet buffers, doesn't reset the PE
5561  * firmware, and doesn't bother the other PFs on the chip.
5562  **/
5563 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
5564 {
5565 	u32 val;
5566 
5567 	WARN_ON(in_interrupt());
5568 
5570 	/* do the biggest reset indicated */
5571 	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
5572 
5573 		/* Request a Global Reset
5574 		 *
5575 		 * This will start the chip's countdown to the actual full
5576 		 * chip reset event, and cause a warning interrupt to be sent
5577 		 * to all PFs, including the requestor.  Our handler
5578 		 * for the warning interrupt will deal with the shutdown
5579 		 * and recovery of the switch setup.
5580 		 */
5581 		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
5582 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5583 		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5584 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5585 
5586 	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
5587 
5588 		/* Request a Core Reset
5589 		 *
5590 		 * Same as Global Reset, except does *not* include the MAC/PHY
5591 		 */
5592 		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
5593 		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5594 		val |= I40E_GLGEN_RTRIG_CORER_MASK;
5595 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5596 		i40e_flush(&pf->hw);
5597 
5598 	} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
5599 
5600 		/* Request a PF Reset
5601 		 *
5602 		 * Resets only the PF-specific registers
5603 		 *
5604 		 * This goes directly to the tear-down and rebuild of
5605 		 * the switch, since we need to do all the recovery as
5606 		 * for the Core Reset.
5607 		 */
5608 		dev_dbg(&pf->pdev->dev, "PFR requested\n");
5609 		i40e_handle_reset_warning(pf);
5610 
5611 	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
5612 		int v;
5613 
5614 		/* Find the VSI(s) that requested a re-init */
5615 		dev_info(&pf->pdev->dev,
5616 			 "VSI reinit requested\n");
5617 		for (v = 0; v < pf->num_alloc_vsi; v++) {
5618 			struct i40e_vsi *vsi = pf->vsi[v];
5619 
5620 			if (vsi != NULL &&
5621 			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5622 				i40e_vsi_reinit_locked(pf->vsi[v]);
5623 				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5624 			}
5625 		}
5626 	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
5627 		int v;
5628 
5629 		/* Find the VSI(s) that need to be brought down */
5630 		dev_info(&pf->pdev->dev, "VSI down requested\n");
5631 		for (v = 0; v < pf->num_alloc_vsi; v++) {
5632 			struct i40e_vsi *vsi = pf->vsi[v];
5633 
5634 			if (vsi != NULL &&
5635 			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5636 				set_bit(__I40E_DOWN, &vsi->state);
5637 				i40e_down(vsi);
5638 				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5639 			}
5640 		}
5641 	} else {
5642 		dev_info(&pf->pdev->dev,
5643 			 "bad reset request 0x%08x\n", reset_flags);
5644 	}
5645 }
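
/* Note: most callers do not invoke i40e_do_reset() directly; they
 * request the reset and let the service task run it, with the same
 * pattern used in i40e_resume_port_tx() above:
 *
 *	set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
 *	i40e_service_event_schedule(pf);
 */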
5646 
5647 #ifdef CONFIG_I40E_DCB
5648 /**
5649  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5650  * @pf: board private structure
5651  * @old_cfg: current DCB config
5652  * @new_cfg: new DCB config
5653  **/
5654 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5655 			    struct i40e_dcbx_config *old_cfg,
5656 			    struct i40e_dcbx_config *new_cfg)
5657 {
5658 	bool need_reconfig = false;
5659 
5660 	/* Check if ETS configuration has changed */
5661 	if (memcmp(&new_cfg->etscfg,
5662 		   &old_cfg->etscfg,
5663 		   sizeof(new_cfg->etscfg))) {
5664 		/* If Priority Table has changed reconfig is needed */
5665 		if (memcmp(&new_cfg->etscfg.prioritytable,
5666 			   &old_cfg->etscfg.prioritytable,
5667 			   sizeof(new_cfg->etscfg.prioritytable))) {
5668 			need_reconfig = true;
5669 			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5670 		}
5671 
5672 		if (memcmp(&new_cfg->etscfg.tcbwtable,
5673 			   &old_cfg->etscfg.tcbwtable,
5674 			   sizeof(new_cfg->etscfg.tcbwtable)))
5675 			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5676 
5677 		if (memcmp(&new_cfg->etscfg.tsatable,
5678 			   &old_cfg->etscfg.tsatable,
5679 			   sizeof(new_cfg->etscfg.tsatable)))
5680 			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5681 	}
5682 
5683 	/* Check if PFC configuration has changed */
5684 	if (memcmp(&new_cfg->pfc,
5685 		   &old_cfg->pfc,
5686 		   sizeof(new_cfg->pfc))) {
5687 		need_reconfig = true;
5688 		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5689 	}
5690 
5691 	/* Check if APP Table has changed */
5692 	if (memcmp(&new_cfg->app,
5693 		   &old_cfg->app,
5694 		   sizeof(new_cfg->app))) {
5695 		need_reconfig = true;
5696 		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5697 	}
5698 
5699 	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
5700 	return need_reconfig;
5701 }
5702 
5703 /**
5704  * i40e_handle_lldp_event - Handle LLDP Change MIB event
5705  * @pf: board private structure
5706  * @e: event info posted on ARQ
5707  **/
5708 static int i40e_handle_lldp_event(struct i40e_pf *pf,
5709 				  struct i40e_arq_event_info *e)
5710 {
5711 	struct i40e_aqc_lldp_get_mib *mib =
5712 		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5713 	struct i40e_hw *hw = &pf->hw;
5714 	struct i40e_dcbx_config tmp_dcbx_cfg;
5715 	bool need_reconfig = false;
5716 	int ret = 0;
5717 	u8 type;
5718 
5719 	/* Not DCB capable or capability disabled */
5720 	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5721 		return ret;
5722 
5723 	/* Ignore if event is not for Nearest Bridge */
5724 	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5725 		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
5726 	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
5727 	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5728 		return ret;
5729 
5730 	/* Check MIB Type and return if event for Remote MIB update */
5731 	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
5732 	dev_dbg(&pf->pdev->dev,
5733 		"LLDP event mib type %s\n", type ? "remote" : "local");
5734 	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5735 		/* Update the remote cached instance and return */
5736 		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5737 				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5738 				&hw->remote_dcbx_config);
5739 		goto exit;
5740 	}
5741 
5742 	/* Store the old configuration */
5743 	tmp_dcbx_cfg = hw->local_dcbx_config;
5744 
5745 	/* Reset the old DCBx configuration data */
5746 	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
5747 	/* Get updated DCBX data from firmware */
5748 	ret = i40e_get_dcb_config(&pf->hw);
5749 	if (ret) {
5750 		dev_info(&pf->pdev->dev,
5751 			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
5752 			 i40e_stat_str(&pf->hw, ret),
5753 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5754 		goto exit;
5755 	}
5756 
5757 	/* No change detected in DCBX configs */
5758 	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
5759 		    sizeof(tmp_dcbx_cfg))) {
5760 		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
5761 		goto exit;
5762 	}
5763 
5764 	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
5765 					       &hw->local_dcbx_config);
5766 
5767 	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
5768 
5769 	if (!need_reconfig)
5770 		goto exit;
5771 
5772 	/* Enable DCB tagging only when more than one TC */
5773 	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5774 		pf->flags |= I40E_FLAG_DCB_ENABLED;
5775 	else
5776 		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5777 
5778 	set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5779 	/* Reconfiguration needed; quiesce all VSIs */
5780 	i40e_pf_quiesce_all_vsi(pf);
5781 
5782 	/* Changes in configuration; update the VEB/VSI */
5783 	i40e_dcb_reconfigure(pf);
5784 
5785 	ret = i40e_resume_port_tx(pf);
5786 
5787 	clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5788 	/* In case of error there is no point in resuming VSIs */
5789 	if (ret)
5790 		goto exit;
5791 
5792 	/* Wait for the PF's queues to be disabled */
5793 	ret = i40e_pf_wait_queues_disabled(pf);
5794 	if (ret) {
5795 		/* Schedule PF reset to recover */
5796 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5797 		i40e_service_event_schedule(pf);
5798 	} else {
5799 		i40e_pf_unquiesce_all_vsi(pf);
5800 		/* Notify the client of the DCB changes */
5801 		i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
5802 	}
5803 
5804 exit:
5805 	return ret;
5806 }
5807 #endif /* CONFIG_I40E_DCB */
5808 
5809 /**
5810  * i40e_do_reset_safe - Protected reset path for userland calls.
5811  * @pf: board private structure
5812  * @reset_flags: which reset is requested
5814  **/
5815 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5816 {
5817 	rtnl_lock();
5818 	i40e_do_reset(pf, reset_flags);
5819 	rtnl_unlock();
5820 }
5821 
5822 /**
5823  * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5824  * @pf: board private structure
5825  * @e: event info posted on ARQ
5826  *
5827  * Handler for LAN Queue Overflow Event generated by the firmware for PF
5828  * and VF queues
5829  **/
5830 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5831 					   struct i40e_arq_event_info *e)
5832 {
5833 	struct i40e_aqc_lan_overflow *data =
5834 		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5835 	u32 queue = le32_to_cpu(data->prtdcb_rupto);
5836 	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5837 	struct i40e_hw *hw = &pf->hw;
5838 	struct i40e_vf *vf;
5839 	u16 vf_id;
5840 
5841 	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5842 		queue, qtx_ctl);
5843 
5844 	/* Queue belongs to VF, find the VF and issue VF reset */
5845 	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5846 	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5847 		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5848 			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5849 		vf_id -= hw->func_caps.vf_base_id;
5850 		vf = &pf->vf[vf_id];
5851 		i40e_vc_notify_vf_reset(vf);
5852 		/* Allow VF to process pending reset notification */
5853 		msleep(20);
5854 		i40e_reset_vf(vf, false);
5855 	}
5856 }
5857 
5858 /**
5859  * i40e_service_event_complete - Finish up the service event
5860  * @pf: board private structure
5861  **/
5862 static void i40e_service_event_complete(struct i40e_pf *pf)
5863 {
5864 	WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
5865 
5866 	/* flush memory to make sure state is correct before next watchdog */
5867 	smp_mb__before_atomic();
5868 	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
5869 }
5870 
5871 /**
5872  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5873  * @pf: board private structure
5874  **/
5875 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5876 {
5877 	u32 val, fcnt_prog;
5878 
5879 	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5880 	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5881 	return fcnt_prog;
5882 }
5883 
5884 /**
5885  * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5886  * @pf: board private structure
5887  **/
5888 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5889 {
5890 	u32 val, fcnt_prog;
5891 
5892 	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5893 	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5894 		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5895 		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5896 	return fcnt_prog;
5897 }
5898 
5899 /**
5900  * i40e_get_global_fd_count - Get total FD filters programmed on device
5901  * @pf: board private structure
5902  **/
5903 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5904 {
5905 	u32 val, fcnt_prog;
5906 
5907 	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5908 	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5909 		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5910 		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5911 	return fcnt_prog;
5912 }
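
/* Worked example (illustrative values only): if BESTCNT_SHIFT were 16
 * and the register read 0x00050003, the masks above would yield three
 * guaranteed plus five best-effort filters, i.e. eight programmed.
 */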
5913 
5914 /**
5915  * i40e_fdir_check_and_reenable - Re-enable FD ATR or SB if disabled
5916  * @pf: board private structure
5917  **/
5918 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5919 {
5920 	struct i40e_fdir_filter *filter;
5921 	u32 fcnt_prog, fcnt_avail;
5922 	struct hlist_node *node;
5923 
5924 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5925 		return;
5926 
5927 	/* Check if FD SB or ATR was auto disabled and if there is enough room
5928 	 * to re-enable
5929 	 */
5930 	fcnt_prog = i40e_get_global_fd_count(pf);
5931 	fcnt_avail = pf->fdir_pf_filter_count;
5932 	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5933 	    (pf->fd_add_err == 0) ||
5934 	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
5935 		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5936 		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5937 			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5938 			if (I40E_DEBUG_FD & pf->hw.debug_mask)
5939 				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
5940 		}
5941 	}
5942 
5943 	/* Wait for some more space to be available to turn on ATR. We also
5944 	 * must check that no existing ntuple rules for TCP are in effect
5945 	 */
5946 	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5947 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5948 		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
5949 		    (pf->fd_tcp_rule == 0)) {
5950 			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5951 			if (I40E_DEBUG_FD & pf->hw.debug_mask)
5952 				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
5953 		}
5954 	}
5955 
5956 	/* if hw had a problem adding a filter, delete it */
5957 	if (pf->fd_inv > 0) {
5958 		hlist_for_each_entry_safe(filter, node,
5959 					  &pf->fdir_filter_list, fdir_node) {
5960 			if (filter->fd_id == pf->fd_inv) {
5961 				hlist_del(&filter->fdir_node);
5962 				kfree(filter);
5963 				pf->fdir_pf_active_filters--;
5964 			}
5965 		}
5966 	}
5967 }
5968 
5969 #define I40E_MIN_FD_FLUSH_INTERVAL 10
5970 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
5971 /**
5972  * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
5973  * @pf: board private structure
5974  **/
5975 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5976 {
5977 	unsigned long min_flush_time;
5978 	int flush_wait_retry = 50;
5979 	bool disable_atr = false;
5980 	int fd_room;
5981 	int reg;
5982 
5983 	if (!time_after(jiffies, pf->fd_flush_timestamp +
5984 				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
5985 		return;
5986 
5987 	/* If flushes are happening too quickly and we have mostly SB rules,
5988 	 * we should not re-enable ATR for some time.
5989 	 */
5990 	min_flush_time = pf->fd_flush_timestamp +
5991 			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
5992 	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
5993 
5994 	if (!(time_after(jiffies, min_flush_time)) &&
5995 	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
5996 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
5997 			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
5998 		disable_atr = true;
5999 	}
6000 
6001 	pf->fd_flush_timestamp = jiffies;
6002 	pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
6003 	/* flush all filters */
6004 	wr32(&pf->hw, I40E_PFQF_CTL_1,
6005 	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
6006 	i40e_flush(&pf->hw);
6007 	pf->fd_flush_cnt++;
6008 	pf->fd_add_err = 0;
6009 	do {
6010 		/* Check FD flush status every 5-6msec */
6011 		usleep_range(5000, 6000);
6012 		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
6013 		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
6014 			break;
6015 	} while (flush_wait_retry--);
6016 	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
6017 		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
6018 	} else {
6019 		/* replay sideband filters */
6020 		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
6021 		if (!disable_atr)
6022 			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
6023 		clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
6024 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
6025 			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
6026 	}
6027 }
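
/* Timing recap: a flush is attempted at most once per
 * I40E_MIN_FD_FLUSH_INTERVAL (10) seconds, and when the previous flush
 * was within I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE (30) seconds while SB
 * rules leave little FD room, ATR is left disabled after the replay.
 */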
6028 
6029 /**
6030  * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
6031  * @pf: board private structure
6032  **/
6033 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
6034 {
6035 	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
6036 }
6037 
6038 /* We can see up to 256 filter programming descriptors in transit if the
6039  * filters are being applied really fast, before we see the first filter
6040  * miss error on Rx queue 0. Accumulating enough error messages before
6041  * reacting makes sure we don't cause a flush too often.
6042  */
6043 #define I40E_MAX_FD_PROGRAM_ERROR 256
6044 
6045 /**
6046  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
6047  * @pf: board private structure
6048  **/
6049 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
6050 {
6052 	/* if interface is down do nothing */
6053 	if (test_bit(__I40E_DOWN, &pf->state))
6054 		return;
6055 
6056 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
6057 		i40e_fdir_flush_and_replay(pf);
6058 
6059 	i40e_fdir_check_and_reenable(pf);
6061 }
6062 
6063 /**
6064  * i40e_vsi_link_event - notify VSI of a link event
6065  * @vsi: vsi to be notified
6066  * @link_up: link up or down
6067  **/
6068 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
6069 {
6070 	if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
6071 		return;
6072 
6073 	switch (vsi->type) {
6074 	case I40E_VSI_MAIN:
6075 #ifdef I40E_FCOE
6076 	case I40E_VSI_FCOE:
6077 #endif
6078 		if (!vsi->netdev || !vsi->netdev_registered)
6079 			break;
6080 
6081 		if (link_up) {
6082 			netif_carrier_on(vsi->netdev);
6083 			netif_tx_wake_all_queues(vsi->netdev);
6084 		} else {
6085 			netif_carrier_off(vsi->netdev);
6086 			netif_tx_stop_all_queues(vsi->netdev);
6087 		}
6088 		break;
6089 
6090 	case I40E_VSI_SRIOV:
6091 	case I40E_VSI_VMDQ2:
6092 	case I40E_VSI_CTRL:
6093 	case I40E_VSI_IWARP:
6094 	case I40E_VSI_MIRROR:
6095 	default:
6096 		/* there is no notification for other VSIs */
6097 		break;
6098 	}
6099 }
6100 
6101 /**
6102  * i40e_veb_link_event - notify elements on the veb of a link event
6103  * @veb: veb to be notified
6104  * @link_up: link up or down
6105  **/
6106 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6107 {
6108 	struct i40e_pf *pf;
6109 	int i;
6110 
6111 	if (!veb || !veb->pf)
6112 		return;
6113 	pf = veb->pf;
6114 
6115 	/* depth first... */
6116 	for (i = 0; i < I40E_MAX_VEB; i++)
6117 		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6118 			i40e_veb_link_event(pf->veb[i], link_up);
6119 
6120 	/* ... now the local VSIs */
6121 	for (i = 0; i < pf->num_alloc_vsi; i++)
6122 		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6123 			i40e_vsi_link_event(pf->vsi[i], link_up);
6124 }
6125 
6126 /**
6127  * i40e_link_event - Update netif_carrier status
6128  * @pf: board private structure
6129  **/
6130 static void i40e_link_event(struct i40e_pf *pf)
6131 {
6132 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6133 	u8 new_link_speed, old_link_speed;
6134 	i40e_status status;
6135 	bool new_link, old_link;
6136 
6137 	/* save off old link status information */
6138 	pf->hw.phy.link_info_old = pf->hw.phy.link_info;
6139 
6140 	/* set this to force the get_link_status call to refresh state */
6141 	pf->hw.phy.get_link_info = true;
6142 
6143 	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
6144 
6145 	status = i40e_get_link_status(&pf->hw, &new_link);
6146 	if (status) {
6147 		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
6148 			status);
6149 		return;
6150 	}
6151 
6152 	old_link_speed = pf->hw.phy.link_info_old.link_speed;
6153 	new_link_speed = pf->hw.phy.link_info.link_speed;
6154 
6155 	if (new_link == old_link &&
6156 	    new_link_speed == old_link_speed &&
6157 	    (test_bit(__I40E_DOWN, &vsi->state) ||
6158 	     new_link == netif_carrier_ok(vsi->netdev)))
6159 		return;
6160 
6161 	if (!test_bit(__I40E_DOWN, &vsi->state))
6162 		i40e_print_link_message(vsi, new_link);
6163 
6164 	/* Notify the base of the switch tree connected to
6165 	 * the link.  Floating VEBs are not notified.
6166 	 */
6167 	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6168 		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
6169 	else
6170 		i40e_vsi_link_event(vsi, new_link);
6171 
6172 	if (pf->vf)
6173 		i40e_vc_notify_link_state(pf);
6174 
6175 	if (pf->flags & I40E_FLAG_PTP)
6176 		i40e_ptp_set_increment(pf);
6177 }
6178 
6179 /**
6180  * i40e_watchdog_subtask - periodic checks not using event driven response
6181  * @pf: board private structure
6182  **/
6183 static void i40e_watchdog_subtask(struct i40e_pf *pf)
6184 {
6185 	int i;
6186 
6187 	/* if interface is down do nothing */
6188 	if (test_bit(__I40E_DOWN, &pf->state) ||
6189 	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
6190 		return;
6191 
6192 	/* make sure we don't do these things too often */
6193 	if (time_before(jiffies, (pf->service_timer_previous +
6194 				  pf->service_timer_period)))
6195 		return;
6196 	pf->service_timer_previous = jiffies;
6197 
6198 	if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
6199 		i40e_link_event(pf);
6200 
6201 	/* Update the stats for active netdevs so the network stack
6202 	 * can look at updated numbers whenever it cares to
6203 	 */
6204 	for (i = 0; i < pf->num_alloc_vsi; i++)
6205 		if (pf->vsi[i] && pf->vsi[i]->netdev)
6206 			i40e_update_stats(pf->vsi[i]);
6207 
6208 	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
6209 		/* Update the stats for the active switching components */
6210 		for (i = 0; i < I40E_MAX_VEB; i++)
6211 			if (pf->veb[i])
6212 				i40e_update_veb_stats(pf->veb[i]);
6213 	}
6214 
6215 	i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
6216 }
6217 
6218 /**
6219  * i40e_reset_subtask - Set up for resetting the device and driver
6220  * @pf: board private structure
6221  **/
6222 static void i40e_reset_subtask(struct i40e_pf *pf)
6223 {
6224 	u32 reset_flags = 0;
6225 
6226 	rtnl_lock();
6227 	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
6228 		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
6229 		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
6230 	}
6231 	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
6232 		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
6233 		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6234 	}
6235 	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
6236 		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
6237 		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
6238 	}
6239 	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
6240 		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
6241 		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
6242 	}
6243 	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
6244 		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
6245 		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
6246 	}
6247 
6248 	/* If there's a recovery already waiting, it takes
6249 	 * precedence over starting a new reset sequence.
6250 	 */
6251 	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
6252 		i40e_handle_reset_warning(pf);
6253 		goto unlock;
6254 	}
6255 
6256 	/* Don't start a new reset if we're already down or resetting */
6257 	if (reset_flags &&
6258 	    !test_bit(__I40E_DOWN, &pf->state) &&
6259 	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
6260 		i40e_do_reset(pf, reset_flags);
6261 
6262 unlock:
6263 	rtnl_unlock();
6264 }
6265 
6266 /**
6267  * i40e_handle_link_event - Handle link event
6268  * @pf: board private structure
6269  * @e: event info posted on ARQ
6270  **/
6271 static void i40e_handle_link_event(struct i40e_pf *pf,
6272 				   struct i40e_arq_event_info *e)
6273 {
6274 	struct i40e_aqc_get_link_status *status =
6275 		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
6276 
6277 	/* Do a new status request to re-enable LSE reporting
6278 	 * and load new status information into the hw struct
6279 	 * This completely ignores any state information
6280 	 * in the ARQ event info, instead choosing to always
6281 	 * issue the AQ update link status command.
6282 	 */
6283 	i40e_link_event(pf);
6284 
6285 	/* check for unqualified module, if link is down */
6286 	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
6287 	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
6288 	    (!(status->link_info & I40E_AQ_LINK_UP)))
6289 		dev_err(&pf->pdev->dev,
6290 			"The driver failed to link because an unqualified module was detected.\n");
6291 }
6292 
6293 /**
6294  * i40e_clean_adminq_subtask - Clean the AdminQ rings
6295  * @pf: board private structure
6296  **/
6297 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
6298 {
6299 	struct i40e_arq_event_info event;
6300 	struct i40e_hw *hw = &pf->hw;
6301 	u16 pending, i = 0;
6302 	i40e_status ret;
6303 	u16 opcode;
6304 	u32 oldval;
6305 	u32 val;
6306 
6307 	/* Do not run clean AQ when PF reset fails */
6308 	if (test_bit(__I40E_RESET_FAILED, &pf->state))
6309 		return;
6310 
6311 	/* check for error indications */
6312 	val = rd32(&pf->hw, pf->hw.aq.arq.len);
6313 	oldval = val;
6314 	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
6315 		if (hw->debug_mask & I40E_DEBUG_AQ)
6316 			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
6317 		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
6318 	}
6319 	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
6320 		if (hw->debug_mask & I40E_DEBUG_AQ)
6321 			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
6322 		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
6323 		pf->arq_overflows++;
6324 	}
6325 	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
6326 		if (hw->debug_mask & I40E_DEBUG_AQ)
6327 			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
6328 		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
6329 	}
6330 	if (oldval != val)
6331 		wr32(&pf->hw, pf->hw.aq.arq.len, val);
6332 
6333 	val = rd32(&pf->hw, pf->hw.aq.asq.len);
6334 	oldval = val;
6335 	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
6336 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6337 			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
6338 		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
6339 	}
6340 	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
6341 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6342 			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
6343 		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
6344 	}
6345 	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
6346 		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6347 			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
6348 		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
6349 	}
6350 	if (oldval != val)
6351 		wr32(&pf->hw, pf->hw.aq.asq.len, val);
6352 
6353 	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
6354 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
6355 	if (!event.msg_buf)
6356 		return;
6357 
6358 	do {
6359 		ret = i40e_clean_arq_element(hw, &event, &pending);
6360 		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
6361 			break;
6362 		else if (ret) {
6363 			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
6364 			break;
6365 		}
6366 
6367 		opcode = le16_to_cpu(event.desc.opcode);
6368 		switch (opcode) {
6370 		case i40e_aqc_opc_get_link_status:
6371 			i40e_handle_link_event(pf, &event);
6372 			break;
6373 		case i40e_aqc_opc_send_msg_to_pf:
6374 			ret = i40e_vc_process_vf_msg(pf,
6375 					le16_to_cpu(event.desc.retval),
6376 					le32_to_cpu(event.desc.cookie_high),
6377 					le32_to_cpu(event.desc.cookie_low),
6378 					event.msg_buf,
6379 					event.msg_len);
6380 			break;
6381 		case i40e_aqc_opc_lldp_update_mib:
6382 			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
6383 #ifdef CONFIG_I40E_DCB
6384 			rtnl_lock();
6385 			ret = i40e_handle_lldp_event(pf, &event);
6386 			rtnl_unlock();
6387 #endif /* CONFIG_I40E_DCB */
6388 			break;
6389 		case i40e_aqc_opc_event_lan_overflow:
6390 			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
6391 			i40e_handle_lan_overflow_event(pf, &event);
6392 			break;
6393 		case i40e_aqc_opc_send_msg_to_peer:
6394 			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
6395 			break;
6396 		case i40e_aqc_opc_nvm_erase:
6397 		case i40e_aqc_opc_nvm_update:
6398 		case i40e_aqc_opc_oem_post_update:
6399 			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
6400 				   "ARQ NVM operation 0x%04x completed\n",
6401 				   opcode);
6402 			break;
6403 		default:
6404 			dev_info(&pf->pdev->dev,
6405 				 "ARQ: Unknown event 0x%04x ignored\n",
6406 				 opcode);
6407 			break;
6408 		}
6409 	} while (pending && (i++ < pf->adminq_work_limit));
6410 
6411 	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
6412 	/* re-enable Admin queue interrupt cause */
6413 	val = rd32(hw, I40E_PFINT_ICR0_ENA);
6414 	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6415 	wr32(hw, I40E_PFINT_ICR0_ENA, val);
6416 	i40e_flush(hw);
6417 
6418 	kfree(event.msg_buf);
6419 }
6420 
6421 /**
6422  * i40e_verify_eeprom - make sure eeprom is good to use
6423  * @pf: board private structure
6424  **/
6425 static void i40e_verify_eeprom(struct i40e_pf *pf)
6426 {
6427 	int err;
6428 
6429 	err = i40e_diag_eeprom_test(&pf->hw);
6430 	if (err) {
6431 		/* retry in case of garbage read */
6432 		err = i40e_diag_eeprom_test(&pf->hw);
6433 		if (err) {
6434 			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6435 				 err);
6436 			set_bit(__I40E_BAD_EEPROM, &pf->state);
6437 		}
6438 	}
6439 
6440 	if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6441 		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6442 		clear_bit(__I40E_BAD_EEPROM, &pf->state);
6443 	}
6444 }
6445 
6446 /**
6447  * i40e_enable_pf_switch_lb
6448  * @pf: pointer to the PF structure
6449  *
6450  * enable switch loop back or die - no point in a return value
6451  **/
6452 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6453 {
6454 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6455 	struct i40e_vsi_context ctxt;
6456 	int ret;
6457 
6458 	ctxt.seid = pf->main_vsi_seid;
6459 	ctxt.pf_num = pf->hw.pf_id;
6460 	ctxt.vf_num = 0;
6461 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6462 	if (ret) {
6463 		dev_info(&pf->pdev->dev,
6464 			 "couldn't get PF vsi config, err %s aq_err %s\n",
6465 			 i40e_stat_str(&pf->hw, ret),
6466 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6467 		return;
6468 	}
6469 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6470 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6471 	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6472 
6473 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6474 	if (ret) {
6475 		dev_info(&pf->pdev->dev,
6476 			 "update vsi switch failed, err %s aq_err %s\n",
6477 			 i40e_stat_str(&pf->hw, ret),
6478 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6479 	}
6480 }
6481 
6482 /**
6483  * i40e_disable_pf_switch_lb
6484  * @pf: pointer to the PF structure
6485  *
6486  * disable switch loop back or die - no point in a return value
6487  **/
6488 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6489 {
6490 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6491 	struct i40e_vsi_context ctxt;
6492 	int ret;
6493 
6494 	ctxt.seid = pf->main_vsi_seid;
6495 	ctxt.pf_num = pf->hw.pf_id;
6496 	ctxt.vf_num = 0;
6497 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6498 	if (ret) {
6499 		dev_info(&pf->pdev->dev,
6500 			 "couldn't get PF vsi config, err %s aq_err %s\n",
6501 			 i40e_stat_str(&pf->hw, ret),
6502 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6503 		return;
6504 	}
6505 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6506 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6507 	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6508 
6509 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6510 	if (ret) {
6511 		dev_info(&pf->pdev->dev,
6512 			 "update vsi switch failed, err %s aq_err %s\n",
6513 			 i40e_stat_str(&pf->hw, ret),
6514 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6515 	}
6516 }
6517 
6518 /**
6519  * i40e_config_bridge_mode - Configure the HW bridge mode
6520  * @veb: pointer to the bridge instance
6521  *
6522  * Configure the loop back mode for the LAN VSI that is downlink to the
6523  * specified HW bridge instance. It is expected this function is called
6524  * when a new HW bridge is instantiated.
6525  **/
6526 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6527 {
6528 	struct i40e_pf *pf = veb->pf;
6529 
6530 	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6531 		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6532 			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6533 	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6534 		i40e_disable_pf_switch_lb(pf);
6535 	else
6536 		i40e_enable_pf_switch_lb(pf);
6537 }
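
/* The bridge mode itself normally comes from userspace via iproute2,
 * e.g. (illustrative command):
 *
 *   bridge link set dev eth0 hwmode vepa
 *
 * which is what ends up instantiating or updating the HW bridge that
 * this helper configures.
 */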
6538 
6539 /**
6540  * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
6541  * @veb: pointer to the VEB instance
6542  *
6543  * This is a recursive function that first builds the attached VSIs then
6544  * recurses in to build the next layer of VEB.  We track the connections
6545  * through our own index numbers because the SEIDs from the HW could
6546  * change across the reset.
6547  **/
6548 static int i40e_reconstitute_veb(struct i40e_veb *veb)
6549 {
6550 	struct i40e_vsi *ctl_vsi = NULL;
6551 	struct i40e_pf *pf = veb->pf;
6552 	int v, veb_idx;
6553 	int ret;
6554 
6555 	/* build VSI that owns this VEB, temporarily attached to base VEB */
6556 	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6557 		if (pf->vsi[v] &&
6558 		    pf->vsi[v]->veb_idx == veb->idx &&
6559 		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6560 			ctl_vsi = pf->vsi[v];
6561 			break;
6562 		}
6563 	}
6564 	if (!ctl_vsi) {
6565 		dev_info(&pf->pdev->dev,
6566 			 "missing owner VSI for veb_idx %d\n", veb->idx);
6567 		ret = -ENOENT;
6568 		goto end_reconstitute;
6569 	}
6570 	if (ctl_vsi != pf->vsi[pf->lan_vsi])
6571 		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6572 	ret = i40e_add_vsi(ctl_vsi);
6573 	if (ret) {
6574 		dev_info(&pf->pdev->dev,
6575 			 "rebuild of veb_idx %d owner VSI failed: %d\n",
6576 			 veb->idx, ret);
6577 		goto end_reconstitute;
6578 	}
6579 	i40e_vsi_reset_stats(ctl_vsi);
6580 
6581 	/* create the VEB in the switch and move the VSI onto the VEB */
6582 	ret = i40e_add_veb(veb, ctl_vsi);
6583 	if (ret)
6584 		goto end_reconstitute;
6585 
6586 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6587 		veb->bridge_mode = BRIDGE_MODE_VEB;
6588 	else
6589 		veb->bridge_mode = BRIDGE_MODE_VEPA;
6590 	i40e_config_bridge_mode(veb);
6591 
6592 	/* create the remaining VSIs attached to this VEB */
6593 	for (v = 0; v < pf->num_alloc_vsi; v++) {
6594 		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6595 			continue;
6596 
6597 		if (pf->vsi[v]->veb_idx == veb->idx) {
6598 			struct i40e_vsi *vsi = pf->vsi[v];
6599 
6600 			vsi->uplink_seid = veb->seid;
6601 			ret = i40e_add_vsi(vsi);
6602 			if (ret) {
6603 				dev_info(&pf->pdev->dev,
6604 					 "rebuild of vsi_idx %d failed: %d\n",
6605 					 v, ret);
6606 				goto end_reconstitute;
6607 			}
6608 			i40e_vsi_reset_stats(vsi);
6609 		}
6610 	}
6611 
6612 	/* create any VEBs attached to this VEB - RECURSION */
6613 	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6614 		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6615 			pf->veb[veb_idx]->uplink_seid = veb->seid;
6616 			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6617 			if (ret)
6618 				break;
6619 		}
6620 	}
6621 
6622 end_reconstitute:
6623 	return ret;
6624 }
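
/* Rebuild order sketch for the recursion above:
 *
 *   MAC uplink
 *    `-- VEB             (created once its owner VSI exists)
 *         |-- owner VSI  (rebuilt first)
 *         |-- other VSIs (rebuilt next)
 *         `-- child VEBs (handled by recursing)
 */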
6625 
6626 /**
6627  * i40e_get_capabilities - get info about the HW
6628  * @pf: the PF struct
6629  **/
6630 static int i40e_get_capabilities(struct i40e_pf *pf)
6631 {
6632 	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6633 	u16 data_size;
6634 	int buf_len;
6635 	int err;
6636 
6637 	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6638 	do {
6639 		cap_buf = kzalloc(buf_len, GFP_KERNEL);
6640 		if (!cap_buf)
6641 			return -ENOMEM;
6642 
6643 		/* this loads the data into the hw struct for us */
6644 		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6645 					    &data_size,
6646 					    i40e_aqc_opc_list_func_capabilities,
6647 					    NULL);
6648 		/* data loaded, buffer no longer needed */
6649 		kfree(cap_buf);
6650 
6651 		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6652 			/* retry with a larger buffer */
6653 			buf_len = data_size;
6654 		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6655 			dev_info(&pf->pdev->dev,
6656 				 "capability discovery failed, err %s aq_err %s\n",
6657 				 i40e_stat_str(&pf->hw, err),
6658 				 i40e_aq_str(&pf->hw,
6659 					     pf->hw.aq.asq_last_status));
6660 			return -ENODEV;
6661 		}
6662 	} while (err);
6663 
6664 	if (pf->hw.debug_mask & I40E_DEBUG_USER)
6665 		dev_info(&pf->pdev->dev,
6666 			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6667 			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6668 			 pf->hw.func_caps.num_msix_vectors,
6669 			 pf->hw.func_caps.num_msix_vectors_vf,
6670 			 pf->hw.func_caps.fd_filters_guaranteed,
6671 			 pf->hw.func_caps.fd_filters_best_effort,
6672 			 pf->hw.func_caps.num_tx_qp,
6673 			 pf->hw.func_caps.num_vsis);
6674 
6675 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6676 		       + pf->hw.func_caps.num_vfs)
6677 	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6678 		dev_info(&pf->pdev->dev,
6679 			 "got num_vsis %d, setting num_vsis to %d\n",
6680 			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6681 		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6682 	}
6683 
6684 	return 0;
6685 }
6686 
6687 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6688 
6689 /**
6690  * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6691  * @pf: board private structure
6692  **/
6693 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6694 {
6695 	struct i40e_vsi *vsi;
6696 	int i;
6697 
6698 	/* quick workaround for an NVM issue that leaves a critical register
6699 	 * uninitialized
6700 	 */
6701 	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6702 		static const u32 hkey[] = {
6703 			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6704 			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6705 			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6706 			0x95b3a76d};
6707 
6708 		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6709 			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6710 	}
6711 
6712 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6713 		return;
6714 
6715 	/* find existing VSI and see if it needs configuring */
6716 	vsi = NULL;
6717 	for (i = 0; i < pf->num_alloc_vsi; i++) {
6718 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6719 			vsi = pf->vsi[i];
6720 			break;
6721 		}
6722 	}
6723 
6724 	/* create a new VSI if none exists */
6725 	if (!vsi) {
6726 		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6727 				     pf->vsi[pf->lan_vsi]->seid, 0);
6728 		if (!vsi) {
6729 			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6730 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6731 			return;
6732 		}
6733 	}
6734 
6735 	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6736 }
6737 
6738 /**
6739  * i40e_fdir_teardown - release the Flow Director resources
6740  * @pf: board private structure
6741  **/
6742 static void i40e_fdir_teardown(struct i40e_pf *pf)
6743 {
6744 	int i;
6745 
6746 	i40e_fdir_filter_exit(pf);
6747 	for (i = 0; i < pf->num_alloc_vsi; i++) {
6748 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6749 			i40e_vsi_release(pf->vsi[i]);
6750 			break;
6751 		}
6752 	}
6753 }
6754 
6755 /**
6756  * i40e_prep_for_reset - prep for the core to reset
6757  * @pf: board private structure
6758  *
6759  * Close up the VFs and other things in prep for PF Reset.
6760  **/
6761 static void i40e_prep_for_reset(struct i40e_pf *pf)
6762 {
6763 	struct i40e_hw *hw = &pf->hw;
6764 	i40e_status ret = 0;
6765 	u32 v;
6766 
6767 	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6768 	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6769 		return;
6770 	if (i40e_check_asq_alive(&pf->hw))
6771 		i40e_vc_notify_reset(pf);
6772 
6773 	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6774 
6775 	/* quiesce the VSIs and their queues that are not already DOWN */
6776 	i40e_pf_quiesce_all_vsi(pf);
6777 
6778 	for (v = 0; v < pf->num_alloc_vsi; v++) {
6779 		if (pf->vsi[v])
6780 			pf->vsi[v]->seid = 0;
6781 	}
6782 
6783 	i40e_shutdown_adminq(&pf->hw);
6784 
6785 	/* call shutdown HMC */
6786 	if (hw->hmc.hmc_obj) {
6787 		ret = i40e_shutdown_lan_hmc(hw);
6788 		if (ret)
6789 			dev_warn(&pf->pdev->dev,
6790 				 "shutdown_lan_hmc failed: %d\n", ret);
6791 	}
6792 }
6793 
6794 /**
6795  * i40e_send_version - update firmware with driver version
6796  * @pf: PF struct
6797  */
6798 static void i40e_send_version(struct i40e_pf *pf)
6799 {
6800 	struct i40e_driver_version dv;
6801 
6802 	dv.major_version = DRV_VERSION_MAJOR;
6803 	dv.minor_version = DRV_VERSION_MINOR;
6804 	dv.build_version = DRV_VERSION_BUILD;
6805 	dv.subbuild_version = 0;
6806 	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6807 	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6808 }
6809 
6810 /**
6811  * i40e_reset_and_rebuild - reset and rebuild using a saved config
6812  * @pf: board private structure
6813  * @reinit: if the Main VSI needs to be re-initialized.
6814  **/
6815 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6816 {
6817 	struct i40e_hw *hw = &pf->hw;
6818 	u8 set_fc_aq_fail = 0;
6819 	i40e_status ret;
6820 	u32 val;
6821 	u32 v;
6822 
6823 	/* Now we wait for GRST to settle out.
6824 	 * We don't have to delete the VEBs or VSIs from the hw switch
6825 	 * because the reset will make them disappear.
6826 	 */
6827 	ret = i40e_pf_reset(hw);
6828 	if (ret) {
6829 		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6830 		set_bit(__I40E_RESET_FAILED, &pf->state);
6831 		goto clear_recovery;
6832 	}
6833 	pf->pfr_count++;
6834 
6835 	if (test_bit(__I40E_DOWN, &pf->state))
6836 		goto clear_recovery;
6837 	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6838 
6839 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6840 	ret = i40e_init_adminq(&pf->hw);
6841 	if (ret) {
6842 		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
6843 			 i40e_stat_str(&pf->hw, ret),
6844 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6845 		goto clear_recovery;
6846 	}
6847 
6848 	/* re-verify the eeprom if we just had an EMP reset */
6849 	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
6850 		i40e_verify_eeprom(pf);
6851 
6852 	i40e_clear_pxe_mode(hw);
6853 	ret = i40e_get_capabilities(pf);
6854 	if (ret)
6855 		goto end_core_reset;
6856 
6857 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6858 				hw->func_caps.num_rx_qp,
6859 				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6860 	if (ret) {
6861 		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6862 		goto end_core_reset;
6863 	}
6864 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6865 	if (ret) {
6866 		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6867 		goto end_core_reset;
6868 	}
6869 
6870 #ifdef CONFIG_I40E_DCB
6871 	ret = i40e_init_pf_dcb(pf);
6872 	if (ret) {
6873 		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6874 		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6875 		/* Continue without DCB enabled */
6876 	}
6877 #endif /* CONFIG_I40E_DCB */
6878 #ifdef I40E_FCOE
6879 	i40e_init_pf_fcoe(pf);
6880 
6881 #endif
6882 	/* do basic switch setup */
6883 	ret = i40e_setup_pf_switch(pf, reinit);
6884 	if (ret)
6885 		goto end_core_reset;
6886 
6887 	/* The driver only wants link up/down and module qualification
6888 	 * reports from firmware.  Note the negative logic.
6889 	 */
6890 	ret = i40e_aq_set_phy_int_mask(&pf->hw,
6891 				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
6892 					 I40E_AQ_EVENT_MEDIA_NA |
6893 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
6894 	if (ret)
6895 		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
6896 			 i40e_stat_str(&pf->hw, ret),
6897 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6898 
6899 	/* make sure our flow control settings are restored */
6900 	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6901 	if (ret)
6902 		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
6903 			i40e_stat_str(&pf->hw, ret),
6904 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6905 
6906 	/* Rebuild the VSIs and VEBs that existed before reset.
6907 	 * They are still in our local switch element arrays, so only
6908 	 * need to rebuild the switch model in the HW.
6909 	 *
6910 	 * If there were VEBs but the reconstitution failed, we'll
6911 	 * try to recover minimal use by getting the basic PF VSI working.
6912 	 */
6913 	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
6914 		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
6915 		/* find the one VEB connected to the MAC, and find orphans */
6916 		for (v = 0; v < I40E_MAX_VEB; v++) {
6917 			if (!pf->veb[v])
6918 				continue;
6919 
6920 			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6921 			    pf->veb[v]->uplink_seid == 0) {
6922 				ret = i40e_reconstitute_veb(pf->veb[v]);
6923 
6924 				if (!ret)
6925 					continue;
6926 
6927 				/* If Main VEB failed, we're in deep doodoo,
6928 				 * so give up rebuilding the switch and set up
6929 				 * for minimal rebuild of PF VSI.
6930 				 * If orphan failed, we'll report the error
6931 				 * but try to keep going.
6932 				 */
6933 				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
6934 					dev_info(&pf->pdev->dev,
6935 						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
6936 						 ret);
6937 					pf->vsi[pf->lan_vsi]->uplink_seid
6938 								= pf->mac_seid;
6939 					break;
6940 				} else if (pf->veb[v]->uplink_seid == 0) {
6941 					dev_info(&pf->pdev->dev,
6942 						 "rebuild of orphan VEB failed: %d\n",
6943 						 ret);
6944 				}
6945 			}
6946 		}
6947 	}
6948 
6949 	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
6950 		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
6951 		/* no VEB, so rebuild only the Main VSI */
6952 		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
6953 		if (ret) {
6954 			dev_info(&pf->pdev->dev,
6955 				 "rebuild of Main VSI failed: %d\n", ret);
6956 			goto end_core_reset;
6957 		}
6958 	}
6959 
6960 	/* Reconfigure hardware for allowing smaller MSS in the case
6961 	 * of TSO, so that we avoid the MDD being fired and causing
6962 	 * a reset in the case of small MSS+TSO.
6963 	 */
6964 #define I40E_REG_MSS          0x000E64DC
6965 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
6966 #define I40E_64BYTE_MSS       0x400000
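	/* The MSS_MIN field occupies bits 25:16 of the register, so
	 * I40E_64BYTE_MSS encodes a 64-byte minimum (64 << 16); if the
	 * current minimum is larger than that, lower it to 64 bytes.
	 */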
6967 	val = rd32(hw, I40E_REG_MSS);
6968 	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
6969 		val &= ~I40E_REG_MSS_MIN_MASK;
6970 		val |= I40E_64BYTE_MSS;
6971 		wr32(hw, I40E_REG_MSS, val);
6972 	}
6973 
6974 	if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
6975 		msleep(75);
6976 		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
6977 		if (ret)
6978 			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
6979 				 i40e_stat_str(&pf->hw, ret),
6980 				 i40e_aq_str(&pf->hw,
6981 					     pf->hw.aq.asq_last_status));
6982 	}
6983 	/* reinit the misc interrupt */
6984 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6985 		ret = i40e_setup_misc_vector(pf);
6986 
6987 	/* Add a filter to drop all Flow control frames from any VSI from being
6988 	 * transmitted. By doing so we stop a malicious VF from sending out
6989 	 * PAUSE or PFC frames and potentially controlling traffic for other
6990 	 * PF/VF VSIs.
6991 	 * The FW can still send Flow control frames if enabled.
6992 	 */
6993 	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
6994 						       pf->main_vsi_seid);
6995 
6996 	/* restart the VSIs that were rebuilt and running before the reset */
6997 	i40e_pf_unquiesce_all_vsi(pf);
6998 
6999 	if (pf->num_alloc_vfs) {
7000 		for (v = 0; v < pf->num_alloc_vfs; v++)
7001 			i40e_reset_vf(&pf->vf[v], true);
7002 	}
7003 
7004 	/* tell the firmware that we're starting */
7005 	i40e_send_version(pf);
7006 
7007 end_core_reset:
7008 	clear_bit(__I40E_RESET_FAILED, &pf->state);
7009 clear_recovery:
7010 	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
7011 }
7012 
7013 /**
7014  * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
7015  * @pf: board private structure
7016  *
7017  * Close up the VFs and other things in prep for a Core Reset,
7018  * then get ready to rebuild the world.
7019  **/
7020 static void i40e_handle_reset_warning(struct i40e_pf *pf)
7021 {
7022 	i40e_prep_for_reset(pf);
7023 	i40e_reset_and_rebuild(pf, false);
7024 }
7025 
7026 /**
7027  * i40e_handle_mdd_event
7028  * @pf: pointer to the PF structure
7029  *
7030  * Called from the MDD irq handler to identify possibly malicious vfs
7031  **/
7032 static void i40e_handle_mdd_event(struct i40e_pf *pf)
7033 {
7034 	struct i40e_hw *hw = &pf->hw;
7035 	bool mdd_detected = false;
7036 	bool pf_mdd_detected = false;
7037 	struct i40e_vf *vf;
7038 	u32 reg;
7039 	int i;
7040 
7041 	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
7042 		return;
7043 
7044 	/* find what triggered the MDD event */
7045 	reg = rd32(hw, I40E_GL_MDET_TX);
7046 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
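		/* decode the PF, VF, event type and queue fields packed
		 * into the register; the queue number is absolute, so make
		 * it relative to this function's base_queue
		 */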
7047 		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
7048 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
7049 		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
7050 				I40E_GL_MDET_TX_VF_NUM_SHIFT;
7051 		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
7052 				I40E_GL_MDET_TX_EVENT_SHIFT;
7053 		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
7054 				I40E_GL_MDET_TX_QUEUE_SHIFT) -
7055 				pf->hw.func_caps.base_queue;
7056 		if (netif_msg_tx_err(pf))
7057 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
7058 				 event, queue, pf_num, vf_num);
7059 		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
7060 		mdd_detected = true;
7061 	}
7062 	reg = rd32(hw, I40E_GL_MDET_RX);
7063 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
7064 		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
7065 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
7066 		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
7067 				I40E_GL_MDET_RX_EVENT_SHIFT;
7068 		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
7069 				I40E_GL_MDET_RX_QUEUE_SHIFT) -
7070 				pf->hw.func_caps.base_queue;
7071 		if (netif_msg_rx_err(pf))
7072 			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
7073 				 event, queue, func);
7074 		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
7075 		mdd_detected = true;
7076 	}
7077 
7078 	if (mdd_detected) {
7079 		reg = rd32(hw, I40E_PF_MDET_TX);
7080 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
7081 			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
7082 			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
7083 			pf_mdd_detected = true;
7084 		}
7085 		reg = rd32(hw, I40E_PF_MDET_RX);
7086 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
7087 			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
7088 			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
7089 			pf_mdd_detected = true;
7090 		}
7091 		/* Queue belongs to the PF, initiate a reset */
7092 		if (pf_mdd_detected) {
7093 			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
7094 			i40e_service_event_schedule(pf);
7095 		}
7096 	}
7097 
7098 	/* see if one of the VFs needs its hand slapped */
7099 	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
7100 		vf = &(pf->vf[i]);
7101 		reg = rd32(hw, I40E_VP_MDET_TX(i));
7102 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
7103 			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
7104 			vf->num_mdd_events++;
7105 			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
7106 				 i);
7107 		}
7108 
7109 		reg = rd32(hw, I40E_VP_MDET_RX(i));
7110 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
7111 			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
7112 			vf->num_mdd_events++;
7113 			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
7114 				 i);
7115 		}
7116 
7117 		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
7118 			dev_info(&pf->pdev->dev,
7119 				 "Too many MDD events on VF %d, disabled\n", i);
7120 			dev_info(&pf->pdev->dev,
7121 				 "Use PF Control I/F to re-enable the VF\n");
7122 			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
7123 		}
7124 	}
7125 
7126 	/* re-enable mdd interrupt cause */
7127 	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
7128 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
7129 	reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
7130 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
7131 	i40e_flush(hw);
7132 }
7133 
7134 /**
7135  * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
7136  * @pf: board private structure
7137  **/
7138 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7139 {
7140 	struct i40e_hw *hw = &pf->hw;
7141 	i40e_status ret;
7142 	__be16 port;
7143 	int i;
7144 
7145 	if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
7146 		return;
7147 
7148 	pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
7149 
7150 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7151 		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
7152 			pf->pending_udp_bitmap &= ~BIT_ULL(i);
7153 			port = pf->udp_ports[i].index;
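			/* a zero port means the slot was emptied, so ask
			 * firmware to delete the tunnel at this index
			 */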
7154 			if (port)
7155 				ret = i40e_aq_add_udp_tunnel(hw, port,
7156 							pf->udp_ports[i].type,
7157 							NULL, NULL);
7158 			else
7159 				ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
7160 
7161 			if (ret) {
7162 				dev_dbg(&pf->pdev->dev,
7163 					"%s %s port %d, index %d failed, err %s aq_err %s\n",
7164 					pf->udp_ports[i].type ? "vxlan" : "geneve",
7165 					port ? "add" : "delete",
7166 					ntohs(port), i,
7167 					i40e_stat_str(&pf->hw, ret),
7168 					i40e_aq_str(&pf->hw,
7169 						    pf->hw.aq.asq_last_status));
7170 				pf->udp_ports[i].index = 0;
7171 			}
7172 		}
7173 	}
7174 }
7175 
7176 /**
7177  * i40e_service_task - Run the driver's async subtasks
7178  * @work: pointer to work_struct containing our data
7179  **/
7180 static void i40e_service_task(struct work_struct *work)
7181 {
7182 	struct i40e_pf *pf = container_of(work,
7183 					  struct i40e_pf,
7184 					  service_task);
7185 	unsigned long start_time = jiffies;
7186 
7187 	/* don't bother with service tasks if a reset is in progress */
7188 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7189 		i40e_service_event_complete(pf);
7190 		return;
7191 	}
7192 
7193 	i40e_detect_recover_hung(pf);
7194 	i40e_sync_filters_subtask(pf);
7195 	i40e_reset_subtask(pf);
7196 	i40e_handle_mdd_event(pf);
7197 	i40e_vc_process_vflr_event(pf);
7198 	i40e_watchdog_subtask(pf);
7199 	i40e_fdir_reinit_subtask(pf);
7200 	i40e_client_subtask(pf);
7201 	i40e_sync_filters_subtask(pf);
7202 	i40e_sync_udp_filters_subtask(pf);
7203 	i40e_clean_adminq_subtask(pf);
7204 
7205 	i40e_service_event_complete(pf);
7206 
7207 	/* If the tasks have taken longer than one timer cycle or there
7208 	 * is more work to be done, reschedule the service task now
7209 	 * rather than wait for the timer to tick again.
7210 	 */
7211 	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
7212 	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)		 ||
7213 	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)		 ||
7214 	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
7215 		i40e_service_event_schedule(pf);
7216 }
7217 
7218 /**
7219  * i40e_service_timer - timer callback
7220  * @data: pointer to PF struct
7221  **/
7222 static void i40e_service_timer(unsigned long data)
7223 {
7224 	struct i40e_pf *pf = (struct i40e_pf *)data;
7225 
7226 	mod_timer(&pf->service_timer,
7227 		  round_jiffies(jiffies + pf->service_timer_period));
7228 	i40e_service_event_schedule(pf);
7229 }
7230 
7231 /**
7232  * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
7233  * @vsi: the VSI being configured
7234  **/
7235 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7236 {
7237 	struct i40e_pf *pf = vsi->back;
7238 
7239 	switch (vsi->type) {
7240 	case I40E_VSI_MAIN:
7241 		vsi->alloc_queue_pairs = pf->num_lan_qps;
7242 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7243 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
7244 		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7245 			vsi->num_q_vectors = pf->num_lan_msix;
7246 		else
7247 			vsi->num_q_vectors = 1;
7248 
7249 		break;
7250 
7251 	case I40E_VSI_FDIR:
7252 		vsi->alloc_queue_pairs = 1;
7253 		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7254 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
7255 		vsi->num_q_vectors = pf->num_fdsb_msix;
7256 		break;
7257 
7258 	case I40E_VSI_VMDQ2:
7259 		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
7260 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7261 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
7262 		vsi->num_q_vectors = pf->num_vmdq_msix;
7263 		break;
7264 
7265 	case I40E_VSI_SRIOV:
7266 		vsi->alloc_queue_pairs = pf->num_vf_qps;
7267 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7268 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
7269 		break;
7270 
7271 #ifdef I40E_FCOE
7272 	case I40E_VSI_FCOE:
7273 		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
7274 		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7275 				      I40E_REQ_DESCRIPTOR_MULTIPLE);
7276 		vsi->num_q_vectors = pf->num_fcoe_msix;
7277 		break;
7278 
7279 #endif /* I40E_FCOE */
7280 	default:
7281 		WARN_ON(1);
7282 		return -ENODATA;
7283 	}
7284 
7285 	return 0;
7286 }
7287 
7288 /**
7289  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
7290  * @vsi: pointer to the VSI
7291  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
7292  *
7293  * On error: returns error code (negative)
7294  * On success: returns 0
7295  **/
7296 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
7297 {
7298 	int size;
7299 	int ret = 0;
7300 
7301 	/* allocate memory for both Tx and Rx ring pointers */
7302 	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
7303 	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7304 	if (!vsi->tx_rings)
7305 		return -ENOMEM;
7306 	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
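	/* rx_rings aliases the second half of the single allocation above,
	 * which is why only tx_rings is ever passed to kfree()
	 */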
7307 
7308 	if (alloc_qvectors) {
7309 		/* allocate memory for q_vector pointers */
7310 		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
7311 		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7312 		if (!vsi->q_vectors) {
7313 			ret = -ENOMEM;
7314 			goto err_vectors;
7315 		}
7316 	}
7317 	return ret;
7318 
7319 err_vectors:
7320 	kfree(vsi->tx_rings);
7321 	return ret;
7322 }
7323 
7324 /**
7325  * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
7326  * @pf: board private structure
7327  * @type: type of VSI
7328  *
7329  * On error: returns error code (negative)
7330  * On success: returns vsi index in PF (positive)
7331  **/
7332 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
7333 {
7334 	int ret = -ENODEV;
7335 	struct i40e_vsi *vsi;
7336 	int vsi_idx;
7337 	int i;
7338 
7339 	/* Need to protect the allocation of the VSIs at the PF level */
7340 	mutex_lock(&pf->switch_mutex);
7341 
7342 	/* VSI list may be fragmented if VSI creation/destruction has
7343 	 * been happening.  We can afford to do a quick scan to look
7344 	 * for any free VSIs in the list.
7345 	 *
7346 	 * find next empty vsi slot, looping back around if necessary
7347 	 */
7348 	i = pf->next_vsi;
7349 	while (i < pf->num_alloc_vsi && pf->vsi[i])
7350 		i++;
7351 	if (i >= pf->num_alloc_vsi) {
7352 		i = 0;
7353 		while (i < pf->next_vsi && pf->vsi[i])
7354 			i++;
7355 	}
7356 
7357 	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
7358 		vsi_idx = i;             /* Found one! */
7359 	} else {
7360 		ret = -ENODEV;
7361 		goto unlock_pf;  /* out of VSI slots! */
7362 	}
7363 	pf->next_vsi = ++i;
7364 
7365 	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
7366 	if (!vsi) {
7367 		ret = -ENOMEM;
7368 		goto unlock_pf;
7369 	}
7370 	vsi->type = type;
7371 	vsi->back = pf;
7372 	set_bit(__I40E_DOWN, &vsi->state);
7373 	vsi->flags = 0;
7374 	vsi->idx = vsi_idx;
7375 	vsi->int_rate_limit = 0;
7376 	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
7377 				pf->rss_table_size : 64;
7378 	vsi->netdev_registered = false;
7379 	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
7380 	INIT_LIST_HEAD(&vsi->mac_filter_list);
7381 	vsi->irqs_ready = false;
7382 
7383 	ret = i40e_set_num_rings_in_vsi(vsi);
7384 	if (ret)
7385 		goto err_rings;
7386 
7387 	ret = i40e_vsi_alloc_arrays(vsi, true);
7388 	if (ret)
7389 		goto err_rings;
7390 
7391 	/* Setup default MSIX irq handler for VSI */
7392 	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
7393 
7394 	/* Initialize VSI lock */
7395 	spin_lock_init(&vsi->mac_filter_list_lock);
7396 	pf->vsi[vsi_idx] = vsi;
7397 	ret = vsi_idx;
7398 	goto unlock_pf;
7399 
7400 err_rings:
7401 	pf->next_vsi = i - 1;
7402 	kfree(vsi);
7403 unlock_pf:
7404 	mutex_unlock(&pf->switch_mutex);
7405 	return ret;
7406 }
7407 
7408 /**
7409  * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
7410  * @vsi: pointer to the VSI
7411  * @free_qvectors: a bool to specify if q_vectors need to be freed.
7415  **/
7416 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
7417 {
7418 	/* free the ring and vector containers */
7419 	if (free_qvectors) {
7420 		kfree(vsi->q_vectors);
7421 		vsi->q_vectors = NULL;
7422 	}
7423 	kfree(vsi->tx_rings);
7424 	vsi->tx_rings = NULL;
7425 	vsi->rx_rings = NULL;
7426 }
7427 
7428 /**
7429  * i40e_clear_rss_config_user - clear the user configured RSS hash keys
7430  * and lookup table
7431  * @vsi: Pointer to VSI structure
7432  */
7433 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
7434 {
7435 	if (!vsi)
7436 		return;
7437 
7438 	kfree(vsi->rss_hkey_user);
7439 	vsi->rss_hkey_user = NULL;
7440 
7441 	kfree(vsi->rss_lut_user);
7442 	vsi->rss_lut_user = NULL;
7443 }
7444 
7445 /**
7446  * i40e_vsi_clear - Deallocate the VSI provided
7447  * @vsi: the VSI being un-configured
7448  **/
7449 static int i40e_vsi_clear(struct i40e_vsi *vsi)
7450 {
7451 	struct i40e_pf *pf;
7452 
7453 	if (!vsi)
7454 		return 0;
7455 
7456 	if (!vsi->back)
7457 		goto free_vsi;
7458 	pf = vsi->back;
7459 
7460 	mutex_lock(&pf->switch_mutex);
7461 	if (!pf->vsi[vsi->idx]) {
7462 		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7463 			vsi->idx, vsi->idx, vsi, vsi->type);
7464 		goto unlock_vsi;
7465 	}
7466 
7467 	if (pf->vsi[vsi->idx] != vsi) {
7468 		dev_err(&pf->pdev->dev,
7469 			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7470 			pf->vsi[vsi->idx]->idx,
7471 			pf->vsi[vsi->idx],
7472 			pf->vsi[vsi->idx]->type,
7473 			vsi->idx, vsi, vsi->type);
7474 		goto unlock_vsi;
7475 	}
7476 
7477 	/* updates the PF for this cleared vsi */
7478 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7479 	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7480 
7481 	i40e_vsi_free_arrays(vsi, true);
7482 	i40e_clear_rss_config_user(vsi);
7483 
7484 	pf->vsi[vsi->idx] = NULL;
7485 	if (vsi->idx < pf->next_vsi)
7486 		pf->next_vsi = vsi->idx;
7487 
7488 unlock_vsi:
7489 	mutex_unlock(&pf->switch_mutex);
7490 free_vsi:
7491 	kfree(vsi);
7492 
7493 	return 0;
7494 }
7495 
7496 /**
7497  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7498  * @vsi: the VSI being cleaned
7499  **/
7500 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7501 {
7502 	int i;
7503 
7504 	if (vsi->tx_rings && vsi->tx_rings[0]) {
7505 		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7506 			kfree_rcu(vsi->tx_rings[i], rcu);
7507 			vsi->tx_rings[i] = NULL;
7508 			vsi->rx_rings[i] = NULL;
7509 		}
7510 	}
7511 }
7512 
7513 /**
7514  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7515  * @vsi: the VSI being configured
7516  **/
7517 static int i40e_alloc_rings(struct i40e_vsi *vsi)
7518 {
7519 	struct i40e_ring *tx_ring, *rx_ring;
7520 	struct i40e_pf *pf = vsi->back;
7521 	int i;
7522 
7523 	/* Set basic values in the rings to be used later during open() */
7524 	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7525 		/* allocate space for both Tx and Rx in one shot */
7526 		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
7527 		if (!tx_ring)
7528 			goto err_out;
7529 
7530 		tx_ring->queue_index = i;
7531 		tx_ring->reg_idx = vsi->base_queue + i;
7532 		tx_ring->ring_active = false;
7533 		tx_ring->vsi = vsi;
7534 		tx_ring->netdev = vsi->netdev;
7535 		tx_ring->dev = &pf->pdev->dev;
7536 		tx_ring->count = vsi->num_desc;
7537 		tx_ring->size = 0;
7538 		tx_ring->dcb_tc = 0;
7539 		if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
7540 			tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7541 		tx_ring->tx_itr_setting = pf->tx_itr_default;
7542 		vsi->tx_rings[i] = tx_ring;
7543 
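		/* the Rx ring is the second element of the Tx/Rx pair
		 * allocated above, so freeing the Tx ring releases both
		 */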
7544 		rx_ring = &tx_ring[1];
7545 		rx_ring->queue_index = i;
7546 		rx_ring->reg_idx = vsi->base_queue + i;
7547 		rx_ring->ring_active = false;
7548 		rx_ring->vsi = vsi;
7549 		rx_ring->netdev = vsi->netdev;
7550 		rx_ring->dev = &pf->pdev->dev;
7551 		rx_ring->count = vsi->num_desc;
7552 		rx_ring->size = 0;
7553 		rx_ring->dcb_tc = 0;
7554 		rx_ring->rx_itr_setting = pf->rx_itr_default;
7555 		vsi->rx_rings[i] = rx_ring;
7556 	}
7557 
7558 	return 0;
7559 
7560 err_out:
7561 	i40e_vsi_clear_rings(vsi);
7562 	return -ENOMEM;
7563 }
7564 
7565 /**
7566  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7567  * @pf: board private structure
7568  * @vectors: the number of MSI-X vectors to request
7569  *
7570  * Returns the number of vectors reserved, or error
7571  **/
7572 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7573 {
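	/* pci_enable_msix_range() returns the number of vectors actually
	 * granted, anywhere from I40E_MIN_MSIX up to the count requested,
	 * or a negative errno if even the minimum could not be met
	 */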
7574 	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7575 					I40E_MIN_MSIX, vectors);
7576 	if (vectors < 0) {
7577 		dev_info(&pf->pdev->dev,
7578 			 "MSI-X vector reservation failed: %d\n", vectors);
7579 		vectors = 0;
7580 	}
7581 
7582 	return vectors;
7583 }
7584 
7585 /**
7586  * i40e_init_msix - Setup the MSIX capability
7587  * @pf: board private structure
7588  *
7589  * Work with the OS to set up the MSIX vectors needed.
7590  *
7591  * Returns the number of vectors reserved or negative on failure
7592  **/
7593 static int i40e_init_msix(struct i40e_pf *pf)
7594 {
7595 	struct i40e_hw *hw = &pf->hw;
7596 	int vectors_left;
7597 	int v_budget, i;
7598 	int v_actual;
7599 	int iwarp_requested = 0;
7600 
7601 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7602 		return -ENODEV;
7603 
7604 	/* The number of vectors we'll request will be comprised of:
7605 	 *   - Add 1 for "other" cause for Admin Queue events, etc.
7606 	 *   - The number of LAN queue pairs
7607 	 *	- Queues being used for RSS.
7608 	 *		We don't need as many as max_rss_size vectors.
7609 	 *		use rss_size instead in the calculation since that
7610 	 *		Use rss_size instead in the calculation since that
7611 	 *		is governed by the number of CPUs in the system.
7612 	 *   - The number of VMDq pairs
7613 	 *   - The CPU count within the NUMA node if iWARP is enabled
7614 #ifdef I40E_FCOE
7615 	 *   - The number of FCOE qps.
7616 #endif
7617 	 * Once we count this up, try the request.
7618 	 *
7619 	 * If we can't get what we want, we'll simplify to nearly nothing
7620 	 * and try again.  If that still fails, we punt.
7621 	 */
7622 	vectors_left = hw->func_caps.num_msix_vectors;
7623 	v_budget = 0;
7624 
7625 	/* reserve one vector for miscellaneous handler */
7626 	if (vectors_left) {
7627 		v_budget++;
7628 		vectors_left--;
7629 	}
7630 
7631 	/* reserve vectors for the main PF traffic queues */
7632 	pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
7633 	vectors_left -= pf->num_lan_msix;
7634 	v_budget += pf->num_lan_msix;
7635 
7636 	/* reserve one vector for sideband flow director */
7637 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7638 		if (vectors_left) {
7639 			pf->num_fdsb_msix = 1;
7640 			v_budget++;
7641 			vectors_left--;
7642 		} else {
7643 			pf->num_fdsb_msix = 0;
7644 		}
7645 	}
7646 
7647 #ifdef I40E_FCOE
7648 	/* can we reserve enough for FCoE? */
7649 	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7650 		if (!vectors_left)
7651 			pf->num_fcoe_msix = 0;
7652 		else if (vectors_left >= pf->num_fcoe_qps)
7653 			pf->num_fcoe_msix = pf->num_fcoe_qps;
7654 		else
7655 			pf->num_fcoe_msix = 1;
7656 		v_budget += pf->num_fcoe_msix;
7657 		vectors_left -= pf->num_fcoe_msix;
7658 	}
7659 
7660 #endif
7661 	/* can we reserve enough for iWARP? */
7662 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7663 		iwarp_requested = pf->num_iwarp_msix;
7664 
7665 		if (!vectors_left)
7666 			pf->num_iwarp_msix = 0;
7667 		else if (vectors_left < pf->num_iwarp_msix)
7668 			pf->num_iwarp_msix = 1;
7669 		v_budget += pf->num_iwarp_msix;
7670 		vectors_left -= pf->num_iwarp_msix;
7671 	}
7672 
7673 	/* any vectors left over go for VMDq support */
7674 	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
7675 		int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
7676 		int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
7677 
7678 		if (!vectors_left) {
7679 			pf->num_vmdq_msix = 0;
7680 			pf->num_vmdq_qps = 0;
7681 		} else {
7682 			/* if we're short on vectors for what's desired, we limit
7683 			 * the queues per vmdq.  If this is still more than are
7684 			 * available, the user will need to change the number of
7685 			 * queues/vectors used by the PF later with the ethtool
7686 			 * channels command
7687 			 */
7688 			if (vmdq_vecs < vmdq_vecs_wanted)
7689 				pf->num_vmdq_qps = 1;
7690 			pf->num_vmdq_msix = pf->num_vmdq_qps;
7691 
7692 			v_budget += vmdq_vecs;
7693 			vectors_left -= vmdq_vecs;
7694 		}
7695 	}
7696 
7697 	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
7698 				   GFP_KERNEL);
7699 	if (!pf->msix_entries)
7700 		return -ENOMEM;
7701 
7702 	for (i = 0; i < v_budget; i++)
7703 		pf->msix_entries[i].entry = i;
7704 	v_actual = i40e_reserve_msix_vectors(pf, v_budget);
7705 
7706 	if (v_actual < I40E_MIN_MSIX) {
7707 		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
7708 		kfree(pf->msix_entries);
7709 		pf->msix_entries = NULL;
7710 		return -ENODEV;
7711 
7712 	} else if (v_actual == I40E_MIN_MSIX) {
7713 		/* Adjust for minimal MSIX use */
7714 		pf->num_vmdq_vsis = 0;
7715 		pf->num_vmdq_qps = 0;
7716 		pf->num_lan_qps = 1;
7717 		pf->num_lan_msix = 1;
7718 
7719 	} else if (!vectors_left) {
7720 		/* If we have limited resources, we will start with no vectors
7721 		 * for the special features and then allocate vectors to some
7722 		 * of these features based on the policy and at the end disable
7723 		 * the features that did not get any vectors.
7724 		 */
7725 		int vec;
7726 
7727 		dev_info(&pf->pdev->dev,
7728 			 "MSI-X vector limit reached, attempting to redistribute vectors\n");
7729 		/* reserve the misc vector */
7730 		vec = v_actual - 1;
7731 
7732 		/* Scale vector usage down */
7733 		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
7734 		pf->num_vmdq_vsis = 1;
7735 		pf->num_vmdq_qps = 1;
7736 #ifdef I40E_FCOE
7737 		pf->num_fcoe_qps = 0;
7738 		pf->num_fcoe_msix = 0;
7739 #endif
7740 
7741 		/* partition out the remaining vectors */
7742 		switch (vec) {
7743 		case 2:
7744 			pf->num_lan_msix = 1;
7745 			break;
7746 		case 3:
7747 			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7748 				pf->num_lan_msix = 1;
7749 				pf->num_iwarp_msix = 1;
7750 			} else {
7751 				pf->num_lan_msix = 2;
7752 			}
7753 #ifdef I40E_FCOE
7754 			/* give one vector to FCoE */
7755 			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7756 				pf->num_lan_msix = 1;
7757 				pf->num_fcoe_msix = 1;
7758 			}
7759 #endif
7760 			break;
7761 		default:
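			/* split the remaining vectors roughly in thirds
			 * between iWARP, VMDq and LAN when iWARP is enabled,
			 * otherwise in half between VMDq and LAN
			 */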
7762 			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7763 				pf->num_iwarp_msix = min_t(int, (vec / 3),
7764 						 iwarp_requested);
7765 				pf->num_vmdq_vsis = min_t(int, (vec / 3),
7766 						  I40E_DEFAULT_NUM_VMDQ_VSI);
7767 			} else {
7768 				pf->num_vmdq_vsis = min_t(int, (vec / 2),
7769 						  I40E_DEFAULT_NUM_VMDQ_VSI);
7770 			}
7771 			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7772 				pf->num_fdsb_msix = 1;
7773 				vec--;
7774 			}
7775 			pf->num_lan_msix = min_t(int,
7776 			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
7777 							      pf->num_lan_msix);
7778 			pf->num_lan_qps = pf->num_lan_msix;
7779 #ifdef I40E_FCOE
7780 			/* give one vector to FCoE */
7781 			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7782 				pf->num_fcoe_msix = 1;
7783 				vec--;
7784 			}
7785 #endif
7786 			break;
7787 		}
7788 	}
7789 
7790 	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
7791 	    (pf->num_fdsb_msix == 0)) {
7792 		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
7793 		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7794 	}
7795 	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7796 	    (pf->num_vmdq_msix == 0)) {
7797 		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7798 		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7799 	}
7800 
7801 	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
7802 	    (pf->num_iwarp_msix == 0)) {
7803 		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
7804 		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
7805 	}
7806 #ifdef I40E_FCOE
7807 
7808 	if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
7809 		dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
7810 		pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
7811 	}
7812 #endif
7813 	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
7814 		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
7815 		   pf->num_lan_msix,
7816 		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
7817 		   pf->num_fdsb_msix,
7818 		   pf->num_iwarp_msix);
7819 
7820 	return v_actual;
7821 }
7822 
7823 /**
7824  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7825  * @vsi: the VSI being configured
7826  * @v_idx: index of the vector in the vsi struct
7827  * @cpu: cpu to be used on affinity_mask
7828  *
7829  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
7830  **/
7831 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
7832 {
7833 	struct i40e_q_vector *q_vector;
7834 
7835 	/* allocate q_vector */
7836 	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7837 	if (!q_vector)
7838 		return -ENOMEM;
7839 
7840 	q_vector->vsi = vsi;
7841 	q_vector->v_idx = v_idx;
7842 	cpumask_set_cpu(cpu, &q_vector->affinity_mask);
7843 
7844 	if (vsi->netdev)
7845 		netif_napi_add(vsi->netdev, &q_vector->napi,
7846 			       i40e_napi_poll, NAPI_POLL_WEIGHT);
7847 
7848 	q_vector->rx.latency_range = I40E_LOW_LATENCY;
7849 	q_vector->tx.latency_range = I40E_LOW_LATENCY;
7850 
7851 	/* tie q_vector and vsi together */
7852 	vsi->q_vectors[v_idx] = q_vector;
7853 
7854 	return 0;
7855 }
7856 
7857 /**
7858  * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7859  * @vsi: the VSI being configured
7860  *
7861  * We allocate one q_vector per queue interrupt.  If allocation fails we
7862  * return -ENOMEM.
7863  **/
7864 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7865 {
7866 	struct i40e_pf *pf = vsi->back;
7867 	int err, v_idx, num_q_vectors, current_cpu;
7868 
7869 	/* if not MSIX, give the one vector only to the LAN VSI */
7870 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7871 		num_q_vectors = vsi->num_q_vectors;
7872 	else if (vsi == pf->vsi[pf->lan_vsi])
7873 		num_q_vectors = 1;
7874 	else
7875 		return -EINVAL;
7876 
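	/* spread the q_vectors across the online CPUs round-robin,
	 * wrapping back to the first CPU when we run off the end
	 */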
7877 	current_cpu = cpumask_first(cpu_online_mask);
7878 
7879 	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7880 		err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
7881 		if (err)
7882 			goto err_out;
7883 		current_cpu = cpumask_next(current_cpu, cpu_online_mask);
7884 		if (unlikely(current_cpu >= nr_cpu_ids))
7885 			current_cpu = cpumask_first(cpu_online_mask);
7886 	}
7887 
7888 	return 0;
7889 
7890 err_out:
7891 	while (v_idx--)
7892 		i40e_free_q_vector(vsi, v_idx);
7893 
7894 	return err;
7895 }
7896 
7897 /**
7898  * i40e_init_interrupt_scheme - Determine proper interrupt scheme
7899  * @pf: board private structure to initialize
7900  **/
7901 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
7902 {
7903 	int vectors = 0;
7904 	ssize_t size;
7905 
7906 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7907 		vectors = i40e_init_msix(pf);
7908 		if (vectors < 0) {
7909 			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
7910 				       I40E_FLAG_IWARP_ENABLED	|
7911 #ifdef I40E_FCOE
7912 				       I40E_FLAG_FCOE_ENABLED	|
7913 #endif
7914 				       I40E_FLAG_RSS_ENABLED	|
7915 				       I40E_FLAG_DCB_CAPABLE	|
7916 				       I40E_FLAG_DCB_ENABLED	|
7917 				       I40E_FLAG_SRIOV_ENABLED	|
7918 				       I40E_FLAG_FD_SB_ENABLED	|
7919 				       I40E_FLAG_FD_ATR_ENABLED	|
7920 				       I40E_FLAG_VMDQ_ENABLED);
7921 
7922 			/* rework the queue expectations without MSIX */
7923 			i40e_determine_queue_usage(pf);
7924 		}
7925 	}
7926 
7927 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
7928 	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
7929 		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
7930 		vectors = pci_enable_msi(pf->pdev);
7931 		if (vectors < 0) {
7932 			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
7933 				 vectors);
7934 			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
7935 		}
7936 		vectors = 1;  /* one MSI or Legacy vector */
7937 	}
7938 
7939 	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
7940 		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
7941 
7942 	/* set up vector assignment tracking */
7943 	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
7944 	pf->irq_pile = kzalloc(size, GFP_KERNEL);
7945 	if (!pf->irq_pile) {
7946 		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
7947 		return -ENOMEM;
7948 	}
7949 	pf->irq_pile->num_entries = vectors;
7950 	pf->irq_pile->search_hint = 0;
7951 
7952 	/* track first vector for misc interrupts, ignore return */
7953 	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
7954 
7955 	return 0;
7956 }
7957 
7958 /**
7959  * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
7960  * @pf: board private structure
7961  *
7962  * This sets up the handler for MSIX 0, which is used to manage the
7963  * non-queue interrupts, e.g. AdminQ and errors.  This is not used
7964  * when in MSI or Legacy interrupt mode.
7965  **/
7966 static int i40e_setup_misc_vector(struct i40e_pf *pf)
7967 {
7968 	struct i40e_hw *hw = &pf->hw;
7969 	int err = 0;
7970 
7971 	/* Only request the irq if this is the first time through, and
7972 	 * not when we're rebuilding after a Reset
7973 	 */
7974 	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7975 		err = request_irq(pf->msix_entries[0].vector,
7976 				  i40e_intr, 0, pf->int_name, pf);
7977 		if (err) {
7978 			dev_info(&pf->pdev->dev,
7979 				 "request_irq for %s failed: %d\n",
7980 				 pf->int_name, err);
7981 			return -EFAULT;
7982 		}
7983 	}
7984 
7985 	i40e_enable_misc_int_causes(pf);
7986 
7987 	/* associate no queues to the misc vector */
7988 	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
7989 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
7990 
7991 	i40e_flush(hw);
7992 
7993 	i40e_irq_dynamic_enable_icr0(pf, true);
7994 
7995 	return err;
7996 }
7997 
7998 /**
7999  * i40e_config_rss_aq - Prepare for RSS using AQ commands
8000  * @vsi: vsi structure
8001  * @seed: RSS hash seed
8002  **/
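 * @lut: Lookup table
 * @lut_size: Lookup table size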
8003 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8004 			      u8 *lut, u16 lut_size)
8005 {
8006 	struct i40e_pf *pf = vsi->back;
8007 	struct i40e_hw *hw = &pf->hw;
8008 	int ret = 0;
8009 
8010 	if (seed) {
8011 		struct i40e_aqc_get_set_rss_key_data *seed_dw =
8012 			(struct i40e_aqc_get_set_rss_key_data *)seed;
8013 		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
8014 		if (ret) {
8015 			dev_info(&pf->pdev->dev,
8016 				 "Cannot set RSS key, err %s aq_err %s\n",
8017 				 i40e_stat_str(hw, ret),
8018 				 i40e_aq_str(hw, hw->aq.asq_last_status));
8019 			return ret;
8020 		}
8021 	}
8022 	if (lut) {
8023 		bool pf_lut = (vsi->type == I40E_VSI_MAIN);
8024 
8025 		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8026 		if (ret) {
8027 			dev_info(&pf->pdev->dev,
8028 				 "Cannot set RSS lut, err %s aq_err %s\n",
8029 				 i40e_stat_str(hw, ret),
8030 				 i40e_aq_str(hw, hw->aq.asq_last_status));
8031 			return ret;
8032 		}
8033 	}
8034 	return ret;
8035 }
8036 
8037 /**
8038  * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
8039  * @vsi: Pointer to vsi structure
8040  * @seed: Buffer to store the hash keys
8041  * @lut: Buffer to store the lookup table entries
8042  * @lut_size: Size of buffer to store the lookup table entries
8043  *
8044  * Return 0 on success, negative on failure
8045  */
8046 static int i40e_get_rss_aq(struct i40e_vsi *vsi, u8 *seed,
8047 			   u8 *lut, u16 lut_size)
8048 {
8049 	struct i40e_pf *pf = vsi->back;
8050 	struct i40e_hw *hw = &pf->hw;
8051 	int ret = 0;
8052 
8053 	if (seed) {
8054 		ret = i40e_aq_get_rss_key(hw, vsi->id,
8055 			(struct i40e_aqc_get_set_rss_key_data *)seed);
8056 		if (ret) {
8057 			dev_info(&pf->pdev->dev,
8058 				 "Cannot get RSS key, err %s aq_err %s\n",
8059 				 i40e_stat_str(&pf->hw, ret),
8060 				 i40e_aq_str(&pf->hw,
8061 					     pf->hw.aq.asq_last_status));
8062 			return ret;
8063 		}
8064 	}
8065 
8066 	if (lut) {
8067 		bool pf_lut = (vsi->type == I40E_VSI_MAIN);
8068 
8069 		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8070 		if (ret) {
8071 			dev_info(&pf->pdev->dev,
8072 				 "Cannot get RSS lut, err %s aq_err %s\n",
8073 				 i40e_stat_str(&pf->hw, ret),
8074 				 i40e_aq_str(&pf->hw,
8075 					     pf->hw.aq.asq_last_status));
8076 			return ret;
8077 		}
8078 	}
8079 
8080 	return ret;
8081 }
8082 
8083 /**
8084  * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
8085  * @vsi: VSI structure
8086  **/
8087 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
8088 {
8089 	u8 seed[I40E_HKEY_ARRAY_SIZE];
8090 	struct i40e_pf *pf = vsi->back;
8091 	u8 *lut;
8092 	int ret;
8093 
8094 	if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
8095 		return 0;
8096 
8097 	if (!vsi->rss_size)
8098 		vsi->rss_size = min_t(int, pf->alloc_rss_size,
8099 				      vsi->num_queue_pairs);
8100 	if (!vsi->rss_size)
8101 		return -EINVAL;
8102 
8103 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8104 	if (!lut)
8105 		return -ENOMEM;
8106 	/* Use the user configured hash keys and lookup table if there is one,
8107 	 * otherwise use default
8108 	 */
8109 	if (vsi->rss_lut_user)
8110 		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8111 	else
8112 		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8113 	if (vsi->rss_hkey_user)
8114 		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8115 	else
8116 		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8117 	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
8118 	kfree(lut);
8119 
8120 	return ret;
8121 }
8122 
8123 /**
8124  * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
8125  * @vsi: Pointer to vsi structure
8126  * @seed: RSS hash seed
8127  * @lut: Lookup table
8128  * @lut_size: Lookup table size
8129  *
8130  * Returns 0 on success, negative on failure
8131  **/
8132 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
8133 			       const u8 *lut, u16 lut_size)
8134 {
8135 	struct i40e_pf *pf = vsi->back;
8136 	struct i40e_hw *hw = &pf->hw;
8137 	u16 vf_id = vsi->vf_id;
8138 	u8 i;
8139 
8140 	/* Fill out hash function seed */
8141 	if (seed) {
8142 		u32 *seed_dw = (u32 *)seed;
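		/* the hash key is programmed one 32-bit word at a time
		 * through the Rx control register interface
		 */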
8143 
8144 		if (vsi->type == I40E_VSI_MAIN) {
8145 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8146 				i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
8147 						  seed_dw[i]);
8148 		} else if (vsi->type == I40E_VSI_SRIOV) {
8149 			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
8150 				i40e_write_rx_ctl(hw,
8151 						  I40E_VFQF_HKEY1(i, vf_id),
8152 						  seed_dw[i]);
8153 		} else {
8154 			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
8155 		}
8156 	}
8157 
8158 	if (lut) {
8159 		u32 *lut_dw = (u32 *)lut;
8160 
8161 		if (vsi->type == I40E_VSI_MAIN) {
8162 			if (lut_size != I40E_HLUT_ARRAY_SIZE)
8163 				return -EINVAL;
8164 			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8165 				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
8166 		} else if (vsi->type == I40E_VSI_SRIOV) {
8167 			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
8168 				return -EINVAL;
8169 			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8170 				i40e_write_rx_ctl(hw,
8171 						  I40E_VFQF_HLUT1(i, vf_id),
8172 						  lut_dw[i]);
8173 		} else {
8174 			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8175 		}
8176 	}
8177 	i40e_flush(hw);
8178 
8179 	return 0;
8180 }
8181 
8182 /**
8183  * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
8184  * @vsi: Pointer to VSI structure
8185  * @seed: Buffer to store the keys
8186  * @lut: Buffer to store the lookup table entries
8187  * @lut_size: Size of buffer to store the lookup table entries
8188  *
8189  * Returns 0 on success, negative on failure
8190  */
8191 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
8192 			    u8 *lut, u16 lut_size)
8193 {
8194 	struct i40e_pf *pf = vsi->back;
8195 	struct i40e_hw *hw = &pf->hw;
8196 	u16 i;
8197 
8198 	if (seed) {
8199 		u32 *seed_dw = (u32 *)seed;
8200 
8201 		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8202 			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
8203 	}
8204 	if (lut) {
8205 		u32 *lut_dw = (u32 *)lut;
8206 
8207 		if (lut_size != I40E_HLUT_ARRAY_SIZE)
8208 			return -EINVAL;
8209 		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8210 			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
8211 	}
8212 
8213 	return 0;
8214 }
8215 
8216 /**
8217  * i40e_config_rss - Configure RSS keys and lut
8218  * @vsi: Pointer to VSI structure
8219  * @seed: RSS hash seed
8220  * @lut: Lookup table
8221  * @lut_size: Lookup table size
8222  *
8223  * Returns 0 on success, negative on failure
8224  */
8225 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8226 {
8227 	struct i40e_pf *pf = vsi->back;
8228 
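	/* parts that advertise RSS_AQ_CAPABLE (e.g. X722) are configured
	 * through the admin queue; others by direct register writes
	 */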
8229 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8230 		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
8231 	else
8232 		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
8233 }
8234 
8235 /**
8236  * i40e_get_rss - Get RSS keys and lut
8237  * @vsi: Pointer to VSI structure
8238  * @seed: Buffer to store the keys
8239  * @lut: Buffer to store the lookup table entries
8240  * @lut_size: Size of buffer to store the lookup table entries
8241  *
8242  * Returns 0 on success, negative on failure
8243  */
8244 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8245 {
8246 	struct i40e_pf *pf = vsi->back;
8247 
8248 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8249 		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
8250 	else
8251 		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
8252 }
8253 
8254 /**
8255  * i40e_fill_rss_lut - Fill the RSS lookup table with default values
8256  * @pf: Pointer to board private structure
8257  * @lut: Lookup table
8258  * @rss_table_size: Lookup table size
8259  * @rss_size: Range of queue number for hashing
8260  */
8261 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
8262 		       u16 rss_table_size, u16 rss_size)
8263 {
8264 	u16 i;
8265 
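	/* e.g. with rss_size = 4 the table becomes 0,1,2,3,0,1,2,3,...,
	 * spreading hash buckets evenly across the active queues
	 */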
8266 	for (i = 0; i < rss_table_size; i++)
8267 		lut[i] = i % rss_size;
8268 }
8269 
8270 /**
8271  * i40e_pf_config_rss - Prepare for RSS if used
8272  * @pf: board private structure
8273  **/
8274 static int i40e_pf_config_rss(struct i40e_pf *pf)
8275 {
8276 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8277 	u8 seed[I40E_HKEY_ARRAY_SIZE];
8278 	u8 *lut;
8279 	struct i40e_hw *hw = &pf->hw;
8280 	u32 reg_val;
8281 	u64 hena;
8282 	int ret;
8283 
8284 	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
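	/* hena is a 64-bit PCTYPE enable bitmap split across two 32-bit
	 * registers: read both halves, OR in the defaults, write both back
	 */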
8285 	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
8286 		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
8287 	hena |= i40e_pf_get_default_rss_hena(pf);
8288 
8289 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
8290 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
8291 
8292 	/* Determine the RSS table size based on the hardware capabilities */
8293 	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
8294 	reg_val = (pf->rss_table_size == 512) ?
8295 			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
8296 			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
8297 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
8298 
8299 	/* Determine the RSS size of the VSI */
8300 	if (!vsi->rss_size)
8301 		vsi->rss_size = min_t(int, pf->alloc_rss_size,
8302 				      vsi->num_queue_pairs);
8303 	if (!vsi->rss_size)
8304 		return -EINVAL;
8305 
8306 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8307 	if (!lut)
8308 		return -ENOMEM;
8309 
8310 	/* Use user configured lut if there is one, otherwise use default */
8311 	if (vsi->rss_lut_user)
8312 		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8313 	else
8314 		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8315 
8316 	/* Use user configured hash key if there is one, otherwise
8317 	 * use default.
8318 	 */
8319 	if (vsi->rss_hkey_user)
8320 		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8321 	else
8322 		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8323 	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
8324 	kfree(lut);
8325 
8326 	return ret;
8327 }
8328 
8329 /**
8330  * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
8331  * @pf: board private structure
8332  * @queue_count: the requested queue count for rss.
8333  *
8334  * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
8335  * count, which may differ from the requested queue count.
8336  **/
8337 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
8338 {
8339 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8340 	int new_rss_size;
8341 
8342 	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
8343 		return 0;
8344 
8345 	new_rss_size = min_t(int, queue_count, pf->rss_size_max);
8346 
8347 	if (queue_count != vsi->num_queue_pairs) {
8348 		vsi->req_queue_pairs = queue_count;
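		/* changing the queue count requires a full PF reset and
		 * rebuild with the new RSS size in place
		 */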
8349 		i40e_prep_for_reset(pf);
8350 
8351 		pf->alloc_rss_size = new_rss_size;
8352 
8353 		i40e_reset_and_rebuild(pf, true);
8354 
8355 		/* Discard the user configured hash keys and lut, if less
8356 		 * queues are enabled.
8357 		 */
8358 		if (queue_count < vsi->rss_size) {
8359 			i40e_clear_rss_config_user(vsi);
8360 			dev_dbg(&pf->pdev->dev,
8361 				"discard user configured hash keys and lut\n");
8362 		}
8363 
8364 		/* Reset vsi->rss_size, as number of enabled queues changed */
8365 		vsi->rss_size = min_t(int, pf->alloc_rss_size,
8366 				      vsi->num_queue_pairs);
8367 
8368 		i40e_pf_config_rss(pf);
8369 	}
8370 	dev_info(&pf->pdev->dev, "RSS count/HW max RSS count:  %d/%d\n",
8371 		 pf->alloc_rss_size, pf->rss_size_max);
8372 	return pf->alloc_rss_size;
8373 }
8374 
8375 /**
8376  * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
8377  * @pf: board private structure
8378  **/
8379 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
8380 {
8381 	i40e_status status;
8382 	bool min_valid, max_valid;
8383 	u32 max_bw, min_bw;
8384 
8385 	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
8386 					   &min_valid, &max_valid);
8387 
8388 	if (!status) {
8389 		if (min_valid)
8390 			pf->npar_min_bw = min_bw;
8391 		if (max_valid)
8392 			pf->npar_max_bw = max_bw;
8393 	}
8394 
8395 	return status;
8396 }
8397 
8398 /**
8399  * i40e_set_npar_bw_setting - Set BW settings for this PF partition
8400  * @pf: board private structure
8401  **/
8402 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
8403 {
8404 	struct i40e_aqc_configure_partition_bw_data bw_data;
8405 	i40e_status status;
8406 
8407 	/* Set the valid bit for this PF */
8408 	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
8409 	bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
8410 	bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
8411 
8412 	/* Set the new bandwidths */
8413 	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
8414 
8415 	return status;
8416 }
8417 
8418 /**
8419  * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
8420  * @pf: board private structure
8421  **/
8422 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
8423 {
8424 	/* Commit temporary BW setting to permanent NVM image */
8425 	enum i40e_admin_queue_err last_aq_status;
8426 	i40e_status ret;
8427 	u16 nvm_word;
8428 
8429 	if (pf->hw.partition_id != 1) {
8430 		dev_info(&pf->pdev->dev,
8431 			 "Commit BW only works on partition 1! This is partition %d\n",
8432 			 pf->hw.partition_id);
8433 		ret = I40E_NOT_SUPPORTED;
8434 		goto bw_commit_out;
8435 	}
8436 
8437 	/* Acquire NVM for read access */
8438 	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
8439 	last_aq_status = pf->hw.aq.asq_last_status;
8440 	if (ret) {
8441 		dev_info(&pf->pdev->dev,
8442 			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
8443 			 i40e_stat_str(&pf->hw, ret),
8444 			 i40e_aq_str(&pf->hw, last_aq_status));
8445 		goto bw_commit_out;
8446 	}
8447 
8448 	/* Read word 0x10 of NVM - SW compatibility word 1 */
8449 	ret = i40e_aq_read_nvm(&pf->hw,
8450 			       I40E_SR_NVM_CONTROL_WORD,
8451 			       0x10, sizeof(nvm_word), &nvm_word,
8452 			       false, NULL);
8453 	/* Save off last admin queue command status before releasing
8454 	 * the NVM
8455 	 */
8456 	last_aq_status = pf->hw.aq.asq_last_status;
8457 	i40e_release_nvm(&pf->hw);
8458 	if (ret) {
8459 		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
8460 			 i40e_stat_str(&pf->hw, ret),
8461 			 i40e_aq_str(&pf->hw, last_aq_status));
8462 		goto bw_commit_out;
8463 	}
8464 
8465 	/* Wait a bit for NVM release to complete */
8466 	msleep(50);
8467 
8468 	/* Acquire NVM for write access */
8469 	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
8470 	last_aq_status = pf->hw.aq.asq_last_status;
8471 	if (ret) {
8472 		dev_info(&pf->pdev->dev,
8473 			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
8474 			 i40e_stat_str(&pf->hw, ret),
8475 			 i40e_aq_str(&pf->hw, last_aq_status));
8476 		goto bw_commit_out;
8477 	}
8478 	/* Write it back out unchanged to initiate update NVM,
8479 	 * which will force a write of the shadow (alt) RAM to
8480 	 * the NVM - thus storing the bandwidth values permanently.
8481 	 */
8482 	ret = i40e_aq_update_nvm(&pf->hw,
8483 				 I40E_SR_NVM_CONTROL_WORD,
8484 				 0x10, sizeof(nvm_word),
8485 				 &nvm_word, true, NULL);
8486 	/* Save off last admin queue command status before releasing
8487 	 * the NVM
8488 	 */
8489 	last_aq_status = pf->hw.aq.asq_last_status;
8490 	i40e_release_nvm(&pf->hw);
8491 	if (ret)
8492 		dev_info(&pf->pdev->dev,
8493 			 "BW settings NOT SAVED, err %s aq_err %s\n",
8494 			 i40e_stat_str(&pf->hw, ret),
8495 			 i40e_aq_str(&pf->hw, last_aq_status));
8496 bw_commit_out:
8497 
8498 	return ret;
8499 }
8500 
8501 /**
8502  * i40e_sw_init - Initialize general software structures (struct i40e_pf)
8503  * @pf: board private structure to initialize
8504  *
8505  * i40e_sw_init initializes the Adapter private data structure.
8506  * Fields are initialized based on PCI device information and
8507  * OS network device settings (MTU size).
8508  **/
8509 static int i40e_sw_init(struct i40e_pf *pf)
8510 {
8511 	int err = 0;
8512 	int size;
8513 
8514 	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
8515 				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
8516 	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
8517 		if (I40E_DEBUG_USER & debug)
8518 			pf->hw.debug_mask = debug;
8519 		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
8520 						I40E_DEFAULT_MSG_ENABLE);
8521 	}
8522 
8523 	/* Set default capability flags */
8524 	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
8525 		    I40E_FLAG_MSI_ENABLED     |
8526 		    I40E_FLAG_MSIX_ENABLED;
8527 
8528 	/* Set default ITR */
8529 	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
8530 	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
8531 
8532 	/* Depending on PF configurations, it is possible that the RSS
8533 	 * maximum might end up larger than the available queues
8534 	 */
8535 	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
8536 	pf->alloc_rss_size = 1;
8537 	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
8538 	pf->rss_size_max = min_t(int, pf->rss_size_max,
8539 				 pf->hw.func_caps.num_tx_qp);
8540 	if (pf->hw.func_caps.rss) {
8541 		pf->flags |= I40E_FLAG_RSS_ENABLED;
8542 		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
8543 					   num_online_cpus());
8544 	}
8545 
8546 	/* MFP mode enabled */
8547 	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
8548 		pf->flags |= I40E_FLAG_MFP_ENABLED;
8549 		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
8550 		if (i40e_get_npar_bw_setting(pf))
8551 			dev_warn(&pf->pdev->dev,
8552 				 "Could not get NPAR bw settings\n");
8553 		else
8554 			dev_info(&pf->pdev->dev,
8555 				 "Min BW = %8.8x, Max BW = %8.8x\n",
8556 				 pf->npar_min_bw, pf->npar_max_bw);
8557 	}
8558 
8559 	/* FW/NVM is not yet fixed in this regard */
8560 	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
8561 	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
8562 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
8563 		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
8564 		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
8565 		    pf->hw.num_partitions > 1)
8566 			dev_info(&pf->pdev->dev,
8567 				 "Flow Director Sideband mode Disabled in MFP mode\n");
8568 		else
8569 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8570 		pf->fdir_pf_filter_count =
8571 				 pf->hw.func_caps.fd_filters_guaranteed;
8572 		pf->hw.fdir_shared_filter_count =
8573 				 pf->hw.func_caps.fd_filters_best_effort;
8574 	}
8575 
8576 	if (i40e_is_mac_710(&pf->hw) &&
8577 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
8578 	    (pf->hw.aq.fw_maj_ver < 4))) {
8579 		pf->flags |= I40E_FLAG_RESTART_AUTONEG;
8580 		/* No DCB support  for FW < v4.33 */
8581 		pf->flags |= I40E_FLAG_NO_DCB_SUPPORT;
8582 	}
8583 
8584 	/* Disable FW LLDP if FW < v4.3 */
8585 	if (i40e_is_mac_710(&pf->hw) &&
8586 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
8587 	    (pf->hw.aq.fw_maj_ver < 4)))
8588 		pf->flags |= I40E_FLAG_STOP_FW_LLDP;
8589 
8590 	/* Use the FW Set LLDP MIB API if FW > v4.40 */
8591 	if (i40e_is_mac_710(&pf->hw) &&
8592 	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
8593 	    (pf->hw.aq.fw_maj_ver >= 5)))
8594 		pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
8595 
8596 	if (pf->hw.func_caps.vmdq) {
8597 		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
8598 		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
8599 		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
8600 	}
8601 
8602 	if (pf->hw.func_caps.iwarp) {
8603 		pf->flags |= I40E_FLAG_IWARP_ENABLED;
8604 		/* IWARP needs one extra vector for CQP just like MISC.*/
8605 		/* IWARP needs one extra vector for CQP just like MISC. */
8606 	}
8607 
8608 #ifdef I40E_FCOE
8609 	i40e_init_pf_fcoe(pf);
8610 
8611 #endif /* I40E_FCOE */
8612 #ifdef CONFIG_PCI_IOV
8613 	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
8614 		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
8615 		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
8616 		pf->num_req_vfs = min_t(int,
8617 					pf->hw.func_caps.num_vfs,
8618 					I40E_MAX_VF_COUNT);
8619 	}
8620 #endif /* CONFIG_PCI_IOV */
8621 	if (pf->hw.mac.type == I40E_MAC_X722) {
8622 		pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
8623 			     I40E_FLAG_128_QP_RSS_CAPABLE |
8624 			     I40E_FLAG_HW_ATR_EVICT_CAPABLE |
8625 			     I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
8626 			     I40E_FLAG_WB_ON_ITR_CAPABLE |
8627 			     I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
8628 			     I40E_FLAG_NO_PCI_LINK_CHECK |
8629 			     I40E_FLAG_USE_SET_LLDP_MIB |
8630 			     I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
8631 	} else if ((pf->hw.aq.api_maj_ver > 1) ||
8632 		   ((pf->hw.aq.api_maj_ver == 1) &&
8633 		    (pf->hw.aq.api_min_ver > 4))) {
8634 		/* Supported in FW API version higher than 1.4 */
8635 		pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
8636 		pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8637 	} else {
8638 		pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8639 	}
8640 
8641 	pf->eeprom_version = 0xDEAD;
8642 	pf->lan_veb = I40E_NO_VEB;
8643 	pf->lan_vsi = I40E_NO_VSI;
8644 
8645 	/* By default FW has this off for performance reasons */
8646 	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
8647 
8648 	/* set up queue assignment tracking */
8649 	size = sizeof(struct i40e_lump_tracking)
8650 		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
8651 	pf->qp_pile = kzalloc(size, GFP_KERNEL);
8652 	if (!pf->qp_pile) {
8653 		err = -ENOMEM;
8654 		goto sw_init_done;
8655 	}
8656 	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
8657 	pf->qp_pile->search_hint = 0;
8658 
8659 	pf->tx_timeout_recovery_level = 1;
8660 
8661 	mutex_init(&pf->switch_mutex);
8662 
8663 	/* If NPAR is enabled nudge the Tx scheduler */
8664 	if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
8665 		i40e_set_npar_bw_setting(pf);
8666 
8667 sw_init_done:
8668 	return err;
8669 }
8670 
8671 /**
8672  * i40e_set_ntuple - set the ntuple feature flag and take action
8673  * @pf: board private structure
8674  * @features: the feature set that the stack is suggesting
8675  *
8676  * returns a bool to indicate if reset needs to happen
8677  **/
8678 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
8679 {
8680 	bool need_reset = false;
8681 
8682 	/* Check if Flow Director n-tuple support was enabled or disabled.  If
8683 	 * the state changed, we need to reset.
8684 	 */
8685 	if (features & NETIF_F_NTUPLE) {
8686 		/* Enable filters and mark for reset */
8687 		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
8688 			need_reset = true;
8689 		/* enable FD_SB only if an MSI-X vector is available */
8690 		if (pf->num_fdsb_msix > 0)
8691 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8692 	} else {
8693 		/* turn off filters, mark for reset and clear SW filter list */
8694 		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8695 			need_reset = true;
8696 			i40e_fdir_filter_exit(pf);
8697 		}
8698 		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8699 		pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
8700 		/* reset fd counters */
8701 		pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
8702 		pf->fdir_pf_active_filters = 0;
8703 		/* if ATR was auto disabled it can be re-enabled. */
8704 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8705 		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
8706 			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
8707 			if (I40E_DEBUG_FD & pf->hw.debug_mask)
8708 				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
8709 		}
8710 	}
8711 	return need_reset;
8712 }
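
/* A usage note rather than new logic: the stack reaches i40e_set_ntuple()
 * through .ndo_set_features (see i40e_set_features() below) when the
 * NETIF_F_NTUPLE flag is toggled, e.g. by "ethtool -K <ifname> ntuple on";
 * the caller performs the PF reset whenever this returns true.
 */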
8713 
8714 /**
8715  * i40e_clear_rss_lut - clear the rx hash lookup table
8716  * @vsi: the VSI being configured
8717  **/
8718 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
8719 {
8720 	struct i40e_pf *pf = vsi->back;
8721 	struct i40e_hw *hw = &pf->hw;
8722 	u16 vf_id = vsi->vf_id;
8723 	u8 i;
8724 
8725 	if (vsi->type == I40E_VSI_MAIN) {
8726 		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8727 			wr32(hw, I40E_PFQF_HLUT(i), 0);
8728 	} else if (vsi->type == I40E_VSI_SRIOV) {
8729 		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8730 			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
8731 	} else {
8732 		dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8733 	}
8734 }
8735 
8736 /**
8737  * i40e_set_features - set the netdev feature flags
8738  * @netdev: ptr to the netdev being adjusted
8739  * @features: the feature set that the stack is suggesting
8740  **/
8741 static int i40e_set_features(struct net_device *netdev,
8742 			     netdev_features_t features)
8743 {
8744 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8745 	struct i40e_vsi *vsi = np->vsi;
8746 	struct i40e_pf *pf = vsi->back;
8747 	bool need_reset;
8748 
8749 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
8750 		i40e_pf_config_rss(pf);
8751 	else if (!(features & NETIF_F_RXHASH) &&
8752 		 netdev->features & NETIF_F_RXHASH)
8753 		i40e_clear_rss_lut(vsi);
8754 
8755 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
8756 		i40e_vlan_stripping_enable(vsi);
8757 	else
8758 		i40e_vlan_stripping_disable(vsi);
8759 
8760 	need_reset = i40e_set_ntuple(pf, features);
8761 
8762 	if (need_reset)
8763 		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8764 
8765 	return 0;
8766 }
8767 
8768 /**
8769  * i40e_get_udp_port_idx - Lookup a possibly offloaded UDP port for Rx
8770  * @pf: board private structure
8771  * @port: The UDP port to look up
8772  *
8773  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
8774  **/
8775 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
8776 {
8777 	u8 i;
8778 
8779 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8780 		if (pf->udp_ports[i].index == port)
8781 			return i;
8782 	}
8783 
8784 	return i;
8785 }
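
/* A minimal sketch of the free-slot trick used by the callers below:
 * unused entries keep .index == 0, so looking up port 0 returns the
 * first free slot, assuming at least one entry is unused:
 *
 *	next_idx = i40e_get_udp_port_idx(pf, 0);
 *	if (next_idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS)
 *		pf->udp_ports[next_idx].index = port;
 */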
8786 
8787 /**
8788  * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
8789  * @netdev: This physical port's netdev
8790  * @ti: Tunnel endpoint information
8791  **/
8792 static void i40e_udp_tunnel_add(struct net_device *netdev,
8793 				struct udp_tunnel_info *ti)
8794 {
8795 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8796 	struct i40e_vsi *vsi = np->vsi;
8797 	struct i40e_pf *pf = vsi->back;
8798 	__be16 port = ti->port;
8799 	u8 next_idx;
8800 	u8 idx;
8801 
8802 	idx = i40e_get_udp_port_idx(pf, port);
8803 
8804 	/* Check if port already exists */
8805 	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8806 		netdev_info(netdev, "port %d already offloaded\n",
8807 			    ntohs(port));
8808 		return;
8809 	}
8810 
8811 	/* Now check if there is space to add the new port */
8812 	next_idx = i40e_get_udp_port_idx(pf, 0);
8813 
8814 	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8815 		netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
8816 			    ntohs(port));
8817 		return;
8818 	}
8819 
8820 	switch (ti->type) {
8821 	case UDP_TUNNEL_TYPE_VXLAN:
8822 		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
8823 		break;
8824 	case UDP_TUNNEL_TYPE_GENEVE:
8825 		if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
8826 			return;
8827 		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
8828 		break;
8829 	default:
8830 		return;
8831 	}
8832 
8833 	/* New port: add it and mark its index in the bitmap */
8834 	pf->udp_ports[next_idx].index = port;
8835 	pf->pending_udp_bitmap |= BIT_ULL(next_idx);
8836 	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8837 }
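
/* Note the deferred model used above: the tunnel add/del callbacks never
 * touch the admin queue directly. They only stage the change in
 * pf->udp_ports[] and pf->pending_udp_bitmap and raise
 * I40E_FLAG_UDP_FILTER_SYNC, so the periodic service task pushes the
 * update to firmware outside of this (potentially atomic) context.
 */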
8838 
8839 /**
8840  * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
8841  * @netdev: This physical port's netdev
8842  * @ti: Tunnel endpoint information
8843  **/
8844 static void i40e_udp_tunnel_del(struct net_device *netdev,
8845 				struct udp_tunnel_info *ti)
8846 {
8847 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8848 	struct i40e_vsi *vsi = np->vsi;
8849 	struct i40e_pf *pf = vsi->back;
8850 	__be16 port = ti->port;
8851 	u8 idx;
8852 
8853 	idx = i40e_get_udp_port_idx(pf, port);
8854 
8855 	/* Check if port already exists */
8856 	if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
8857 		goto not_found;
8858 
8859 	switch (ti->type) {
8860 	case UDP_TUNNEL_TYPE_VXLAN:
8861 		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
8862 			goto not_found;
8863 		break;
8864 	case UDP_TUNNEL_TYPE_GENEVE:
8865 		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
8866 			goto not_found;
8867 		break;
8868 	default:
8869 		goto not_found;
8870 	}
8871 
8872 	/* if port exists, set it to 0 (mark for deletion)
8873 	 * and make it pending
8874 	 */
8875 	pf->udp_ports[idx].index = 0;
8876 	pf->pending_udp_bitmap |= BIT_ULL(idx);
8877 	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8878 
8879 	return;
8880 not_found:
8881 	netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
8882 		    ntohs(port));
8883 }
8884 
8885 static int i40e_get_phys_port_id(struct net_device *netdev,
8886 				 struct netdev_phys_item_id *ppid)
8887 {
8888 	struct i40e_netdev_priv *np = netdev_priv(netdev);
8889 	struct i40e_pf *pf = np->vsi->back;
8890 	struct i40e_hw *hw = &pf->hw;
8891 
8892 	if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
8893 		return -EOPNOTSUPP;
8894 
8895 	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
8896 	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
8897 
8898 	return 0;
8899 }
8900 
8901 /**
8902  * i40e_ndo_fdb_add - add an entry to the hardware database
8903  * @ndm: the input from the stack
8904  * @tb: pointer to array of nladdr (unused)
8905  * @dev: the net device pointer
8906  * @addr: the MAC address entry being added
 * @vid: VLAN ID (only 0 is supported for now)
8907  * @flags: instructions from stack about fdb operation
8908  */
8909 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8910 			    struct net_device *dev,
8911 			    const unsigned char *addr, u16 vid,
8912 			    u16 flags)
8913 {
8914 	struct i40e_netdev_priv *np = netdev_priv(dev);
8915 	struct i40e_pf *pf = np->vsi->back;
8916 	int err = 0;
8917 
8918 	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
8919 		return -EOPNOTSUPP;
8920 
8921 	if (vid) {
8922 		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
8923 		return -EINVAL;
8924 	}
8925 
8926 	/* Hardware does not support aging addresses, so if a
8927 	 * ndm_state is given, only allow permanent addresses
8928 	 */
8929 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
8930 		netdev_info(dev, "FDB only supports static addresses\n");
8931 		return -EINVAL;
8932 	}
8933 
8934 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
8935 		err = dev_uc_add_excl(dev, addr);
8936 	else if (is_multicast_ether_addr(addr))
8937 		err = dev_mc_add_excl(dev, addr);
8938 	else
8939 		err = -EINVAL;
8940 
8941 	/* Only return duplicate errors if NLM_F_EXCL is set */
8942 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
8943 		err = 0;
8944 
8945 	return err;
8946 }
8947 
8948 /**
8949  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
8950  * @dev: the netdev being configured
8951  * @nlh: RTNL message
8952  *
8953  * Inserts a new hardware bridge if one is not already created and
8954  * enables the requested bridging mode (VEB or VEPA). If the hardware
8955  * bridge has already been inserted and the request is to change the
8956  * mode, a PF reset is required so the components can be rebuilt with
8957  * the requested hardware bridge mode enabled.
8959  **/
8960 static int i40e_ndo_bridge_setlink(struct net_device *dev,
8961 				   struct nlmsghdr *nlh,
8962 				   u16 flags)
8963 {
8964 	struct i40e_netdev_priv *np = netdev_priv(dev);
8965 	struct i40e_vsi *vsi = np->vsi;
8966 	struct i40e_pf *pf = vsi->back;
8967 	struct i40e_veb *veb = NULL;
8968 	struct nlattr *attr, *br_spec;
8969 	int i, rem;
8970 
8971 	/* Only for PF VSI for now */
8972 	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8973 		return -EOPNOTSUPP;
8974 
8975 	/* Find the HW bridge for PF VSI */
8976 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8977 		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8978 			veb = pf->veb[i];
8979 	}
8980 
8981 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;
8982 
8983 	nla_for_each_nested(attr, br_spec, rem) {
8984 		__u16 mode;
8985 
8986 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
8987 			continue;
8988 
8989 		mode = nla_get_u16(attr);
8990 		if ((mode != BRIDGE_MODE_VEPA) &&
8991 		    (mode != BRIDGE_MODE_VEB))
8992 			return -EINVAL;
8993 
8994 		/* Insert a new HW bridge */
8995 		if (!veb) {
8996 			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8997 					     vsi->tc_config.enabled_tc);
8998 			if (veb) {
8999 				veb->bridge_mode = mode;
9000 				i40e_config_bridge_mode(veb);
9001 			} else {
9002 				/* No Bridge HW offload available */
9003 				return -ENOENT;
9004 			}
9005 			break;
9006 		} else if (mode != veb->bridge_mode) {
9007 			/* Existing HW bridge but different mode needs reset */
9008 			veb->bridge_mode = mode;
9009 			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
9010 			if (mode == BRIDGE_MODE_VEB)
9011 				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
9012 			else
9013 				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9014 			i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
9015 			break;
9016 		}
9017 	}
9018 
9019 	return 0;
9020 }
9021 
9022 /**
9023  * i40e_ndo_bridge_getlink - Get the hardware bridge mode
9024  * @skb: skb buff
9025  * @pid: process id
9026  * @seq: RTNL message seq #
9027  * @dev: the netdev being configured
9028  * @filter_mask: unused
9029  * @nlflags: netlink flags passed in
9030  *
9031  * Return the mode in which the hardware bridge is operating,
9032  * i.e. VEB or VEPA.
9033  **/
9034 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9035 				   struct net_device *dev,
9036 				   u32 __always_unused filter_mask,
9037 				   int nlflags)
9038 {
9039 	struct i40e_netdev_priv *np = netdev_priv(dev);
9040 	struct i40e_vsi *vsi = np->vsi;
9041 	struct i40e_pf *pf = vsi->back;
9042 	struct i40e_veb *veb = NULL;
9043 	int i;
9044 
9045 	/* Only for PF VSI for now */
9046 	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9047 		return -EOPNOTSUPP;
9048 
9049 	/* Find the HW bridge for the PF VSI */
9050 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9051 		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9052 			veb = pf->veb[i];
9053 	}
9054 
9055 	if (!veb)
9056 		return 0;
9057 
9058 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
9059 				       nlflags, 0, 0, filter_mask, NULL);
9060 }
9061 
9062 /* Hardware supports L4 tunnel length of 128B (=2^7) which includes
9063  * inner mac plus all inner ethertypes.
9064  */
9065 #define I40E_MAX_TUNNEL_HDR_LEN 128
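
/* As a worked example of what fits: the span checked below runs from the
 * outer transport header to the inner network header, so for a typical
 * (untagged) VXLAN frame it is
 *
 *	8 (outer UDP) + 8 (VXLAN) + 14 (inner Ethernet) = 30 bytes,
 *
 * well under the 128 byte limit; encapsulations exceeding the limit
 * simply lose checksum and GSO offload for that skb.
 */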
9066 /**
9067  * i40e_features_check - Validate encapsulated packet conforms to limits
9068  * @skb: skb buff
9069  * @dev: This physical port's netdev
9070  * @features: Offload features that the stack believes apply
9071  **/
9072 static netdev_features_t i40e_features_check(struct sk_buff *skb,
9073 					     struct net_device *dev,
9074 					     netdev_features_t features)
9075 {
9076 	if (skb->encapsulation &&
9077 	    ((skb_inner_network_header(skb) - skb_transport_header(skb)) >
9078 	     I40E_MAX_TUNNEL_HDR_LEN))
9079 		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9080 
9081 	return features;
9082 }
9083 
9084 static const struct net_device_ops i40e_netdev_ops = {
9085 	.ndo_open		= i40e_open,
9086 	.ndo_stop		= i40e_close,
9087 	.ndo_start_xmit		= i40e_lan_xmit_frame,
9088 	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
9089 	.ndo_set_rx_mode	= i40e_set_rx_mode,
9090 	.ndo_validate_addr	= eth_validate_addr,
9091 	.ndo_set_mac_address	= i40e_set_mac,
9092 	.ndo_change_mtu		= i40e_change_mtu,
9093 	.ndo_do_ioctl		= i40e_ioctl,
9094 	.ndo_tx_timeout		= i40e_tx_timeout,
9095 	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
9096 	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
9097 #ifdef CONFIG_NET_POLL_CONTROLLER
9098 	.ndo_poll_controller	= i40e_netpoll,
9099 #endif
9100 	.ndo_setup_tc		= __i40e_setup_tc,
9101 #ifdef I40E_FCOE
9102 	.ndo_fcoe_enable	= i40e_fcoe_enable,
9103 	.ndo_fcoe_disable	= i40e_fcoe_disable,
9104 #endif
9105 	.ndo_set_features	= i40e_set_features,
9106 	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
9107 	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
9108 	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
9109 	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
9110 	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
9111 	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
9112 	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
9113 	.ndo_udp_tunnel_add	= i40e_udp_tunnel_add,
9114 	.ndo_udp_tunnel_del	= i40e_udp_tunnel_del,
9115 	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
9116 	.ndo_fdb_add		= i40e_ndo_fdb_add,
9117 	.ndo_features_check	= i40e_features_check,
9118 	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
9119 	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
9120 };
9121 
9122 /**
9123  * i40e_config_netdev - Setup the netdev flags
9124  * @vsi: the VSI being configured
9125  *
9126  * Returns 0 on success, negative value on failure
9127  **/
9128 static int i40e_config_netdev(struct i40e_vsi *vsi)
9129 {
9130 	struct i40e_pf *pf = vsi->back;
9131 	struct i40e_hw *hw = &pf->hw;
9132 	struct i40e_netdev_priv *np;
9133 	struct net_device *netdev;
9134 	u8 mac_addr[ETH_ALEN];
9135 	int etherdev_size;
9136 
9137 	etherdev_size = sizeof(struct i40e_netdev_priv);
9138 	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
9139 	if (!netdev)
9140 		return -ENOMEM;
9141 
9142 	vsi->netdev = netdev;
9143 	np = netdev_priv(netdev);
9144 	np->vsi = vsi;
9145 
9146 	netdev->hw_enc_features |= NETIF_F_SG			|
9147 				   NETIF_F_IP_CSUM		|
9148 				   NETIF_F_IPV6_CSUM		|
9149 				   NETIF_F_HIGHDMA		|
9150 				   NETIF_F_SOFT_FEATURES	|
9151 				   NETIF_F_TSO			|
9152 				   NETIF_F_TSO_ECN		|
9153 				   NETIF_F_TSO6			|
9154 				   NETIF_F_GSO_GRE		|
9155 				   NETIF_F_GSO_GRE_CSUM		|
9156 				   NETIF_F_GSO_IPXIP4		|
9157 				   NETIF_F_GSO_IPXIP6		|
9158 				   NETIF_F_GSO_UDP_TUNNEL	|
9159 				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
9160 				   NETIF_F_GSO_PARTIAL		|
9161 				   NETIF_F_SCTP_CRC		|
9162 				   NETIF_F_RXHASH		|
9163 				   NETIF_F_RXCSUM		|
9164 				   0;
9165 
9166 	if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
9167 		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
9168 
9169 	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
9170 
9171 	/* record features VLANs can make use of */
9172 	netdev->vlan_features |= netdev->hw_enc_features |
9173 				 NETIF_F_TSO_MANGLEID;
9174 
9175 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
9176 		netdev->hw_features |= NETIF_F_NTUPLE;
9177 
9178 	netdev->hw_features |= netdev->hw_enc_features	|
9179 			       NETIF_F_HW_VLAN_CTAG_TX	|
9180 			       NETIF_F_HW_VLAN_CTAG_RX;
9181 
9182 	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
9183 	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
9184 
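	/* To recap the feature plumbing above (core netdev semantics, not
	 * driver-specific): hw_enc_features are offloads usable on
	 * encapsulated traffic, hw_features are what the user may toggle
	 * (e.g. via ethtool -K), features are what is currently enabled,
	 * and vlan_features are inherited by stacked VLAN devices.
	 */
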
9185 	if (vsi->type == I40E_VSI_MAIN) {
9186 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
9187 		ether_addr_copy(mac_addr, hw->mac.perm_addr);
9188 		/* The following steps are necessary to prevent reception
9189 		 * of tagged packets - some older NVM configurations load a
9190 		 * default MAC-VLAN filter that accepts any tagged packet,
9191 		 * which must be replaced by a normal filter.
9192 		 */
9193 		i40e_rm_default_mac_filter(vsi, mac_addr);
9194 		spin_lock_bh(&vsi->mac_filter_list_lock);
9195 		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
9196 		spin_unlock_bh(&vsi->mac_filter_list_lock);
9197 	} else {
9198 		/* relate the VSI_VMDQ name to the VSI_MAIN name */
9199 		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
9200 			 pf->vsi[pf->lan_vsi]->netdev->name);
9201 		random_ether_addr(mac_addr);
9202 
9203 		spin_lock_bh(&vsi->mac_filter_list_lock);
9204 		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
9205 		spin_unlock_bh(&vsi->mac_filter_list_lock);
9206 	}
9207 
9208 	ether_addr_copy(netdev->dev_addr, mac_addr);
9209 	ether_addr_copy(netdev->perm_addr, mac_addr);
9210 
9211 	netdev->priv_flags |= IFF_UNICAST_FLT;
9212 	netdev->priv_flags |= IFF_SUPP_NOFCS;
9213 	/* Setup netdev TC information */
9214 	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
9215 
9216 	netdev->netdev_ops = &i40e_netdev_ops;
9217 	netdev->watchdog_timeo = 5 * HZ;
9218 	i40e_set_ethtool_ops(netdev);
9219 #ifdef I40E_FCOE
9220 	i40e_fcoe_config_netdev(netdev, vsi);
9221 #endif
9222 
9223 	return 0;
9224 }
9225 
9226 /**
9227  * i40e_vsi_delete - Delete a VSI from the switch
9228  * @vsi: the VSI being removed
9229  *
9230  * The PF's main (default) VSI is never removed.
9231  **/
9232 static void i40e_vsi_delete(struct i40e_vsi *vsi)
9233 {
9234 	/* removing the default VSI is not allowed */
9235 	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
9236 		return;
9237 
9238 	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
9239 }
9240 
9241 /**
9242  * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
9243  * @vsi: the VSI being queried
9244  *
9245  * Returns 1 if the HW bridge mode is VEB, 0 for VEPA mode, and
 * -ENOENT if no VEB is associated with the uplink bridge
9246  **/
9247 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
9248 {
9249 	struct i40e_veb *veb;
9250 	struct i40e_pf *pf = vsi->back;
9251 
9252 	/* Uplink is not a bridge so default to VEB */
9253 	if (vsi->veb_idx == I40E_NO_VEB)
9254 		return 1;
9255 
9256 	veb = pf->veb[vsi->veb_idx];
9257 	if (!veb) {
9258 		dev_info(&pf->pdev->dev,
9259 			 "There is no veb associated with the bridge\n");
9260 		return -ENOENT;
9261 	}
9262 
9263 	/* Uplink is a bridge in VEPA mode */
9264 	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
9265 		return 0;
9266 
9267 	/* Uplink is a bridge in VEB mode */
9268 	return 1;
9273 }
9274 
9275 /**
9276  * i40e_add_vsi - Add a VSI to the switch
9277  * @vsi: the VSI being configured
9278  *
9279  * This initializes a VSI context depending on the VSI type to be added and
9280  * passes it down to the add_vsi aq command.
9281  **/
9282 static int i40e_add_vsi(struct i40e_vsi *vsi)
9283 {
9284 	int ret = -ENODEV;
9285 	i40e_status aq_ret = 0;
9286 	struct i40e_pf *pf = vsi->back;
9287 	struct i40e_hw *hw = &pf->hw;
9288 	struct i40e_vsi_context ctxt;
9289 	struct i40e_mac_filter *f, *ftmp;
9290 
9291 	u8 enabled_tc = 0x1; /* TC0 enabled */
9292 	int f_count = 0;
9293 
9294 	memset(&ctxt, 0, sizeof(ctxt));
9295 	switch (vsi->type) {
9296 	case I40E_VSI_MAIN:
9297 		/* The PF's main VSI is already setup as part of the
9298 		 * device initialization, so we'll not bother with
9299 		 * the add_vsi call, but we will retrieve the current
9300 		 * VSI context.
9301 		 */
9302 		ctxt.seid = pf->main_vsi_seid;
9303 		ctxt.pf_num = pf->hw.pf_id;
9304 		ctxt.vf_num = 0;
9305 		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9306 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9307 		if (ret) {
9308 			dev_info(&pf->pdev->dev,
9309 				 "couldn't get PF vsi config, err %s aq_err %s\n",
9310 				 i40e_stat_str(&pf->hw, ret),
9311 				 i40e_aq_str(&pf->hw,
9312 					     pf->hw.aq.asq_last_status));
9313 			return -ENOENT;
9314 		}
9315 		vsi->info = ctxt.info;
9316 		vsi->info.valid_sections = 0;
9317 
9318 		vsi->seid = ctxt.seid;
9319 		vsi->id = ctxt.vsi_number;
9320 
9321 		enabled_tc = i40e_pf_get_tc_map(pf);
9322 
9323 		/* MFP mode setup queue map and update VSI */
9324 		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
9325 		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
9326 			memset(&ctxt, 0, sizeof(ctxt));
9327 			ctxt.seid = pf->main_vsi_seid;
9328 			ctxt.pf_num = pf->hw.pf_id;
9329 			ctxt.vf_num = 0;
9330 			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
9331 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9332 			if (ret) {
9333 				dev_info(&pf->pdev->dev,
9334 					 "update vsi failed, err %s aq_err %s\n",
9335 					 i40e_stat_str(&pf->hw, ret),
9336 					 i40e_aq_str(&pf->hw,
9337 						    pf->hw.aq.asq_last_status));
9338 				ret = -ENOENT;
9339 				goto err;
9340 			}
9341 			/* update the local VSI info queue map */
9342 			i40e_vsi_update_queue_map(vsi, &ctxt);
9343 			vsi->info.valid_sections = 0;
9344 		} else {
9345 			/* Default/Main VSI is only enabled for TC0;
9346 			 * reconfigure it to enable all TCs that are
9347 			 * available on the port in SFP mode.
9348 			 * For MFP case the iSCSI PF would use this
9349 			 * flow to enable LAN+iSCSI TC.
9350 			 */
9351 			ret = i40e_vsi_config_tc(vsi, enabled_tc);
9352 			if (ret) {
9353 				dev_info(&pf->pdev->dev,
9354 					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
9355 					 enabled_tc,
9356 					 i40e_stat_str(&pf->hw, ret),
9357 					 i40e_aq_str(&pf->hw,
9358 						    pf->hw.aq.asq_last_status));
9359 				ret = -ENOENT;
9360 			}
9361 		}
9362 		break;
9363 
9364 	case I40E_VSI_FDIR:
9365 		ctxt.pf_num = hw->pf_id;
9366 		ctxt.vf_num = 0;
9367 		ctxt.uplink_seid = vsi->uplink_seid;
9368 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9369 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9370 		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
9371 		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
9372 			ctxt.info.valid_sections |=
9373 			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9374 			ctxt.info.switch_id =
9375 			   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9376 		}
9377 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9378 		break;
9379 
9380 	case I40E_VSI_VMDQ2:
9381 		ctxt.pf_num = hw->pf_id;
9382 		ctxt.vf_num = 0;
9383 		ctxt.uplink_seid = vsi->uplink_seid;
9384 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9385 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
9386 
9387 		/* This VSI is connected to VEB so the switch_id
9388 		 * should be set to zero by default.
9389 		 */
9390 		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9391 			ctxt.info.valid_sections |=
9392 				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9393 			ctxt.info.switch_id =
9394 				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9395 		}
9396 
9397 		/* Setup the VSI tx/rx queue map for TC0 only for now */
9398 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9399 		break;
9400 
9401 	case I40E_VSI_SRIOV:
9402 		ctxt.pf_num = hw->pf_id;
9403 		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
9404 		ctxt.uplink_seid = vsi->uplink_seid;
9405 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9406 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
9407 
9408 		/* This VSI is connected to VEB so the switch_id
9409 		 * should be set to zero by default.
9410 		 */
9411 		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9412 			ctxt.info.valid_sections |=
9413 				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9414 			ctxt.info.switch_id =
9415 				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9416 		}
9417 
9418 		if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
9419 			ctxt.info.valid_sections |=
9420 				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
9421 			ctxt.info.queueing_opt_flags |=
9422 				(I40E_AQ_VSI_QUE_OPT_TCP_ENA |
9423 				 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
9424 		}
9425 
9426 		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
9427 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
9428 		if (pf->vf[vsi->vf_id].spoofchk) {
9429 			ctxt.info.valid_sections |=
9430 				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
9431 			ctxt.info.sec_flags |=
9432 				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
9433 				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
9434 		}
9435 		/* Setup the VSI tx/rx queue map for TC0 only for now */
9436 		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9437 		break;
9438 
9439 #ifdef I40E_FCOE
9440 	case I40E_VSI_FCOE:
9441 		ret = i40e_fcoe_vsi_init(vsi, &ctxt);
9442 		if (ret) {
9443 			dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
9444 			return ret;
9445 		}
9446 		break;
9447 
9448 #endif /* I40E_FCOE */
9449 	case I40E_VSI_IWARP:
9450 		/* send down message to iWARP */
9451 		break;
9452 
9453 	default:
9454 		return -ENODEV;
9455 	}
9456 
9457 	if (vsi->type != I40E_VSI_MAIN) {
9458 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
9459 		if (ret) {
9460 			dev_info(&vsi->back->pdev->dev,
9461 				 "add vsi failed, err %s aq_err %s\n",
9462 				 i40e_stat_str(&pf->hw, ret),
9463 				 i40e_aq_str(&pf->hw,
9464 					     pf->hw.aq.asq_last_status));
9465 			ret = -ENOENT;
9466 			goto err;
9467 		}
9468 		vsi->info = ctxt.info;
9469 		vsi->info.valid_sections = 0;
9470 		vsi->seid = ctxt.seid;
9471 		vsi->id = ctxt.vsi_number;
9472 	}
9473 	/* Except for the FDIR VSI, set the broadcast filter on all other VSIs */
9474 	if (vsi->type != I40E_VSI_FDIR) {
9475 		aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
9476 		if (aq_ret) {
9477 			ret = i40e_aq_rc_to_posix(aq_ret,
9478 						  hw->aq.asq_last_status);
9479 			dev_info(&pf->pdev->dev,
9480 				 "set brdcast promisc failed, err %s, aq_err %s\n",
9481 				 i40e_stat_str(hw, aq_ret),
9482 				 i40e_aq_str(hw, hw->aq.asq_last_status));
9483 		}
9484 	}
9485 
9486 	vsi->active_filters = 0;
9487 	clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
9488 	spin_lock_bh(&vsi->mac_filter_list_lock);
9489 	/* If macvlan filters already exist, force them to get loaded */
9490 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
9491 		f->state = I40E_FILTER_NEW;
9492 		f_count++;
9493 	}
9494 	spin_unlock_bh(&vsi->mac_filter_list_lock);
9495 
9496 	if (f_count) {
9497 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
9498 		pf->flags |= I40E_FLAG_FILTER_SYNC;
9499 	}
9500 
9501 	/* Update VSI BW information */
9502 	ret = i40e_vsi_get_bw_info(vsi);
9503 	if (ret) {
9504 		dev_info(&pf->pdev->dev,
9505 			 "couldn't get vsi bw info, err %s aq_err %s\n",
9506 			 i40e_stat_str(&pf->hw, ret),
9507 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9508 		/* VSI is already added so not tearing that up */
9509 		ret = 0;
9510 	}
9511 
9512 err:
9513 	return ret;
9514 }
9515 
9516 /**
9517  * i40e_vsi_release - Delete a VSI and free its resources
9518  * @vsi: the VSI being removed
9519  *
9520  * Returns 0 on success or < 0 on error
9521  **/
9522 int i40e_vsi_release(struct i40e_vsi *vsi)
9523 {
9524 	struct i40e_mac_filter *f, *ftmp;
9525 	struct i40e_veb *veb = NULL;
9526 	struct i40e_pf *pf;
9527 	u16 uplink_seid;
9528 	int i, n;
9529 
9530 	pf = vsi->back;
9531 
9532 	/* release of a VEB-owner or last VSI is not allowed */
9533 	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
9534 		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
9535 			 vsi->seid, vsi->uplink_seid);
9536 		return -ENODEV;
9537 	}
9538 	if (vsi == pf->vsi[pf->lan_vsi] &&
9539 	    !test_bit(__I40E_DOWN, &pf->state)) {
9540 		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
9541 		return -ENODEV;
9542 	}
9543 
9544 	uplink_seid = vsi->uplink_seid;
9545 	if (vsi->type != I40E_VSI_SRIOV) {
9546 		if (vsi->netdev_registered) {
9547 			vsi->netdev_registered = false;
9548 			if (vsi->netdev) {
9549 				/* results in a call to i40e_close() */
9550 				unregister_netdev(vsi->netdev);
9551 			}
9552 		} else {
9553 			i40e_vsi_close(vsi);
9554 		}
9555 		i40e_vsi_disable_irq(vsi);
9556 	}
9557 
9558 	spin_lock_bh(&vsi->mac_filter_list_lock);
9559 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
9560 		i40e_del_filter(vsi, f->macaddr, f->vlan,
9561 				f->is_vf, f->is_netdev);
9562 	spin_unlock_bh(&vsi->mac_filter_list_lock);
9563 
9564 	i40e_sync_vsi_filters(vsi);
9565 
9566 	i40e_vsi_delete(vsi);
9567 	i40e_vsi_free_q_vectors(vsi);
9568 	if (vsi->netdev) {
9569 		free_netdev(vsi->netdev);
9570 		vsi->netdev = NULL;
9571 	}
9572 	i40e_vsi_clear_rings(vsi);
9573 	i40e_vsi_clear(vsi);
9574 
9575 	/* If this was the last thing on the VEB, except for the
9576 	 * controlling VSI, remove the VEB, which puts the controlling
9577 	 * VSI onto the next level down in the switch.
9578 	 *
9579 	 * Well, okay, there's one more exception here: don't remove
9580 	 * the orphan VEBs yet.  We'll wait for an explicit remove request
9581 	 * from up the network stack.
9582 	 */
9583 	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
9584 		if (pf->vsi[i] &&
9585 		    pf->vsi[i]->uplink_seid == uplink_seid &&
9586 		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9587 			n++;      /* count the VSIs */
9588 		}
9589 	}
9590 	for (i = 0; i < I40E_MAX_VEB; i++) {
9591 		if (!pf->veb[i])
9592 			continue;
9593 		if (pf->veb[i]->uplink_seid == uplink_seid)
9594 			n++;     /* count the VEBs */
9595 		if (pf->veb[i]->seid == uplink_seid)
9596 			veb = pf->veb[i];
9597 	}
9598 	if (n == 0 && veb && veb->uplink_seid != 0)
9599 		i40e_veb_release(veb);
9600 
9601 	return 0;
9602 }
9603 
9604 /**
9605  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
9606  * @vsi: ptr to the VSI
9607  *
9608  * This should only be called after i40e_vsi_mem_alloc() which allocates the
9609  * corresponding SW VSI structure and initializes num_queue_pairs for the
9610  * newly allocated VSI.
9611  *
9612  * Returns 0 on success or negative on failure
9613  **/
9614 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
9615 {
9616 	int ret = -ENOENT;
9617 	struct i40e_pf *pf = vsi->back;
9618 
9619 	if (vsi->q_vectors[0]) {
9620 		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
9621 			 vsi->seid);
9622 		return -EEXIST;
9623 	}
9624 
9625 	if (vsi->base_vector) {
9626 		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
9627 			 vsi->seid, vsi->base_vector);
9628 		return -EEXIST;
9629 	}
9630 
9631 	ret = i40e_vsi_alloc_q_vectors(vsi);
9632 	if (ret) {
9633 		dev_info(&pf->pdev->dev,
9634 			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
9635 			 vsi->num_q_vectors, vsi->seid, ret);
9636 		vsi->num_q_vectors = 0;
9637 		goto vector_setup_out;
9638 	}
9639 
9640 	/* In Legacy mode, we do not have to get any other vector since we
9641 	 * piggyback on the misc/ICR0 for queue interrupts.
9642 	 */
9643 	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
9644 		return ret;
9645 	if (vsi->num_q_vectors)
9646 		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
9647 						 vsi->num_q_vectors, vsi->idx);
9648 	if (vsi->base_vector < 0) {
9649 		dev_info(&pf->pdev->dev,
9650 			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
9651 			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
9652 		i40e_vsi_free_q_vectors(vsi);
9653 		ret = -ENOENT;
9654 		goto vector_setup_out;
9655 	}
9656 
9657 vector_setup_out:
9658 	return ret;
9659 }
9660 
9661 /**
9662  * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
9663  * @vsi: pointer to the vsi.
9664  *
9665  * This re-allocates a vsi's queue resources.
9666  *
9667  * Returns pointer to the successfully allocated and configured VSI sw struct
9668  * on success, otherwise returns NULL on failure.
9669  **/
9670 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
9671 {
9672 	struct i40e_pf *pf;
9673 	u8 enabled_tc;
9674 	int ret;
9675 
9676 	if (!vsi)
9677 		return NULL;
9678 
9679 	pf = vsi->back;
9680 
9681 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
9682 	i40e_vsi_clear_rings(vsi);
9683 
9684 	i40e_vsi_free_arrays(vsi, false);
9685 	i40e_set_num_rings_in_vsi(vsi);
9686 	ret = i40e_vsi_alloc_arrays(vsi, false);
9687 	if (ret)
9688 		goto err_vsi;
9689 
9690 	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
9691 	if (ret < 0) {
9692 		dev_info(&pf->pdev->dev,
9693 			 "failed to get tracking for %d queues for VSI %d err %d\n",
9694 			 vsi->alloc_queue_pairs, vsi->seid, ret);
9695 		goto err_vsi;
9696 	}
9697 	vsi->base_queue = ret;
9698 
9699 	/* Update the FW view of the VSI. Force a reset of TC and queue
9700 	 * layout configurations.
9701 	 */
9702 	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
9703 	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
9704 	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
9705 	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
9706 	if (vsi->type == I40E_VSI_MAIN)
9707 		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
9708 
9709 	/* assign it some queues */
9710 	ret = i40e_alloc_rings(vsi);
9711 	if (ret)
9712 		goto err_rings;
9713 
9714 	/* map all of the rings to the q_vectors */
9715 	i40e_vsi_map_rings_to_vectors(vsi);
9716 	return vsi;
9717 
9718 err_rings:
9719 	i40e_vsi_free_q_vectors(vsi);
9720 	if (vsi->netdev_registered) {
9721 		vsi->netdev_registered = false;
9722 		unregister_netdev(vsi->netdev);
9723 		free_netdev(vsi->netdev);
9724 		vsi->netdev = NULL;
9725 	}
9726 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9727 err_vsi:
9728 	i40e_vsi_clear(vsi);
9729 	return NULL;
9730 }
9731 
9732 /**
9733  * i40e_vsi_setup - Set up a VSI by a given type
9734  * @pf: board private structure
9735  * @type: VSI type
9736  * @uplink_seid: the switch element to link to
9737  * @param1: usage depends upon VSI type. For VF types, indicates VF id
9738  *
9739  * This allocates the sw VSI structure and its queue resources, then adds the VSI
9740  * to the identified VEB.
9741  *
9742  * Returns pointer to the successfully allocated and configured VSI sw struct on
9743  * success, otherwise returns NULL on failure.
9744  **/
9745 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
9746 				u16 uplink_seid, u32 param1)
9747 {
9748 	struct i40e_vsi *vsi = NULL;
9749 	struct i40e_veb *veb = NULL;
9750 	int ret, i;
9751 	int v_idx;
9752 
9753 	/* The requested uplink_seid must be either
9754 	 *     - the PF's port seid
9755 	 *              no VEB is needed because this is the PF
9756 	 *              or this is a Flow Director special case VSI
9757 	 *     - seid of an existing VEB
9758 	 *     - seid of a VSI that owns an existing VEB
9759 	 *     - seid of a VSI that doesn't own a VEB
9760 	 *              a new VEB is created and the VSI becomes the owner
9761 	 *     - seid of the PF VSI, which is what creates the first VEB
9762 	 *              this is a special case of the previous
9763 	 *
9764 	 * Find which uplink_seid we were given and create a new VEB if needed
9765 	 */
9766 	for (i = 0; i < I40E_MAX_VEB; i++) {
9767 		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
9768 			veb = pf->veb[i];
9769 			break;
9770 		}
9771 	}
9772 
9773 	if (!veb && uplink_seid != pf->mac_seid) {
9774 
9775 		for (i = 0; i < pf->num_alloc_vsi; i++) {
9776 			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
9777 				vsi = pf->vsi[i];
9778 				break;
9779 			}
9780 		}
9781 		if (!vsi) {
9782 			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
9783 				 uplink_seid);
9784 			return NULL;
9785 		}
9786 
9787 		if (vsi->uplink_seid == pf->mac_seid)
9788 			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
9789 					     vsi->tc_config.enabled_tc);
9790 		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
9791 			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9792 					     vsi->tc_config.enabled_tc);
9793 		if (veb) {
9794 			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
9795 				dev_info(&vsi->back->pdev->dev,
9796 					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
9797 				return NULL;
9798 			}
9799 			/* We come up by default in VEPA mode if SRIOV is not
9800 			 * already enabled, in which case we can't force VEPA
9801 			 * mode.
9802 			 */
9803 			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
9804 				veb->bridge_mode = BRIDGE_MODE_VEPA;
9805 				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9806 			}
9807 			i40e_config_bridge_mode(veb);
9808 		}
9809 		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9810 			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9811 				veb = pf->veb[i];
9812 		}
9813 		if (!veb) {
9814 			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
9815 			return NULL;
9816 		}
9817 
9818 		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9819 		uplink_seid = veb->seid;
9820 	}
9821 
9822 	/* get vsi sw struct */
9823 	v_idx = i40e_vsi_mem_alloc(pf, type);
9824 	if (v_idx < 0)
9825 		goto err_alloc;
9826 	vsi = pf->vsi[v_idx];
9827 	if (!vsi)
9828 		goto err_alloc;
9829 	vsi->type = type;
9830 	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
9831 
9832 	if (type == I40E_VSI_MAIN)
9833 		pf->lan_vsi = v_idx;
9834 	else if (type == I40E_VSI_SRIOV)
9835 		vsi->vf_id = param1;
9836 	/* assign it some queues */
9837 	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
9838 				vsi->idx);
9839 	if (ret < 0) {
9840 		dev_info(&pf->pdev->dev,
9841 			 "failed to get tracking for %d queues for VSI %d err=%d\n",
9842 			 vsi->alloc_queue_pairs, vsi->seid, ret);
9843 		goto err_vsi;
9844 	}
9845 	vsi->base_queue = ret;
9846 
9847 	/* get a VSI from the hardware */
9848 	vsi->uplink_seid = uplink_seid;
9849 	ret = i40e_add_vsi(vsi);
9850 	if (ret)
9851 		goto err_vsi;
9852 
9853 	switch (vsi->type) {
9854 	/* setup the netdev if needed */
9855 	case I40E_VSI_MAIN:
9856 		/* Apply relevant filters if a platform-specific mac
9857 		 * address was selected.
9858 		 */
9859 		if (pf->flags & I40E_FLAG_PF_MAC) {
9860 			ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
9861 			if (ret) {
9862 				dev_warn(&pf->pdev->dev,
9863 					 "could not set up macaddr; err %d\n",
9864 					 ret);
9865 			}
9866 		}
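		/* fall through */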
9867 	case I40E_VSI_VMDQ2:
9868 	case I40E_VSI_FCOE:
9869 		ret = i40e_config_netdev(vsi);
9870 		if (ret)
9871 			goto err_netdev;
9872 		ret = register_netdev(vsi->netdev);
9873 		if (ret)
9874 			goto err_netdev;
9875 		vsi->netdev_registered = true;
9876 		netif_carrier_off(vsi->netdev);
9877 #ifdef CONFIG_I40E_DCB
9878 		/* Setup DCB netlink interface */
9879 		i40e_dcbnl_setup(vsi);
9880 #endif /* CONFIG_I40E_DCB */
9881 		/* fall through */
9882 
9883 	case I40E_VSI_FDIR:
9884 		/* set up vectors and rings if needed */
9885 		ret = i40e_vsi_setup_vectors(vsi);
9886 		if (ret)
9887 			goto err_msix;
9888 
9889 		ret = i40e_alloc_rings(vsi);
9890 		if (ret)
9891 			goto err_rings;
9892 
9893 		/* map all of the rings to the q_vectors */
9894 		i40e_vsi_map_rings_to_vectors(vsi);
9895 
9896 		i40e_vsi_reset_stats(vsi);
9897 		break;
9898 
9899 	default:
9900 		/* no netdev or rings for the other VSI types */
9901 		break;
9902 	}
9903 
9904 	if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
9905 	    (vsi->type == I40E_VSI_VMDQ2)) {
9906 		ret = i40e_vsi_config_rss(vsi);
9907 	}
9908 	return vsi;
9909 
9910 err_rings:
9911 	i40e_vsi_free_q_vectors(vsi);
9912 err_msix:
9913 	if (vsi->netdev_registered) {
9914 		vsi->netdev_registered = false;
9915 		unregister_netdev(vsi->netdev);
9916 		free_netdev(vsi->netdev);
9917 		vsi->netdev = NULL;
9918 	}
9919 err_netdev:
9920 	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9921 err_vsi:
9922 	i40e_vsi_clear(vsi);
9923 err_alloc:
9924 	return NULL;
9925 }
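
/* A hypothetical call sketch (not copied from a real call site): creating
 * a VMDq VSI that hangs off the PF's LAN VSI would look roughly like
 *
 *	struct i40e_vsi *vsi;
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2,
 *			     pf->vsi[pf->lan_vsi]->seid, 0);
 *
 * exercising the "seid of a VSI that doesn't own a VEB" case described
 * above: a VEB is created on demand and the LAN VSI becomes its owner.
 */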
9926 
9927 /**
9928  * i40e_veb_get_bw_info - Query VEB BW information
9929  * @veb: the veb to query
9930  *
9931  * Query the Tx scheduler BW configuration data for given VEB
9932  **/
9933 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
9934 {
9935 	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
9936 	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
9937 	struct i40e_pf *pf = veb->pf;
9938 	struct i40e_hw *hw = &pf->hw;
9939 	u32 tc_bw_max;
9940 	int ret = 0;
9941 	int i;
9942 
9943 	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
9944 						  &bw_data, NULL);
9945 	if (ret) {
9946 		dev_info(&pf->pdev->dev,
9947 			 "query veb bw config failed, err %s aq_err %s\n",
9948 			 i40e_stat_str(&pf->hw, ret),
9949 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9950 		goto out;
9951 	}
9952 
9953 	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
9954 						   &ets_data, NULL);
9955 	if (ret) {
9956 		dev_info(&pf->pdev->dev,
9957 			 "query veb bw ets config failed, err %s aq_err %s\n",
9958 			 i40e_stat_str(&pf->hw, ret),
9959 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9960 		goto out;
9961 	}
9962 
9963 	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
9964 	veb->bw_max_quanta = ets_data.tc_bw_max;
9965 	veb->is_abs_credits = bw_data.absolute_credits_enable;
9966 	veb->enabled_tc = ets_data.tc_valid_bits;
9967 	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
9968 		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
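	/* tc_bw_max packs one 4 bit quanta field per TC into the 32 bits
	 * assembled above; only the low 3 bits of each field are used,
	 * hence the shift-by-(i * 4) and 0x7 mask below.
	 */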
9969 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9970 		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
9971 		veb->bw_tc_limit_credits[i] =
9972 					le16_to_cpu(bw_data.tc_bw_limits[i]);
9973 		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
9974 	}
9975 
9976 out:
9977 	return ret;
9978 }
9979 
9980 /**
9981  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
9982  * @pf: board private structure
9983  *
9984  * On error: returns error code (negative)
9985  * On success: returns VEB index in PF (non-negative)
9986  **/
9987 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
9988 {
9989 	int ret = -ENOENT;
9990 	struct i40e_veb *veb;
9991 	int i;
9992 
9993 	/* Need to protect the allocation of switch elements at the PF level */
9994 	mutex_lock(&pf->switch_mutex);
9995 
9996 	/* VEB list may be fragmented if VEB creation/destruction has
9997 	 * been happening.  We can afford to do a quick scan to look
9998 	 * for any free slots in the list.
9999 	 *
10000 	 * find the next empty veb slot with a simple linear scan
10001 	 */
10002 	i = 0;
10003 	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
10004 		i++;
10005 	if (i >= I40E_MAX_VEB) {
10006 		ret = -ENOMEM;
10007 		goto err_alloc_veb;  /* out of VEB slots! */
10008 	}
10009 
10010 	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
10011 	if (!veb) {
10012 		ret = -ENOMEM;
10013 		goto err_alloc_veb;
10014 	}
10015 	veb->pf = pf;
10016 	veb->idx = i;
10017 	veb->enabled_tc = 1;
10018 
10019 	pf->veb[i] = veb;
10020 	ret = i;
10021 err_alloc_veb:
10022 	mutex_unlock(&pf->switch_mutex);
10023 	return ret;
10024 }
10025 
10026 /**
10027  * i40e_switch_branch_release - Delete a branch of the switch tree
10028  * @branch: where to start deleting
10029  *
10030  * This uses recursion to find the tips of the branch to be removed,
10031  * deleting inward until we get back to this VEB and can delete it.
10032  **/
10033 static void i40e_switch_branch_release(struct i40e_veb *branch)
10034 {
10035 	struct i40e_pf *pf = branch->pf;
10036 	u16 branch_seid = branch->seid;
10037 	u16 veb_idx = branch->idx;
10038 	int i;
10039 
10040 	/* release any VEBs on this VEB - RECURSION */
10041 	for (i = 0; i < I40E_MAX_VEB; i++) {
10042 		if (!pf->veb[i])
10043 			continue;
10044 		if (pf->veb[i]->uplink_seid == branch->seid)
10045 			i40e_switch_branch_release(pf->veb[i]);
10046 	}
10047 
10048 	/* Release the VSIs on this VEB, but not the owner VSI.
10049 	 *
10050 	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
10051 	 *       the VEB itself, so don't use (*branch) after this loop.
10052 	 */
10053 	for (i = 0; i < pf->num_alloc_vsi; i++) {
10054 		if (!pf->vsi[i])
10055 			continue;
10056 		if (pf->vsi[i]->uplink_seid == branch_seid &&
10057 		   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
10058 			i40e_vsi_release(pf->vsi[i]);
10059 		}
10060 	}
10061 
10062 	/* There's one corner case where the VEB might not have been
10063 	 * removed, so double check it here and remove it if needed.
10064 	 * This case happens if the veb was created from the debugfs
10065 	 * commands and no VSIs were added to it.
10066 	 */
10067 	if (pf->veb[veb_idx])
10068 		i40e_veb_release(pf->veb[veb_idx]);
10069 }
10070 
10071 /**
10072  * i40e_veb_clear - remove veb struct
10073  * @veb: the veb to remove
10074  **/
10075 static void i40e_veb_clear(struct i40e_veb *veb)
10076 {
10077 	if (!veb)
10078 		return;
10079 
10080 	if (veb->pf) {
10081 		struct i40e_pf *pf = veb->pf;
10082 
10083 		mutex_lock(&pf->switch_mutex);
10084 		if (pf->veb[veb->idx] == veb)
10085 			pf->veb[veb->idx] = NULL;
10086 		mutex_unlock(&pf->switch_mutex);
10087 	}
10088 
10089 	kfree(veb);
10090 }
10091 
10092 /**
10093  * i40e_veb_release - Delete a VEB and free its resources
10094  * @veb: the VEB being removed
10095  **/
10096 void i40e_veb_release(struct i40e_veb *veb)
10097 {
10098 	struct i40e_vsi *vsi = NULL;
10099 	struct i40e_pf *pf;
10100 	int i, n = 0;
10101 
10102 	pf = veb->pf;
10103 
10104 	/* find the remaining VSI and check for extras */
10105 	for (i = 0; i < pf->num_alloc_vsi; i++) {
10106 		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
10107 			n++;
10108 			vsi = pf->vsi[i];
10109 		}
10110 	}
10111 	if (n != 1) {
10112 		dev_info(&pf->pdev->dev,
10113 			 "can't remove VEB %d with %d VSIs left\n",
10114 			 veb->seid, n);
10115 		return;
10116 	}
10117 
10118 	/* move the remaining VSI to uplink veb */
10119 	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
10120 	if (veb->uplink_seid) {
10121 		vsi->uplink_seid = veb->uplink_seid;
10122 		if (veb->uplink_seid == pf->mac_seid)
10123 			vsi->veb_idx = I40E_NO_VEB;
10124 		else
10125 			vsi->veb_idx = veb->veb_idx;
10126 	} else {
10127 		/* floating VEB */
10128 		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10129 		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
10130 	}
10131 
10132 	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10133 	i40e_veb_clear(veb);
10134 }
10135 
10136 /**
10137  * i40e_add_veb - create the VEB in the switch
10138  * @veb: the VEB to be instantiated
10139  * @vsi: the controlling VSI
10140  **/
10141 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
10142 {
10143 	struct i40e_pf *pf = veb->pf;
10144 	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
10145 	int ret;
10146 
10147 	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
10148 			      veb->enabled_tc, false,
10149 			      &veb->seid, enable_stats, NULL);
10150 
10151 	/* get a VEB from the hardware */
10152 	if (ret) {
10153 		dev_info(&pf->pdev->dev,
10154 			 "couldn't add VEB, err %s aq_err %s\n",
10155 			 i40e_stat_str(&pf->hw, ret),
10156 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10157 		return -EPERM;
10158 	}
10159 
10160 	/* get statistics counter */
10161 	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
10162 					 &veb->stats_idx, NULL, NULL, NULL);
10163 	if (ret) {
10164 		dev_info(&pf->pdev->dev,
10165 			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
10166 			 i40e_stat_str(&pf->hw, ret),
10167 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10168 		return -EPERM;
10169 	}
10170 	ret = i40e_veb_get_bw_info(veb);
10171 	if (ret) {
10172 		dev_info(&pf->pdev->dev,
10173 			 "couldn't get VEB bw info, err %s aq_err %s\n",
10174 			 i40e_stat_str(&pf->hw, ret),
10175 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10176 		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10177 		return -ENOENT;
10178 	}
10179 
10180 	vsi->uplink_seid = veb->seid;
10181 	vsi->veb_idx = veb->idx;
10182 	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
10183 
10184 	return 0;
10185 }
10186 
10187 /**
10188  * i40e_veb_setup - Set up a VEB
10189  * @pf: board private structure
10190  * @flags: VEB setup flags
10191  * @uplink_seid: the switch element to link to
10192  * @vsi_seid: the initial VSI seid
10193  * @enabled_tc: Enabled TC bit-map
10194  *
10195  * This allocates the sw VEB structure and links it into the switch.
10196  * It is possible and legal for this to be a duplicate of an already
10197  * existing VEB.  It is also possible for both uplink and vsi seids
10198  * to be zero, in order to create a floating VEB.
10199  *
10200  * Returns pointer to the successfully allocated VEB sw struct on
10201  * success, otherwise returns NULL on failure.
10202  **/
10203 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
10204 				u16 uplink_seid, u16 vsi_seid,
10205 				u8 enabled_tc)
10206 {
10207 	struct i40e_veb *veb, *uplink_veb = NULL;
10208 	int vsi_idx, veb_idx;
10209 	int ret;
10210 
10211 	/* if one seid is 0, the other must be 0 to create a floating relay */
10212 	if ((uplink_seid == 0 || vsi_seid == 0) &&
10213 	    (uplink_seid + vsi_seid != 0)) {
10214 		dev_info(&pf->pdev->dev,
10215 			 "one, not both, seids are 0: uplink=%d vsi=%d\n",
10216 			 uplink_seid, vsi_seid);
10217 		return NULL;
10218 	}
10219 
10220 	/* make sure there is such a vsi and uplink */
10221 	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
10222 		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
10223 			break;
10224 	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
10225 		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
10226 			 vsi_seid);
10227 		return NULL;
10228 	}
10229 
10230 	if (uplink_seid && uplink_seid != pf->mac_seid) {
10231 		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10232 			if (pf->veb[veb_idx] &&
10233 			    pf->veb[veb_idx]->seid == uplink_seid) {
10234 				uplink_veb = pf->veb[veb_idx];
10235 				break;
10236 			}
10237 		}
10238 		if (!uplink_veb) {
10239 			dev_info(&pf->pdev->dev,
10240 				 "uplink seid %d not found\n", uplink_seid);
10241 			return NULL;
10242 		}
10243 	}
10244 
10245 	/* get veb sw struct */
10246 	veb_idx = i40e_veb_mem_alloc(pf);
10247 	if (veb_idx < 0)
10248 		goto err_alloc;
10249 	veb = pf->veb[veb_idx];
10250 	veb->flags = flags;
10251 	veb->uplink_seid = uplink_seid;
10252 	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
10253 	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
10254 
10255 	/* create the VEB in the switch */
10256 	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
10257 	if (ret)
10258 		goto err_veb;
10259 	if (vsi_idx == pf->lan_vsi)
10260 		pf->lan_veb = veb->idx;
10261 
10262 	return veb;
10263 
10264 err_veb:
10265 	i40e_veb_clear(veb);
10266 err_alloc:
10267 	return NULL;
10268 }
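
/* A minimal sketch of the common case, mirroring the call made from
 * i40e_ndo_bridge_setlink() earlier in this file:
 *
 *	veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
 *			     vsi->tc_config.enabled_tc);
 *
 * Passing uplink_seid == 0 and vsi_seid == 0 instead requests a floating
 * VEB, per the rule enforced at the top of the function.
 */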
10269 
10270 /**
10271  * i40e_setup_pf_switch_element - set PF vars based on switch type
10272  * @pf: board private structure
10273  * @ele: element we are building info from
10274  * @num_reported: total number of elements
10275  * @printconfig: should we print the contents
10276  *
10277  * helper function to assist in extracting a few useful SEID values.
10278  **/
10279 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
10280 				struct i40e_aqc_switch_config_element_resp *ele,
10281 				u16 num_reported, bool printconfig)
10282 {
10283 	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
10284 	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
10285 	u8 element_type = ele->element_type;
10286 	u16 seid = le16_to_cpu(ele->seid);
10287 
10288 	if (printconfig)
10289 		dev_info(&pf->pdev->dev,
10290 			 "type=%d seid=%d uplink=%d downlink=%d\n",
10291 			 element_type, seid, uplink_seid, downlink_seid);
10292 
10293 	switch (element_type) {
10294 	case I40E_SWITCH_ELEMENT_TYPE_MAC:
10295 		pf->mac_seid = seid;
10296 		break;
10297 	case I40E_SWITCH_ELEMENT_TYPE_VEB:
10298 		/* Main VEB? */
10299 		if (uplink_seid != pf->mac_seid)
10300 			break;
10301 		if (pf->lan_veb == I40E_NO_VEB) {
10302 			int v;
10303 
10304 			/* find existing or else empty VEB */
10305 			for (v = 0; v < I40E_MAX_VEB; v++) {
10306 				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
10307 					pf->lan_veb = v;
10308 					break;
10309 				}
10310 			}
10311 			if (pf->lan_veb == I40E_NO_VEB) {
10312 				v = i40e_veb_mem_alloc(pf);
10313 				if (v < 0)
10314 					break;
10315 				pf->lan_veb = v;
10316 			}
10317 		}
10318 
10319 		pf->veb[pf->lan_veb]->seid = seid;
10320 		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
10321 		pf->veb[pf->lan_veb]->pf = pf;
10322 		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
10323 		break;
10324 	case I40E_SWITCH_ELEMENT_TYPE_VSI:
10325 		if (num_reported != 1)
10326 			break;
10327 		/* This is immediately after a reset so we can assume this is
10328 		 * the PF's VSI
10329 		 */
10330 		pf->mac_seid = uplink_seid;
10331 		pf->pf_seid = downlink_seid;
10332 		pf->main_vsi_seid = seid;
10333 		if (printconfig)
10334 			dev_info(&pf->pdev->dev,
10335 				 "pf_seid=%d main_vsi_seid=%d\n",
10336 				 pf->pf_seid, pf->main_vsi_seid);
10337 		break;
10338 	case I40E_SWITCH_ELEMENT_TYPE_PF:
10339 	case I40E_SWITCH_ELEMENT_TYPE_VF:
10340 	case I40E_SWITCH_ELEMENT_TYPE_EMP:
10341 	case I40E_SWITCH_ELEMENT_TYPE_BMC:
10342 	case I40E_SWITCH_ELEMENT_TYPE_PE:
10343 	case I40E_SWITCH_ELEMENT_TYPE_PA:
10344 		/* ignore these for now */
10345 		break;
10346 	default:
10347 		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
10348 			 element_type, seid);
10349 		break;
10350 	}
10351 }
10352 
10353 /**
10354  * i40e_fetch_switch_configuration - Get switch config from firmware
10355  * @pf: board private structure
10356  * @printconfig: should we print the contents
10357  *
10358  * Get the current switch configuration from the device and
10359  * extract a few useful SEID values.
10360  **/
10361 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10362 {
10363 	struct i40e_aqc_get_switch_config_resp *sw_config;
10364 	u16 next_seid = 0;
10365 	int ret = 0;
10366 	u8 *aq_buf;
10367 	int i;
10368 
10369 	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
10370 	if (!aq_buf)
10371 		return -ENOMEM;
10372 
10373 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
10374 	do {
10375 		u16 num_reported, num_total;
10376 
10377 		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
10378 						I40E_AQ_LARGE_BUF,
10379 						&next_seid, NULL);
10380 		if (ret) {
10381 			dev_info(&pf->pdev->dev,
10382 				 "get switch config failed err %s aq_err %s\n",
10383 				 i40e_stat_str(&pf->hw, ret),
10384 				 i40e_aq_str(&pf->hw,
10385 					     pf->hw.aq.asq_last_status));
10386 			kfree(aq_buf);
10387 			return -ENOENT;
10388 		}
10389 
10390 		num_reported = le16_to_cpu(sw_config->header.num_reported);
10391 		num_total = le16_to_cpu(sw_config->header.num_total);
10392 
10393 		if (printconfig)
10394 			dev_info(&pf->pdev->dev,
10395 				 "header: %d reported %d total\n",
10396 				 num_reported, num_total);
10397 
10398 		for (i = 0; i < num_reported; i++) {
10399 			struct i40e_aqc_switch_config_element_resp *ele =
10400 				&sw_config->element[i];
10401 
10402 			i40e_setup_pf_switch_element(pf, ele, num_reported,
10403 						     printconfig);
10404 		}
10405 	} while (next_seid != 0);
10406 
10407 	kfree(aq_buf);
10408 	return ret;
10409 }
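
/* Illustrative usage sketch (hypothetical call site, not part of this
 * function's callers as shown here): a debug path could dump the current
 * switch layout by passing printconfig = true:
 *
 *	if (i40e_fetch_switch_configuration(pf, true))
 *		dev_warn(&pf->pdev->dev, "switch config dump failed\n");
 *
 * The loop above keeps re-issuing the AQ command with the returned
 * next_seid until the firmware reports no further elements.
 */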
10410 
10411 /**
10412  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
10413  * @pf: board private structure
10414  * @reinit: if the Main VSI needs to be re-initialized.
10415  *
10416  * Returns 0 on success, negative value on failure
10417  **/
10418 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
10419 {
10420 	u16 flags = 0;
10421 	int ret;
10422 
10423 	/* find out what's out there already */
10424 	ret = i40e_fetch_switch_configuration(pf, false);
10425 	if (ret) {
10426 		dev_info(&pf->pdev->dev,
10427 			 "couldn't fetch switch config, err %s aq_err %s\n",
10428 			 i40e_stat_str(&pf->hw, ret),
10429 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10430 		return ret;
10431 	}
10432 	i40e_pf_reset_stats(pf);
10433 
10434 	/* Set the switch config bit for the whole device to
10435 	 * support limited promiscuous or true promiscuous mode
10436 	 * when the user requests promiscuous mode. The default
10437 	 * is limited promiscuous.
10438 	 */
10439 
10440 	if ((pf->hw.pf_id == 0) &&
10441 	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
10442 		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
10443 
10444 	if (pf->hw.pf_id == 0) {
10445 		u16 valid_flags;
10446 
10447 		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
10448 		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
10449 						NULL);
10450 		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
10451 			dev_info(&pf->pdev->dev,
10452 				 "couldn't set switch config bits, err %s aq_err %s\n",
10453 				 i40e_stat_str(&pf->hw, ret),
10454 				 i40e_aq_str(&pf->hw,
10455 					     pf->hw.aq.asq_last_status));
10456 			/* not a fatal problem, just keep going */
10457 		}
10458 	}
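
	/* Note on the AQ call above (an illustrative reading of the flag
	 * semantics): valid_flags selects which bits of flags the firmware
	 * should apply, so passing I40E_AQ_SET_SWITCH_CFG_PROMISC in both
	 * arguments requests limited promiscuous mode, while passing it only
	 * in valid_flags with flags cleared requests true promiscuous
	 * support.
	 */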
10459 
10460 	/* first time setup */
10461 	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
10462 		struct i40e_vsi *vsi = NULL;
10463 		u16 uplink_seid;
10464 
10465 		/* Set up the PF VSI associated with the PF's main VSI
10466 		 * that is already in the HW switch
10467 		 */
10468 		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
10469 			uplink_seid = pf->veb[pf->lan_veb]->seid;
10470 		else
10471 			uplink_seid = pf->mac_seid;
10472 		if (pf->lan_vsi == I40E_NO_VSI)
10473 			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
10474 		else if (reinit)
10475 			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
10476 		if (!vsi) {
10477 			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
10478 			i40e_fdir_teardown(pf);
10479 			return -EAGAIN;
10480 		}
10481 	} else {
10482 		/* force a reset of TC and queue layout configurations */
10483 		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
10484 
10485 		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
10486 		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
10487 		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
10488 	}
10489 	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
10490 
10491 	i40e_fdir_sb_setup(pf);
10492 
10493 	/* Setup static PF queue filter control settings */
10494 	ret = i40e_setup_pf_filter_control(pf);
10495 	if (ret) {
10496 		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
10497 			 ret);
10498 		/* a failure here is not fatal, so continue with the other steps */
10499 	}
10500 
10501 	/* enable RSS in the HW, even for only one queue, as the stack can use
10502 	 * the hash
10503 	 */
10504 	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
10505 		i40e_pf_config_rss(pf);
10506 
10507 	/* fill in link information and enable LSE reporting */
10508 	i40e_update_link_info(&pf->hw);
10509 	i40e_link_event(pf);
10510 
10511 	/* Initialize user-specific link properties */
10512 	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
10513 				  I40E_AQ_AN_COMPLETED) ? true : false);
10514 
10515 	i40e_ptp_init(pf);
10516 
10517 	return ret;
10518 }
10519 
10520 /**
10521  * i40e_determine_queue_usage - Work out queue distribution
10522  * @pf: board private structure
10523  **/
10524 static void i40e_determine_queue_usage(struct i40e_pf *pf)
10525 {
10526 	int queues_left;
10527 
10528 	pf->num_lan_qps = 0;
10529 #ifdef I40E_FCOE
10530 	pf->num_fcoe_qps = 0;
10531 #endif
10532 
10533 	/* Find the max queues to be put into basic use.  We'll always be
10534 	 * using TC0, whether or not DCB is running, and TC0 will get the
10535 	 * big RSS set.
10536 	 */
10537 	queues_left = pf->hw.func_caps.num_tx_qp;
10538 
10539 	if ((queues_left == 1) ||
10540 	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
10541 		/* one qp for PF, no queues for anything else */
10542 		queues_left = 0;
10543 		pf->alloc_rss_size = pf->num_lan_qps = 1;
10544 
10545 		/* make sure all the fancies are disabled */
10546 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
10547 			       I40E_FLAG_IWARP_ENABLED	|
10548 #ifdef I40E_FCOE
10549 			       I40E_FLAG_FCOE_ENABLED	|
10550 #endif
10551 			       I40E_FLAG_FD_SB_ENABLED	|
10552 			       I40E_FLAG_FD_ATR_ENABLED	|
10553 			       I40E_FLAG_DCB_CAPABLE	|
10554 			       I40E_FLAG_DCB_ENABLED	|
10555 			       I40E_FLAG_SRIOV_ENABLED	|
10556 			       I40E_FLAG_VMDQ_ENABLED);
10557 	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
10558 				  I40E_FLAG_FD_SB_ENABLED |
10559 				  I40E_FLAG_FD_ATR_ENABLED |
10560 				  I40E_FLAG_DCB_CAPABLE))) {
10561 		/* one qp for PF */
10562 		pf->alloc_rss_size = pf->num_lan_qps = 1;
10563 		queues_left -= pf->num_lan_qps;
10564 
10565 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
10566 			       I40E_FLAG_IWARP_ENABLED	|
10567 #ifdef I40E_FCOE
10568 			       I40E_FLAG_FCOE_ENABLED	|
10569 #endif
10570 			       I40E_FLAG_FD_SB_ENABLED	|
10571 			       I40E_FLAG_FD_ATR_ENABLED	|
10572 			       I40E_FLAG_DCB_ENABLED	|
10573 			       I40E_FLAG_VMDQ_ENABLED);
10574 	} else {
10575 		/* Not enough queues for all TCs */
10576 		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
10577 		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
10578 			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
10579 					I40E_FLAG_DCB_ENABLED);
10580 			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
10581 		}
10582 		pf->num_lan_qps = max_t(int, pf->rss_size_max,
10583 					num_online_cpus());
10584 		pf->num_lan_qps = min_t(int, pf->num_lan_qps,
10585 					pf->hw.func_caps.num_tx_qp);
10586 
10587 		queues_left -= pf->num_lan_qps;
10588 	}
10589 
10590 #ifdef I40E_FCOE
10591 	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
10592 		if (I40E_DEFAULT_FCOE <= queues_left) {
10593 			pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
10594 		} else if (I40E_MINIMUM_FCOE <= queues_left) {
10595 			pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
10596 		} else {
10597 			pf->num_fcoe_qps = 0;
10598 			pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
10599 			dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
10600 		}
10601 
10602 		queues_left -= pf->num_fcoe_qps;
10603 	}
10604 
10605 #endif
10606 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10607 		if (queues_left > 1) {
10608 			queues_left -= 1; /* save 1 queue for FD */
10609 		} else {
10610 			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10611 			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
10612 		}
10613 	}
10614 
10615 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10616 	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
10617 		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
10618 					(queues_left / pf->num_vf_qps));
10619 		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
10620 	}
10621 
10622 	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10623 	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
10624 		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
10625 					  (queues_left / pf->num_vmdq_qps));
10626 		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
10627 	}
10628 
10629 	pf->queues_left = queues_left;
10630 	dev_dbg(&pf->pdev->dev,
10631 		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
10632 		pf->hw.func_caps.num_tx_qp,
10633 		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
10634 		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
10635 		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
10636 		queues_left);
10637 #ifdef I40E_FCOE
10638 	dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
10639 #endif
10640 }
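
/* Worked example (all numbers hypothetical): with MSI-X and RSS enabled,
 * num_tx_qp = 64, rss_size_max = 8, 16 online CPUs, FD sideband on,
 * 8 requested VFs at 4 qps each and 2 VMDq VSIs at 2 qps each:
 *
 *	lan_qps = min(max(8, 16), 64) = 16	-> queues_left = 48
 *	FD sideband reserves one queue		-> queues_left = 47
 *	VFs take 8 * 4 = 32			-> queues_left = 15
 *	VMDq takes 2 * 2 = 4			-> queues_left = 11
 *
 * The remainder is saved in pf->queues_left for later consumers.
 */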
10641 
10642 /**
10643  * i40e_setup_pf_filter_control - Setup PF static filter control
10644  * @pf: PF to be setup
10645  *
10646  * i40e_setup_pf_filter_control sets up a PF's initial filter control
10647  * settings. If PE/FCoE are enabled then it will also set the per-PF
10648  * filter sizes required for them. It also enables the Flow Director,
10649  * ethertype and macvlan filter settings for the PF.
10650  *
10651  * Returns 0 on success, negative on failure
10652  **/
10653 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
10654 {
10655 	struct i40e_filter_control_settings *settings = &pf->filter_settings;
10656 
10657 	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
10658 
10659 	/* Flow Director is enabled */
10660 	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
10661 		settings->enable_fdir = true;
10662 
10663 	/* Ethtype and MACVLAN filters enabled for PF */
10664 	settings->enable_ethtype = true;
10665 	settings->enable_macvlan = true;
10666 
10667 	if (i40e_set_filter_control(&pf->hw, settings))
10668 		return -ENOENT;
10669 
10670 	return 0;
10671 }
10672 
10673 #define INFO_STRING_LEN 255
10674 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
10675 static void i40e_print_features(struct i40e_pf *pf)
10676 {
10677 	struct i40e_hw *hw = &pf->hw;
10678 	char *buf;
10679 	int i;
10680 
10681 	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
10682 	if (!buf)
10683 		return;
10684 
10685 	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
10686 #ifdef CONFIG_PCI_IOV
10687 	i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
10688 #endif
10689 	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
10690 		      pf->hw.func_caps.num_vsis,
10691 		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
10692 	if (pf->flags & I40E_FLAG_RSS_ENABLED)
10693 		i += snprintf(&buf[i], REMAIN(i), " RSS");
10694 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
10695 		i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
10696 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10697 		i += snprintf(&buf[i], REMAIN(i), " FD_SB");
10698 		i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
10699 	}
10700 	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
10701 		i += snprintf(&buf[i], REMAIN(i), " DCB");
10702 	i += snprintf(&buf[i], REMAIN(i), " VxLAN");
10703 	i += snprintf(&buf[i], REMAIN(i), " Geneve");
10704 	if (pf->flags & I40E_FLAG_PTP)
10705 		i += snprintf(&buf[i], REMAIN(i), " PTP");
10706 #ifdef I40E_FCOE
10707 	if (pf->flags & I40E_FLAG_FCOE_ENABLED)
10708 		i += snprintf(&buf[i], REMAIN(i), " FCOE");
10709 #endif
10710 	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10711 		i += snprintf(&buf[i], REMAIN(i), " VEB");
10712 	else
10713 		i += snprintf(&buf[i], REMAIN(i), " VEPA");
10714 
10715 	dev_info(&pf->pdev->dev, "%s\n", buf);
10716 	kfree(buf);
10717 	WARN_ON(i > INFO_STRING_LEN);
10718 }
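
/* For illustration only, the resulting log line might read (all values
 * hypothetical):
 *
 *	i40e 0000:02:00.0: Features: PF-id[0] VFs: 32 VSIs: 66 QP: 16
 *	RSS FD_ATR FD_SB NTUPLE VxLAN Geneve PTP VEPA
 */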
10719 
10720 /**
10721  * i40e_get_platform_mac_addr - get platform-specific MAC address
10722  *
10723  * @pdev: PCI device information struct
10724  * @pf: board private structure
10725  *
10726  * Look up the MAC address in Open Firmware on systems that support it,
10727  * and use IDPROM on SPARC if no OF address is found. On return, the
10728  * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value
10729  * has been selected.
10730  **/
10731 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
10732 {
10733 	pf->flags &= ~I40E_FLAG_PF_MAC;
10734 	if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
10735 		pf->flags |= I40E_FLAG_PF_MAC;
10736 }
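
/* On an OF-based platform, eth_platform_get_mac_address() would typically
 * pick the address up from a device-tree property, e.g. (hypothetical node
 * and address):
 *
 *	ethernet@0 {
 *		local-mac-address = [00 a0 c9 01 02 03];
 *	};
 */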
10737 
10738 /**
10739  * i40e_probe - Device initialization routine
10740  * @pdev: PCI device information struct
10741  * @ent: entry in i40e_pci_tbl
10742  *
10743  * i40e_probe initializes a PF identified by a pci_dev structure.
10744  * The OS initialization, configuring of the PF private structure,
10745  * and a hardware reset occur.
10746  *
10747  * Returns 0 on success, negative on failure
10748  **/
10749 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10750 {
10751 	struct i40e_aq_get_phy_abilities_resp abilities;
10752 	struct i40e_pf *pf;
10753 	struct i40e_hw *hw;
10754 	static u16 pfs_found;
10755 	u16 wol_nvm_bits;
10756 	u16 link_status;
10757 	int err;
10758 	u32 val;
10759 	u32 i;
10760 	u8 set_fc_aq_fail;
10761 
10762 	err = pci_enable_device_mem(pdev);
10763 	if (err)
10764 		return err;
10765 
10766 	/* set up for high or low dma */
10767 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10768 	if (err) {
10769 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10770 		if (err) {
10771 			dev_err(&pdev->dev,
10772 				"DMA configuration failed: 0x%x\n", err);
10773 			goto err_dma;
10774 		}
10775 	}
10776 
10777 	/* set up pci connections */
10778 	err = pci_request_mem_regions(pdev, i40e_driver_name);
10779 	if (err) {
10780 		dev_info(&pdev->dev,
10781 			 "pci_request_selected_regions failed %d\n", err);
10782 		goto err_pci_reg;
10783 	}
10784 
10785 	pci_enable_pcie_error_reporting(pdev);
10786 	pci_set_master(pdev);
10787 
10788 	/* Now that we have a PCI connection, we need to do the
10789 	 * low level device setup.  This is primarily setting up
10790 	 * the Admin Queue structures and then querying for the
10791 	 * device's current profile information.
10792 	 */
10793 	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
10794 	if (!pf) {
10795 		err = -ENOMEM;
10796 		goto err_pf_alloc;
10797 	}
10798 	pf->next_vsi = 0;
10799 	pf->pdev = pdev;
10800 	set_bit(__I40E_DOWN, &pf->state);
10801 
10802 	hw = &pf->hw;
10803 	hw->back = pf;
10804 
10805 	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
10806 				I40E_MAX_CSR_SPACE);
10807 
10808 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
10809 	if (!hw->hw_addr) {
10810 		err = -EIO;
10811 		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
10812 			 (unsigned int)pci_resource_start(pdev, 0),
10813 			 pf->ioremap_len, err);
10814 		goto err_ioremap;
10815 	}
10816 	hw->vendor_id = pdev->vendor;
10817 	hw->device_id = pdev->device;
10818 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
10819 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
10820 	hw->subsystem_device_id = pdev->subsystem_device;
10821 	hw->bus.device = PCI_SLOT(pdev->devfn);
10822 	hw->bus.func = PCI_FUNC(pdev->devfn);
10823 	pf->instance = pfs_found;
10824 
10825 	/* set up the locks for the AQ, do this only once in probe
10826 	 * and destroy them only once in remove
10827 	 */
10828 	mutex_init(&hw->aq.asq_mutex);
10829 	mutex_init(&hw->aq.arq_mutex);
10830 
10831 	if (debug != -1)
10832 		pf->msg_enable = debug;
10835 
10836 	/* do a special CORER for clearing PXE mode once at init */
10837 	if (hw->revision_id == 0 &&
10838 	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
10839 		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
10840 		i40e_flush(hw);
10841 		msleep(200);
10842 		pf->corer_count++;
10843 
10844 		i40e_clear_pxe_mode(hw);
10845 	}
10846 
10847 	/* Reset here to make sure all is clean and to define PF 'n' */
10848 	i40e_clear_hw(hw);
10849 	err = i40e_pf_reset(hw);
10850 	if (err) {
10851 		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
10852 		goto err_pf_reset;
10853 	}
10854 	pf->pfr_count++;
10855 
10856 	hw->aq.num_arq_entries = I40E_AQ_LEN;
10857 	hw->aq.num_asq_entries = I40E_AQ_LEN;
10858 	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10859 	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10860 	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
10861 
10862 	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
10863 		 "%s-%s:misc",
10864 		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
10865 
10866 	err = i40e_init_shared_code(hw);
10867 	if (err) {
10868 		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
10869 			 err);
10870 		goto err_pf_reset;
10871 	}
10872 
10873 	/* set up a default setting for link flow control */
10874 	pf->hw.fc.requested_mode = I40E_FC_NONE;
10875 
10876 	err = i40e_init_adminq(hw);
10877 	if (err) {
10878 		if (err == I40E_ERR_FIRMWARE_API_VERSION)
10879 			dev_info(&pdev->dev,
10880 				 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
10881 		else
10882 			dev_info(&pdev->dev,
10883 				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
10884 
10885 		goto err_pf_reset;
10886 	}
10887 
10888 	/* provide nvm, fw, api versions */
10889 	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
10890 		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
10891 		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
10892 		 i40e_nvm_version_str(hw));
10893 
10894 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
10895 	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
10896 		dev_info(&pdev->dev,
10897 			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
10898 	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
10899 		 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
10900 		dev_info(&pdev->dev,
10901 			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
10902 
10903 	i40e_verify_eeprom(pf);
10904 
10905 	/* Rev 0 hardware was never productized */
10906 	if (hw->revision_id < 1)
10907 		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
10908 
10909 	i40e_clear_pxe_mode(hw);
10910 	err = i40e_get_capabilities(pf);
10911 	if (err)
10912 		goto err_adminq_setup;
10913 
10914 	err = i40e_sw_init(pf);
10915 	if (err) {
10916 		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
10917 		goto err_sw_init;
10918 	}
10919 
10920 	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10921 				hw->func_caps.num_rx_qp,
10922 				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
10923 	if (err) {
10924 		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
10925 		goto err_init_lan_hmc;
10926 	}
10927 
10928 	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10929 	if (err) {
10930 		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
10931 		err = -ENOENT;
10932 		goto err_configure_lan_hmc;
10933 	}
10934 
10935 	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
10936 	 * Ignore error return codes because this will fail if the agent
10937 	 * was already disabled via hardware settings.
10938 	 */
10939 	if (pf->flags & I40E_FLAG_STOP_FW_LLDP) {
10940 		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
10941 		i40e_aq_stop_lldp(hw, true, NULL);
10942 	}
10943 
10944 	i40e_get_mac_addr(hw, hw->mac.addr);
10945 	/* allow a platform config to override the HW addr */
10946 	i40e_get_platform_mac_addr(pdev, pf);
10947 	if (!is_valid_ether_addr(hw->mac.addr)) {
10948 		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
10949 		err = -EIO;
10950 		goto err_mac_addr;
10951 	}
10952 	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
10953 	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
10954 	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
10955 	if (is_valid_ether_addr(hw->mac.port_addr))
10956 		pf->flags |= I40E_FLAG_PORT_ID_VALID;
10957 #ifdef I40E_FCOE
10958 	err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
10959 	if (err)
10960 		dev_info(&pdev->dev,
10961 			 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
10962 	if (!is_valid_ether_addr(hw->mac.san_addr)) {
10963 		dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
10964 			 hw->mac.san_addr);
10965 		ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
10966 	}
10967 	dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
10968 #endif /* I40E_FCOE */
10969 
10970 	pci_set_drvdata(pdev, pf);
10971 	pci_save_state(pdev);
10972 #ifdef CONFIG_I40E_DCB
10973 	err = i40e_init_pf_dcb(pf);
10974 	if (err) {
10975 		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
10976 		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE & I40E_FLAG_DCB_ENABLED);
10977 		/* Continue without DCB enabled */
10978 	}
10979 #endif /* CONFIG_I40E_DCB */
10980 
10981 	/* set up periodic task facility */
10982 	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
10983 	pf->service_timer_period = HZ;
10984 
10985 	INIT_WORK(&pf->service_task, i40e_service_task);
10986 	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
10987 	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
10988 
10989 	/* NVM bit on means WoL disabled for the port */
10990 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
10991 	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
10992 		pf->wol_en = false;
10993 	else
10994 		pf->wol_en = true;
10995 	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
10996 
10997 	/* set up the main switch operations */
10998 	i40e_determine_queue_usage(pf);
10999 	err = i40e_init_interrupt_scheme(pf);
11000 	if (err)
11001 		goto err_switch_setup;
11002 
11003 	/* The number of VSIs reported by the FW is the minimum guaranteed
11004 	 * to us; HW supports far more and we share the remaining pool with
11005 	 * the other PFs. We allocate space for more than the guarantee with
11006 	 * the understanding that we might not get them all later.
11007 	 */
11008 	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
11009 		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
11010 	else
11011 		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
11012 
11013 	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
11014 	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
11015 			  GFP_KERNEL);
11016 	if (!pf->vsi) {
11017 		err = -ENOMEM;
11018 		goto err_switch_setup;
11019 	}
11020 
11021 #ifdef CONFIG_PCI_IOV
11022 	/* prep for VF support */
11023 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11024 	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11025 	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
11026 		if (pci_num_vf(pdev))
11027 			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11028 	}
11029 #endif
11030 	err = i40e_setup_pf_switch(pf, false);
11031 	if (err) {
11032 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
11033 		goto err_vsis;
11034 	}
11035 
11036 	/* Make sure flow control is set according to current settings */
11037 	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
11038 	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
11039 		dev_dbg(&pf->pdev->dev,
11040 			"Set fc with err %s aq_err %s on get_phy_cap\n",
11041 			i40e_stat_str(hw, err),
11042 			i40e_aq_str(hw, hw->aq.asq_last_status));
11043 	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
11044 		dev_dbg(&pf->pdev->dev,
11045 			"Set fc with err %s aq_err %s on set_phy_config\n",
11046 			i40e_stat_str(hw, err),
11047 			i40e_aq_str(hw, hw->aq.asq_last_status));
11048 	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
11049 		dev_dbg(&pf->pdev->dev,
11050 			"Set fc with err %s aq_err %s on get_link_info\n",
11051 			i40e_stat_str(hw, err),
11052 			i40e_aq_str(hw, hw->aq.asq_last_status));
11053 
11054 	/* if FDIR VSI was set up, start it now */
11055 	for (i = 0; i < pf->num_alloc_vsi; i++) {
11056 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
11057 			i40e_vsi_open(pf->vsi[i]);
11058 			break;
11059 		}
11060 	}
11061 
11062 	/* The driver only wants link up/down and module qualification
11063 	 * reports from firmware.  Note the negative logic.
11064 	 */
11065 	err = i40e_aq_set_phy_int_mask(&pf->hw,
11066 				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
11067 					 I40E_AQ_EVENT_MEDIA_NA |
11068 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
11069 	if (err)
11070 		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
11071 			 i40e_stat_str(&pf->hw, err),
11072 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11073 
11074 	/* Reconfigure hardware for allowing smaller MSS in the case
11075 	 * of TSO, so that we avoid the MDD being fired and causing
11076 	 * a reset in the case of small MSS+TSO.
11077 	 */
11078 	val = rd32(hw, I40E_REG_MSS);
11079 	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
11080 		val &= ~I40E_REG_MSS_MIN_MASK;
11081 		val |= I40E_64BYTE_MSS;
11082 		wr32(hw, I40E_REG_MSS, val);
11083 	}
11084 
11085 	if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
11086 		msleep(75);
11087 		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
11088 		if (err)
11089 			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
11090 				 i40e_stat_str(&pf->hw, err),
11091 				 i40e_aq_str(&pf->hw,
11092 					     pf->hw.aq.asq_last_status));
11093 	}
11094 	/* The main driver is (mostly) up and happy. We need to set this state
11095 	 * before setting up the misc vector or we get a race and the vector
11096 	 * ends up disabled forever.
11097 	 */
11098 	clear_bit(__I40E_DOWN, &pf->state);
11099 
11100 	/* In case of MSIX we are going to setup the misc vector right here
11101 	 * to handle admin queue events etc. In case of legacy and MSI
11102 	 * the misc functionality and queue processing is combined in
11103 	 * the same vector and that gets setup at open.
11104 	 */
11105 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11106 		err = i40e_setup_misc_vector(pf);
11107 		if (err) {
11108 			dev_info(&pdev->dev,
11109 				 "setup of misc vector failed: %d\n", err);
11110 			goto err_vsis;
11111 		}
11112 	}
11113 
11114 #ifdef CONFIG_PCI_IOV
11115 	/* prep for VF support */
11116 	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11117 	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11118 	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
11119 		/* disable link interrupts for VFs */
11120 		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
11121 		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
11122 		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
11123 		i40e_flush(hw);
11124 
11125 		if (pci_num_vf(pdev)) {
11126 			dev_info(&pdev->dev,
11127 				 "Active VFs found, allocating resources.\n");
11128 			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
11129 			if (err)
11130 				dev_info(&pdev->dev,
11131 					 "Error %d allocating resources for existing VFs\n",
11132 					 err);
11133 		}
11134 	}
11135 #endif /* CONFIG_PCI_IOV */
11136 
11137 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11138 		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
11139 						      pf->num_iwarp_msix,
11140 						      I40E_IWARP_IRQ_PILE_ID);
11141 		if (pf->iwarp_base_vector < 0) {
11142 			dev_info(&pdev->dev,
11143 				 "failed to get tracking for %d vectors for IWARP err=%d\n",
11144 				 pf->num_iwarp_msix, pf->iwarp_base_vector);
11145 			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11146 		}
11147 	}
11148 
11149 	i40e_dbg_pf_init(pf);
11150 
11151 	/* tell the firmware that we're starting */
11152 	i40e_send_version(pf);
11153 
11154 	/* since everything's happy, start the service_task timer */
11155 	mod_timer(&pf->service_timer,
11156 		  round_jiffies(jiffies + pf->service_timer_period));
11157 
11158 	/* add this PF to client device list and launch a client service task */
11159 	err = i40e_lan_add_device(pf);
11160 	if (err)
11161 		dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
11162 			 err);
11163 
11164 #ifdef I40E_FCOE
11165 	/* create FCoE interface */
11166 	i40e_fcoe_vsi_setup(pf);
11167 
11168 #endif
11169 #define PCI_SPEED_SIZE 8
11170 #define PCI_WIDTH_SIZE 8
11171 	/* Devices on the IOSF bus do not have this information
11172 	 * and will report PCI Gen 1 x 1 by default so don't bother
11173 	 * checking them.
11174 	 */
11175 	if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
11176 		char speed[PCI_SPEED_SIZE] = "Unknown";
11177 		char width[PCI_WIDTH_SIZE] = "Unknown";
11178 
11179 		/* Get the negotiated link width and speed from PCI config
11180 		 * space
11181 		 */
11182 		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
11183 					  &link_status);
11184 
11185 		i40e_set_pci_config_data(hw, link_status);
11186 
11187 		switch (hw->bus.speed) {
11188 		case i40e_bus_speed_8000:
11189 			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
11190 		case i40e_bus_speed_5000:
11191 			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
11192 		case i40e_bus_speed_2500:
11193 			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
11194 		default:
11195 			break;
11196 		}
11197 		switch (hw->bus.width) {
11198 		case i40e_bus_width_pcie_x8:
11199 			strncpy(width, "8", PCI_WIDTH_SIZE); break;
11200 		case i40e_bus_width_pcie_x4:
11201 			strncpy(width, "4", PCI_WIDTH_SIZE); break;
11202 		case i40e_bus_width_pcie_x2:
11203 			strncpy(width, "2", PCI_WIDTH_SIZE); break;
11204 		case i40e_bus_width_pcie_x1:
11205 			strncpy(width, "1", PCI_WIDTH_SIZE); break;
11206 		default:
11207 			break;
11208 		}
11209 
11210 		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
11211 			 speed, width);
11212 
11213 		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
11214 		    hw->bus.speed < i40e_bus_speed_8000) {
11215 			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
11216 			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
11217 		}
11218 	}
11219 
11220 	/* get the requested speeds from the fw */
11221 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
11222 	if (err)
11223 		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
11224 			i40e_stat_str(&pf->hw, err),
11225 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11226 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
11227 
11228 	/* get the supported phy types from the fw */
11229 	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
11230 	if (err)
11231 		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
11232 			i40e_stat_str(&pf->hw, err),
11233 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11234 	pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
11235 
11236 	/* Add a filter to drop all Flow control frames from any VSI from being
11237 	 * transmitted. By doing so we stop a malicious VF from sending out
11238 	 * PAUSE or PFC frames and potentially controlling traffic for other
11239 	 * PF/VF VSIs.
11240 	 * The FW can still send Flow control frames if enabled.
11241 	 */
11242 	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11243 						       pf->main_vsi_seid);
11244 
11245 	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
11246 	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
11247 		pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;
11248 
11249 	/* print a string summarizing features */
11250 	i40e_print_features(pf);
11251 
11252 	return 0;
11253 
11254 	/* Unwind what we've done if something failed in the setup */
11255 err_vsis:
11256 	set_bit(__I40E_DOWN, &pf->state);
11257 	i40e_clear_interrupt_scheme(pf);
11258 	kfree(pf->vsi);
11259 err_switch_setup:
11260 	i40e_reset_interrupt_capability(pf);
11261 	del_timer_sync(&pf->service_timer);
11262 err_mac_addr:
11263 err_configure_lan_hmc:
11264 	(void)i40e_shutdown_lan_hmc(hw);
11265 err_init_lan_hmc:
11266 	kfree(pf->qp_pile);
11267 err_sw_init:
11268 err_adminq_setup:
11269 err_pf_reset:
11270 	iounmap(hw->hw_addr);
11271 err_ioremap:
11272 	kfree(pf);
11273 err_pf_alloc:
11274 	pci_disable_pcie_error_reporting(pdev);
11275 	pci_release_mem_regions(pdev);
11276 err_pci_reg:
11277 err_dma:
11278 	pci_disable_device(pdev);
11279 	return err;
11280 }
11281 
11282 /**
11283  * i40e_remove - Device removal routine
11284  * @pdev: PCI device information struct
11285  *
11286  * i40e_remove is called by the PCI subsystem to alert the driver
11287  * that it should release a PCI device.  This could be caused by a
11288  * Hot-Plug event, or because the driver is going to be removed from
11289  * memory.
11290  **/
11291 static void i40e_remove(struct pci_dev *pdev)
11292 {
11293 	struct i40e_pf *pf = pci_get_drvdata(pdev);
11294 	struct i40e_hw *hw = &pf->hw;
11295 	i40e_status ret_code;
11296 	int i;
11297 
11298 	i40e_dbg_pf_exit(pf);
11299 
11300 	i40e_ptp_stop(pf);
11301 
11302 	/* Disable RSS in hw */
11303 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
11304 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
11305 
11306 	/* no more scheduling of any task */
11307 	set_bit(__I40E_SUSPENDED, &pf->state);
11308 	set_bit(__I40E_DOWN, &pf->state);
11309 	if (pf->service_timer.data)
11310 		del_timer_sync(&pf->service_timer);
11311 	if (pf->service_task.func)
11312 		cancel_work_sync(&pf->service_task);
11313 
11314 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
11315 		i40e_free_vfs(pf);
11316 		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
11317 	}
11318 
11319 	i40e_fdir_teardown(pf);
11320 
11321 	/* If there is a switch structure or any orphans, remove them.
11322 	 * This will leave only the PF's VSI remaining.
11323 	 */
11324 	for (i = 0; i < I40E_MAX_VEB; i++) {
11325 		if (!pf->veb[i])
11326 			continue;
11327 
11328 		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
11329 		    pf->veb[i]->uplink_seid == 0)
11330 			i40e_switch_branch_release(pf->veb[i]);
11331 	}
11332 
11333 	/* Now we can shutdown the PF's VSI, just before we kill
11334 	 * adminq and hmc.
11335 	 */
11336 	if (pf->vsi[pf->lan_vsi])
11337 		i40e_vsi_release(pf->vsi[pf->lan_vsi]);
11338 
11339 	/* remove attached clients */
11340 	ret_code = i40e_lan_del_device(pf);
11341 	if (ret_code) {
11342 		dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
11343 			 ret_code);
11344 	}
11345 
11346 	/* shutdown and destroy the HMC */
11347 	if (hw->hmc.hmc_obj) {
11348 		ret_code = i40e_shutdown_lan_hmc(hw);
11349 		if (ret_code)
11350 			dev_warn(&pdev->dev,
11351 				 "Failed to destroy the HMC resources: %d\n",
11352 				 ret_code);
11353 	}
11354 
11355 	/* shutdown the adminq */
11356 	i40e_shutdown_adminq(hw);
11357 
11358 	/* destroy the locks only once, here */
11359 	mutex_destroy(&hw->aq.arq_mutex);
11360 	mutex_destroy(&hw->aq.asq_mutex);
11361 
11362 	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
11363 	i40e_clear_interrupt_scheme(pf);
11364 	for (i = 0; i < pf->num_alloc_vsi; i++) {
11365 		if (pf->vsi[i]) {
11366 			i40e_vsi_clear_rings(pf->vsi[i]);
11367 			i40e_vsi_clear(pf->vsi[i]);
11368 			pf->vsi[i] = NULL;
11369 		}
11370 	}
11371 
11372 	for (i = 0; i < I40E_MAX_VEB; i++) {
11373 		kfree(pf->veb[i]);
11374 		pf->veb[i] = NULL;
11375 	}
11376 
11377 	kfree(pf->qp_pile);
11378 	kfree(pf->vsi);
11379 
11380 	iounmap(hw->hw_addr);
11381 	kfree(pf);
11382 	pci_release_mem_regions(pdev);
11383 
11384 	pci_disable_pcie_error_reporting(pdev);
11385 	pci_disable_device(pdev);
11386 }
11387 
11388 /**
11389  * i40e_pci_error_detected - warning that something funky happened in PCI land
11390  * @pdev: PCI device information struct
11391  *
11392  * Called to warn that something happened and the error handling steps
11393  * are in progress.  Allows the driver to quiesce things and be ready
11394  * for remediation.
11395  **/
11396 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
11397 						enum pci_channel_state error)
11398 {
11399 	struct i40e_pf *pf = pci_get_drvdata(pdev);
11400 
11401 	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
11402 
11403 	if (!pf) {
11404 		dev_info(&pdev->dev,
11405 			 "Cannot recover - error happened during device probe\n");
11406 		return PCI_ERS_RESULT_DISCONNECT;
11407 	}
11408 
11409 	/* shutdown all operations */
11410 	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
11411 		rtnl_lock();
11412 		i40e_prep_for_reset(pf);
11413 		rtnl_unlock();
11414 	}
11415 
11416 	/* Request a slot reset */
11417 	return PCI_ERS_RESULT_NEED_RESET;
11418 }
11419 
11420 /**
11421  * i40e_pci_error_slot_reset - a PCI slot reset just happened
11422  * @pdev: PCI device information struct
11423  *
11424  * Called to find if the driver can work with the device now that
11425  * the pci slot has been reset.  If a basic connection seems good
11426  * (registers are readable and have sane content) then return a
11427  * happy little PCI_ERS_RESULT_xxx.
11428  **/
11429 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
11430 {
11431 	struct i40e_pf *pf = pci_get_drvdata(pdev);
11432 	pci_ers_result_t result;
11433 	int err;
11434 	u32 reg;
11435 
11436 	dev_dbg(&pdev->dev, "%s\n", __func__);
11437 	if (pci_enable_device_mem(pdev)) {
11438 		dev_info(&pdev->dev,
11439 			 "Cannot re-enable PCI device after reset.\n");
11440 		result = PCI_ERS_RESULT_DISCONNECT;
11441 	} else {
11442 		pci_set_master(pdev);
11443 		pci_restore_state(pdev);
11444 		pci_save_state(pdev);
11445 		pci_wake_from_d3(pdev, false);
11446 
11447 		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
11448 		if (reg == 0)
11449 			result = PCI_ERS_RESULT_RECOVERED;
11450 		else
11451 			result = PCI_ERS_RESULT_DISCONNECT;
11452 	}
11453 
11454 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
11455 	if (err) {
11456 		dev_info(&pdev->dev,
11457 			 "pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
11458 			 err);
11459 		/* non-fatal, continue */
11460 	}
11461 
11462 	return result;
11463 }
11464 
11465 /**
11466  * i40e_pci_error_resume - restart operations after PCI error recovery
11467  * @pdev: PCI device information struct
11468  *
11469  * Called to allow the driver to bring things back up after PCI error
11470  * and/or reset recovery has finished.
11471  **/
11472 static void i40e_pci_error_resume(struct pci_dev *pdev)
11473 {
11474 	struct i40e_pf *pf = pci_get_drvdata(pdev);
11475 
11476 	dev_dbg(&pdev->dev, "%s\n", __func__);
11477 	if (test_bit(__I40E_SUSPENDED, &pf->state))
11478 		return;
11479 
11480 	rtnl_lock();
11481 	i40e_handle_reset_warning(pf);
11482 	rtnl_unlock();
11483 }
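
/* Recovery flow sketch: for an uncorrectable error, the AER core invokes
 * the handlers above in order -- error_detected() quiesces the device and
 * requests a slot reset, slot_reset() re-enables the function and checks
 * that registers respond sanely, and resume() rebuilds the device state
 * through the normal reset path.
 */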
11484 
11485 /**
11486  * i40e_shutdown - PCI callback for shutting down
11487  * @pdev: PCI device information struct
11488  **/
11489 static void i40e_shutdown(struct pci_dev *pdev)
11490 {
11491 	struct i40e_pf *pf = pci_get_drvdata(pdev);
11492 	struct i40e_hw *hw = &pf->hw;
11493 
11494 	set_bit(__I40E_SUSPENDED, &pf->state);
11495 	set_bit(__I40E_DOWN, &pf->state);
11496 	rtnl_lock();
11497 	i40e_prep_for_reset(pf);
11498 	rtnl_unlock();
11499 
11500 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11501 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11502 
11503 	del_timer_sync(&pf->service_timer);
11504 	cancel_work_sync(&pf->service_task);
11505 	i40e_fdir_teardown(pf);
11506 
11507 	rtnl_lock();
11508 	i40e_prep_for_reset(pf);
11509 	rtnl_unlock();
11510 
11511 	wr32(hw, I40E_PFPM_APM,
11512 	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11513 	wr32(hw, I40E_PFPM_WUFC,
11514 	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11515 
11516 	i40e_clear_interrupt_scheme(pf);
11517 
11518 	if (system_state == SYSTEM_POWER_OFF) {
11519 		pci_wake_from_d3(pdev, pf->wol_en);
11520 		pci_set_power_state(pdev, PCI_D3hot);
11521 	}
11522 }
11523 
11524 #ifdef CONFIG_PM
11525 /**
11526  * i40e_suspend - PCI callback for moving to D3
11527  * @pdev: PCI device information struct
11528  **/
11529 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11530 {
11531 	struct i40e_pf *pf = pci_get_drvdata(pdev);
11532 	struct i40e_hw *hw = &pf->hw;
11533 	int retval = 0;
11534 
11535 	set_bit(__I40E_SUSPENDED, &pf->state);
11536 	set_bit(__I40E_DOWN, &pf->state);
11537 
11538 	rtnl_lock();
11539 	i40e_prep_for_reset(pf);
11540 	rtnl_unlock();
11541 
11542 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11543 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11544 
11545 	i40e_stop_misc_vector(pf);
11546 
11547 	retval = pci_save_state(pdev);
11548 	if (retval)
11549 		return retval;
11550 
11551 	pci_wake_from_d3(pdev, pf->wol_en);
11552 	pci_set_power_state(pdev, PCI_D3hot);
11553 
11554 	return retval;
11555 }
11556 
11557 /**
11558  * i40e_resume - PCI callback for waking up from D3
11559  * @pdev: PCI device information struct
11560  **/
11561 static int i40e_resume(struct pci_dev *pdev)
11562 {
11563 	struct i40e_pf *pf = pci_get_drvdata(pdev);
11564 	u32 err;
11565 
11566 	pci_set_power_state(pdev, PCI_D0);
11567 	pci_restore_state(pdev);
11568 	/* pci_restore_state() clears dev->state_saved, so
11569 	 * call pci_save_state() again to set it.
11570 	 */
11571 	pci_save_state(pdev);
11572 
11573 	err = pci_enable_device_mem(pdev);
11574 	if (err) {
11575 		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
11576 		return err;
11577 	}
11578 	pci_set_master(pdev);
11579 
11580 	/* no wakeup events while running */
11581 	pci_wake_from_d3(pdev, false);
11582 
11583 	/* handling the reset will rebuild the device state */
11584 	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
11585 		clear_bit(__I40E_DOWN, &pf->state);
11586 		rtnl_lock();
11587 		i40e_reset_and_rebuild(pf, false);
11588 		rtnl_unlock();
11589 	}
11590 
11591 	return 0;
11592 }
11593 
11594 #endif
11595 static const struct pci_error_handlers i40e_err_handler = {
11596 	.error_detected = i40e_pci_error_detected,
11597 	.slot_reset = i40e_pci_error_slot_reset,
11598 	.resume = i40e_pci_error_resume,
11599 };
11600 
11601 static struct pci_driver i40e_driver = {
11602 	.name     = i40e_driver_name,
11603 	.id_table = i40e_pci_tbl,
11604 	.probe    = i40e_probe,
11605 	.remove   = i40e_remove,
11606 #ifdef CONFIG_PM
11607 	.suspend  = i40e_suspend,
11608 	.resume   = i40e_resume,
11609 #endif
11610 	.shutdown = i40e_shutdown,
11611 	.err_handler = &i40e_err_handler,
11612 	.sriov_configure = i40e_pci_sriov_configure,
11613 };
11614 
11615 /**
11616  * i40e_init_module - Driver registration routine
11617  *
11618  * i40e_init_module is the first routine called when the driver is
11619  * loaded. All it does is register with the PCI subsystem.
11620  **/
11621 static int __init i40e_init_module(void)
11622 {
11623 	pr_info("%s: %s - version %s\n", i40e_driver_name,
11624 		i40e_driver_string, i40e_driver_version_str);
11625 	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
11626 
11627 	/* We will see if a single thread per module is enough for now;
11628 	 * it can't be any worse than using the system workqueue, which
11629 	 * was already single threaded.
11630 	 */
11631 	i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
11632 				  i40e_driver_name);
11633 	if (!i40e_wq) {
11634 		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
11635 		return -ENOMEM;
11636 	}
11637 
11638 	i40e_dbg_init();
11639 	return pci_register_driver(&i40e_driver);
11640 }
11641 module_init(i40e_init_module);
11642 
11643 /**
11644  * i40e_exit_module - Driver exit cleanup routine
11645  *
11646  * i40e_exit_module is called just before the driver is removed
11647  * from memory.
11648  **/
11649 static void __exit i40e_exit_module(void)
11650 {
11651 	pci_unregister_driver(&i40e_driver);
11652 	destroy_workqueue(i40e_wq);
11653 	i40e_dbg_exit();
11654 }
11655 module_exit(i40e_exit_module);
11656